diff --git a/Package.swift b/Package.swift index a2cdcc2579..e3add5bd94 100644 --- a/Package.swift +++ b/Package.swift @@ -140,6 +140,7 @@ let package = Package( .library(name: "SotoDeviceFarm", targets: ["SotoDeviceFarm"]), .library(name: "SotoDirectConnect", targets: ["SotoDirectConnect"]), .library(name: "SotoDirectoryService", targets: ["SotoDirectoryService"]), + .library(name: "SotoDirectoryServiceData", targets: ["SotoDirectoryServiceData"]), .library(name: "SotoDocDB", targets: ["SotoDocDB"]), .library(name: "SotoDocDBElastic", targets: ["SotoDocDBElastic"]), .library(name: "SotoDrs", targets: ["SotoDrs"]), @@ -258,6 +259,7 @@ let package = Package( .library(name: "SotoMarketplaceDeployment", targets: ["SotoMarketplaceDeployment"]), .library(name: "SotoMarketplaceEntitlementService", targets: ["SotoMarketplaceEntitlementService"]), .library(name: "SotoMarketplaceMetering", targets: ["SotoMarketplaceMetering"]), + .library(name: "SotoMarketplaceReporting", targets: ["SotoMarketplaceReporting"]), .library(name: "SotoMediaConnect", targets: ["SotoMediaConnect"]), .library(name: "SotoMediaConvert", targets: ["SotoMediaConvert"]), .library(name: "SotoMediaLive", targets: ["SotoMediaLive"]), @@ -380,6 +382,7 @@ let package = Package( .library(name: "SotoSimSpaceWeaver", targets: ["SotoSimSpaceWeaver"]), .library(name: "SotoSnowDeviceManagement", targets: ["SotoSnowDeviceManagement"]), .library(name: "SotoSnowball", targets: ["SotoSnowball"]), + .library(name: "SotoSocialMessaging", targets: ["SotoSocialMessaging"]), .library(name: "SotoSsmSap", targets: ["SotoSsmSap"]), .library(name: "SotoStorageGateway", targets: ["SotoStorageGateway"]), .library(name: "SotoSupplyChain", targets: ["SotoSupplyChain"]), @@ -406,7 +409,6 @@ let package = Package( .library(name: "SotoWellArchitected", targets: ["SotoWellArchitected"]), .library(name: "SotoWisdom", targets: ["SotoWisdom"]), .library(name: "SotoWorkDocs", targets: ["SotoWorkDocs"]), - .library(name: "SotoWorkLink", targets: ["SotoWorkLink"]), .library(name: "SotoWorkMail", targets: ["SotoWorkMail"]), .library(name: "SotoWorkMailMessageFlow", targets: ["SotoWorkMailMessageFlow"]), .library(name: "SotoWorkSpaces", targets: ["SotoWorkSpaces"]), @@ -530,6 +532,7 @@ let package = Package( .target(name: "SotoDeviceFarm", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DeviceFarm", swiftSettings: swiftSettings), .target(name: "SotoDirectConnect", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DirectConnect", swiftSettings: swiftSettings), .target(name: "SotoDirectoryService", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DirectoryService", swiftSettings: swiftSettings), + .target(name: "SotoDirectoryServiceData", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DirectoryServiceData", swiftSettings: swiftSettings), .target(name: "SotoDocDB", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DocDB", swiftSettings: swiftSettings), .target(name: "SotoDocDBElastic", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/DocDBElastic", swiftSettings: swiftSettings), .target(name: "SotoDrs", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Drs", swiftSettings: swiftSettings), @@ -648,6 +651,7 @@ let package = Package( 
.target(name: "SotoMarketplaceDeployment", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MarketplaceDeployment", swiftSettings: swiftSettings), .target(name: "SotoMarketplaceEntitlementService", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MarketplaceEntitlementService", swiftSettings: swiftSettings), .target(name: "SotoMarketplaceMetering", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MarketplaceMetering", swiftSettings: swiftSettings), + .target(name: "SotoMarketplaceReporting", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MarketplaceReporting", swiftSettings: swiftSettings), .target(name: "SotoMediaConnect", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MediaConnect", swiftSettings: swiftSettings), .target(name: "SotoMediaConvert", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MediaConvert", swiftSettings: swiftSettings), .target(name: "SotoMediaLive", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/MediaLive", swiftSettings: swiftSettings), @@ -770,6 +774,7 @@ let package = Package( .target(name: "SotoSimSpaceWeaver", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SimSpaceWeaver", swiftSettings: swiftSettings), .target(name: "SotoSnowDeviceManagement", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SnowDeviceManagement", swiftSettings: swiftSettings), .target(name: "SotoSnowball", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Snowball", swiftSettings: swiftSettings), + .target(name: "SotoSocialMessaging", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SocialMessaging", swiftSettings: swiftSettings), .target(name: "SotoSsmSap", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SsmSap", swiftSettings: swiftSettings), .target(name: "SotoStorageGateway", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/StorageGateway", swiftSettings: swiftSettings), .target(name: "SotoSupplyChain", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/SupplyChain", swiftSettings: swiftSettings), @@ -796,7 +801,6 @@ let package = Package( .target(name: "SotoWellArchitected", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/WellArchitected", swiftSettings: swiftSettings), .target(name: "SotoWisdom", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/Wisdom", swiftSettings: swiftSettings), .target(name: "SotoWorkDocs", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/WorkDocs", swiftSettings: swiftSettings), - .target(name: "SotoWorkLink", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/WorkLink", swiftSettings: swiftSettings), .target(name: "SotoWorkMail", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/WorkMail", swiftSettings: swiftSettings), .target(name: "SotoWorkMailMessageFlow", 
dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/WorkMailMessageFlow", swiftSettings: swiftSettings), .target(name: "SotoWorkSpaces", dependencies: [.product(name: "SotoCore", package: "soto-core")], path: "./Sources/Soto/Services/WorkSpaces", swiftSettings: swiftSettings), diff --git a/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift b/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift index 8e69c0e03c..2563020de8 100644 --- a/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift +++ b/Sources/Soto/Services/ACMPCA/ACMPCA_api.swift @@ -150,7 +150,7 @@ public struct ACMPCA: AWSService { /// - certificateAuthorityType: The type of the certificate authority. /// - idempotencyToken: Custom string that can be used to distinguish between calls to the CreateCertificateAuthority action. Idempotency tokens for /// - keyStorageSecurityStandard: Specifies a cryptographic key management compliance standard used for handling CA - /// - revocationConfiguration: Contains information to enable Online Certificate Status Protocol (OCSP) support, to + /// - revocationConfiguration: Contains information to enable support for Online Certificate Status Protocol (OCSP), certificate revocation list (CRL), both protocols, or neither. By default, both certificate validation mechanisms are disabled. The following requirements apply to revocation configurations. A configuration disabling CRLs or OCSP must contain only the Enabled=False /// - tags: Key-value pairs that will be attached to the new private CA. You can associate up to /// - usageMode: Specifies whether the CA issues general-purpose certificates that typically require a /// - logger: Logger use during operation @@ -177,10 +177,8 @@ public struct ACMPCA: AWSService { return try await self.createCertificateAuthority(input, logger: logger) } - /// Creates an audit report that lists every time that your CA private key is used. The - /// report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use - /// the private key. Both Amazon Web Services Private CA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Access - /// policies for CRLs in Amazon S3. Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your Audit + /// Creates an audit report that lists every time that your CA private key is used to issue a certificate. The IssueCertificate and RevokeCertificate actions use + /// the private key. To save the audit report to your designated Amazon S3 bucket, you must create a bucket policy that grants Amazon Web Services Private CA permission to access and write to it. For an example policy, see Prepare an Amazon S3 bucket for audit reports. Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your Audit /// Reports. You can generate a maximum of one report every 30 minutes. @Sendable @inlinable @@ -194,10 +192,8 @@ public struct ACMPCA: AWSService { logger: logger ) } - /// Creates an audit report that lists every time that your CA private key is used. The - /// report is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use - /// the private key. 
Both Amazon Web Services Private CA and the IAM principal must have permission to write to the S3 bucket that you specify. If the IAM principal making the call does not have permission to write to the bucket, then an exception is thrown. For more information, see Access - /// policies for CRLs in Amazon S3. Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your Audit + /// Creates an audit report that lists every time that your CA private key is used to issue a certificate. The IssueCertificate and RevokeCertificate actions use + /// the private key. To save the audit report to your designated Amazon S3 bucket, you must create a bucket policy that grants Amazon Web Services Private CA permission to access and write to it. For an example policy, see Prepare an Amazon S3 bucket for audit reports. Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your Audit /// Reports. You can generate a maximum of one report every 30 minutes. /// /// Parameters: @@ -721,8 +717,8 @@ public struct ACMPCA: AWSService { /// certificate, if any, that your root CA signed must be next to last. The /// subordinate certificate signed by the preceding subordinate CA must come next, /// and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of Critical Constraints Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA - /// certificate or chain. Basic constraints (must be marked critical) Subject alternative names Key usage Extended key usage Authority key identifier Subject key identifier Issuer alternative name Subject directory attributes Subject information access Certificate policies Policy mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following extensions when they are marked critical in an - /// imported CA certificate or chain. Name constraints Policy constraints CRL distribution points Authority information access Freshest CRL Any other extension + /// certificate or chain. Authority key identifier Basic constraints (must be marked critical) Certificate policies Extended key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints Policy mappings Subject alternative name Subject directory attributes Subject key identifier Subject information access Amazon Web Services Private CA rejects the following extensions when they are marked critical in an + /// imported CA certificate or chain. Authority information access CRL distribution points Freshest CRL Policy constraints Amazon Web Services Private Certificate Authority will also reject any other extension marked as critical not contained on the preceding list of allowed extensions. @Sendable @inlinable public func importCertificateAuthorityCertificate(_ input: ImportCertificateAuthorityCertificateRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -748,8 +744,8 @@ public struct ACMPCA: AWSService { /// certificate, if any, that your root CA signed must be next to last. The /// subordinate certificate signed by the preceding subordinate CA must come next, /// and so on until your chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate is 32 KB. The maximum allowed size of a certificate chain is 2 MB. 
Enforcement of Critical Constraints Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA - /// certificate or chain. Basic constraints (must be marked critical) Subject alternative names Key usage Extended key usage Authority key identifier Subject key identifier Issuer alternative name Subject directory attributes Subject information access Certificate policies Policy mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following extensions when they are marked critical in an - /// imported CA certificate or chain. Name constraints Policy constraints CRL distribution points Authority information access Freshest CRL Any other extension + /// certificate or chain. Authority key identifier Basic constraints (must be marked critical) Certificate policies Extended key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints Policy mappings Subject alternative name Subject directory attributes Subject key identifier Subject information access Amazon Web Services Private CA rejects the following extensions when they are marked critical in an + /// imported CA certificate or chain. Authority information access CRL distribution points Freshest CRL Policy constraints Amazon Web Services Private Certificate Authority will also reject any other extension marked as critical not contained on the preceding list of allowed extensions. /// /// Parameters: /// - certificate: The PEM-encoded certificate for a private CA. This may be a self-signed certificate in @@ -1237,7 +1233,7 @@ public struct ACMPCA: AWSService { /// /// Parameters: /// - certificateAuthorityArn: Amazon Resource Name (ARN) of the private CA that issued the certificate to be - /// - revocationConfiguration: Contains information to enable Online Certificate Status Protocol (OCSP) support, to + /// - revocationConfiguration: Contains information to enable support for Online Certificate Status Protocol (OCSP), certificate revocation list (CRL), both protocols, or neither. If you don't supply this parameter, existing capibilites remain unchanged. For more /// - status: Status of your private CA. /// - logger: Logger use during operation @inlinable diff --git a/Sources/Soto/Services/ACMPCA/ACMPCA_shapes.swift b/Sources/Soto/Services/ACMPCA/ACMPCA_shapes.swift index 8cc1fd0b4d..62f9c40ec8 100644 --- a/Sources/Soto/Services/ACMPCA/ACMPCA_shapes.swift +++ b/Sources/Soto/Services/ACMPCA/ACMPCA_shapes.swift @@ -531,16 +531,14 @@ extension ACMPCA { /// cannot be created in this region with the specified security standard." For information about security standard support in various Regions, see Storage /// and security compliance of Amazon Web Services Private CA private keys. public let keyStorageSecurityStandard: KeyStorageSecurityStandard? - /// Contains information to enable Online Certificate Status Protocol (OCSP) support, to - /// enable a certificate revocation list (CRL), to enable both, or to enable neither. The - /// default is for both certificate validation mechanisms to be disabled. The following requirements apply to revocation configurations. A configuration disabling CRLs or OCSP must contain only the Enabled=False + /// Contains information to enable support for Online Certificate Status Protocol (OCSP), certificate revocation list (CRL), both protocols, or neither. By default, both certificate validation mechanisms are disabled. The following requirements apply to revocation configurations. 
A configuration disabling CRLs or OCSP must contain only the Enabled=False /// parameter, and will fail if other parameters such as CustomCname or /// ExpirationInDays are included. In a CRL configuration, the S3BucketName parameter must conform to /// Amazon S3 /// bucket naming rules. A configuration containing a custom Canonical /// Name (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions /// on the use of special characters in a CNAME. In a CRL or OCSP configuration, the value of a CNAME parameter must not include a - /// protocol prefix such as "http://" or "https://". For more information, see the OcspConfiguration and CrlConfiguration + /// protocol prefix such as "http://" or "https://". For more information, see the OcspConfiguration and CrlConfiguration /// types. public let revocationConfiguration: RevocationConfiguration? /// Key-value pairs that will be attached to the new private CA. You can associate up to @@ -2060,17 +2058,15 @@ extension ACMPCA { /// Amazon Resource Name (ARN) of the private CA that issued the certificate to be /// revoked. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 public let certificateAuthorityArn: String - /// Contains information to enable Online Certificate Status Protocol (OCSP) support, to - /// enable a certificate revocation list (CRL), to enable both, or to enable neither. If - /// this parameter is not supplied, existing capibilites remain unchanged. For more - /// information, see the OcspConfiguration and CrlConfiguration types. The following requirements apply to revocation configurations. A configuration disabling CRLs or OCSP must contain only the Enabled=False + /// Contains information to enable support for Online Certificate Status Protocol (OCSP), certificate revocation list (CRL), both protocols, or neither. If you don't supply this parameter, existing capibilites remain unchanged. For more + /// information, see the OcspConfiguration and CrlConfiguration types. The following requirements apply to revocation configurations. A configuration disabling CRLs or OCSP must contain only the Enabled=False /// parameter, and will fail if other parameters such as CustomCname or /// ExpirationInDays are included. In a CRL configuration, the S3BucketName parameter must conform to /// Amazon S3 /// bucket naming rules. A configuration containing a custom Canonical /// Name (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions /// on the use of special characters in a CNAME. In a CRL or OCSP configuration, the value of a CNAME parameter must not include a - /// protocol prefix such as "http://" or "https://". + /// protocol prefix such as "http://" or "https://". If you update the S3BucketName of CrlConfiguration, you can break revocation for existing certificates. In other words, if you call UpdateCertificateAuthority to update the CRL configuration's S3 bucket name, Amazon Web Services Private CA only writes CRLs to the new S3 bucket. Certificates issued prior to this point will have the old S3 bucket name in your CRL Distribution Point (CDP) extension, essentially breaking revocation. If you must update the S3 bucket, you'll need to reissue old certificates to keep the revocation working. Alternatively, you can use a CustomCname in your CRL configuration if you might need to change the S3 bucket name in the future. public let revocationConfiguration: RevocationConfiguration? /// Status of your private CA. 
public let status: CertificateAuthorityStatus? diff --git a/Sources/Soto/Services/APIGateway/APIGateway_shapes.swift b/Sources/Soto/Services/APIGateway/APIGateway_shapes.swift index 6639cc5040..9d2e00817f 100644 --- a/Sources/Soto/Services/APIGateway/APIGateway_shapes.swift +++ b/Sources/Soto/Services/APIGateway/APIGateway_shapes.swift @@ -1850,7 +1850,7 @@ extension APIGateway { public let certificateArn: String? /// The name of the certificate that will be used by edge-optimized endpoint for this domain name. public let certificateName: String? - /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. + /// The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate. public let certificateUploadDate: Date? /// The domain name of the Amazon CloudFront distribution associated with this custom domain name for an edge-optimized endpoint. You set up this association when adding a DNS record pointing the custom domain name to this distribution name. For more information about CloudFront distributions, see the Amazon CloudFront documentation. public let distributionDomainName: String? diff --git a/Sources/Soto/Services/Amp/Amp_api.swift b/Sources/Soto/Services/Amp/Amp_api.swift index 45cb958a7e..b6bf8a8be9 100644 --- a/Sources/Soto/Services/Amp/Amp_api.swift +++ b/Sources/Soto/Services/Amp/Amp_api.swift @@ -65,6 +65,7 @@ public struct Amp: AWSService { serviceProtocol: .restjson, apiVersion: "2020-08-01", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: AmpErrorType.self, middleware: middleware, timeout: timeout, @@ -76,6 +77,25 @@ public struct Amp: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack]: .init(endpoints: [ + "ap-northeast-1": "aps.ap-northeast-1.api.aws", + "ap-northeast-2": "aps.ap-northeast-2.api.aws", + "ap-south-1": "aps.ap-south-1.api.aws", + "ap-southeast-1": "aps.ap-southeast-1.api.aws", + "ap-southeast-2": "aps.ap-southeast-2.api.aws", + "eu-central-1": "aps.eu-central-1.api.aws", + "eu-north-1": "aps.eu-north-1.api.aws", + "eu-west-1": "aps.eu-west-1.api.aws", + "eu-west-2": "aps.eu-west-2.api.aws", + "eu-west-3": "aps.eu-west-3.api.aws", + "sa-east-1": "aps.sa-east-1.api.aws", + "us-east-1": "aps.us-east-1.api.aws", + "us-east-2": "aps.us-east-2.api.aws", + "us-west-2": "aps.us-west-2.api.aws" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/Amplify/Amplify_api.swift b/Sources/Soto/Services/Amplify/Amplify_api.swift index cbd45b9121..7679d9284b 100644 --- a/Sources/Soto/Services/Amplify/Amplify_api.swift +++ b/Sources/Soto/Services/Amplify/Amplify_api.swift @@ -287,7 +287,7 @@ public struct Amplify: AWSService { return try await self.createBranch(input, logger: logger) } - /// Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not connected to a repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail. + /// Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not connected to a Git repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. 
If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail. @Sendable @inlinable public func createDeployment(_ input: CreateDeploymentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDeploymentResult { @@ -300,7 +300,7 @@ public struct Amplify: AWSService { logger: logger ) } - /// Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not connected to a repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail. + /// Creates a deployment for a manually deployed Amplify app. Manually deployed apps are not connected to a Git repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail. /// /// Parameters: /// - appId: The unique ID for an Amplify app. @@ -1132,7 +1132,7 @@ public struct Amplify: AWSService { return try await self.listWebhooks(input, logger: logger) } - /// Starts a deployment for a manually deployed app. Manually deployed apps are not connected to a repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail. + /// Starts a deployment for a manually deployed app. Manually deployed apps are not connected to a Git repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail. @Sendable @inlinable public func startDeployment(_ input: StartDeploymentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartDeploymentResult { @@ -1145,13 +1145,14 @@ public struct Amplify: AWSService { logger: logger ) } - /// Starts a deployment for a manually deployed app. Manually deployed apps are not connected to a repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail. + /// Starts a deployment for a manually deployed app. Manually deployed apps are not connected to a Git repository. The maximum duration between the CreateDeployment call and the StartDeployment call cannot exceed 8 hours. If the duration exceeds 8 hours, the StartDeployment call and the associated Job will fail. /// /// Parameters: /// - appId: The unique ID for an Amplify app. - /// - branchName: The name of the branch to use for the job. - /// - jobId: The job ID for this deployment, generated by the create deployment request. - /// - sourceUrl: The source URL for this deployment, used when calling start deployment without create deployment. The source URL can be any HTTP GET URL that is publicly accessible and downloads a single .zip file. + /// - branchName: The name of the branch to use for the deployment job. + /// - jobId: The job ID for this deployment that is generated by the CreateDeployment request. + /// - sourceUrl: The source URL for the deployment that is used when calling StartDeployment without CreateDeployment. The source URL can be either an HTTP GET URL that is publicly accessible and downloads a single .zip file, or an Amazon S3 bucket and prefix. 
+ /// - sourceUrlType: The type of source specified by the sourceURL. If the value is ZIP, the source is a .zip file. If the value is BUCKET_PREFIX, the source is an Amazon S3 bucket and prefix. If no value is specified, the default is ZIP. /// - logger: Logger use during operation @inlinable public func startDeployment( @@ -1159,13 +1160,15 @@ public struct Amplify: AWSService { branchName: String, jobId: String? = nil, sourceUrl: String? = nil, + sourceUrlType: SourceUrlType? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> StartDeploymentResult { let input = StartDeploymentRequest( appId: appId, branchName: branchName, jobId: jobId, - sourceUrl: sourceUrl + sourceUrl: sourceUrl, + sourceUrlType: sourceUrlType ) return try await self.startDeployment(input, logger: logger) } diff --git a/Sources/Soto/Services/Amplify/Amplify_shapes.swift b/Sources/Soto/Services/Amplify/Amplify_shapes.swift index 837f4ea3bc..3532c02c8c 100644 --- a/Sources/Soto/Services/Amplify/Amplify_shapes.swift +++ b/Sources/Soto/Services/Amplify/Amplify_shapes.swift @@ -85,6 +85,12 @@ extension Amplify { public var description: String { return self.rawValue } } + public enum SourceUrlType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case bucketPrefix = "BUCKET_PREFIX" + case zip = "ZIP" + public var description: String { return self.rawValue } + } + public enum Stage: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case beta = "BETA" case development = "DEVELOPMENT" @@ -1852,15 +1858,19 @@ extension Amplify { public let jobArn: String /// The unique ID for the job. public let jobId: String - /// The type for the job. If the value is RELEASE, the job was manually released from its source by using the StartJob API. If the value is RETRY, the job was manually retried using the StartJob API. If the value is WEB_HOOK, the job was automatically triggered by webhooks. + /// The type for the job. If the value is RELEASE, the job was manually released from its source by using the StartJob API. This value is available only for apps that are connected to a repository. If the value is RETRY, the job was manually retried using the StartJob API. If the value is WEB_HOOK, the job was automatically triggered by webhooks. If the value is MANUAL, the job is for a manually deployed app. Manually deployed apps are not connected to a Git repository. public let jobType: JobType + /// The source URL for the files to deploy. The source URL can be either an HTTP GET URL that is publicly accessible and downloads a single .zip file, or an Amazon S3 bucket and prefix. + public let sourceUrl: String? + /// The type of source specified by the sourceURL. If the value is ZIP, the source is a .zip file. If the value is BUCKET_PREFIX, the source is an Amazon S3 bucket and prefix. If no value is specified, the default is ZIP. + public let sourceUrlType: SourceUrlType? /// The start date and time for the job. public let startTime: Date /// The current status for the job. public let status: JobStatus @inlinable - public init(commitId: String, commitMessage: String, commitTime: Date, endTime: Date? = nil, jobArn: String, jobId: String, jobType: JobType, startTime: Date, status: JobStatus) { + public init(commitId: String, commitMessage: String, commitTime: Date, endTime: Date? = nil, jobArn: String, jobId: String, jobType: JobType, sourceUrl: String? = nil, sourceUrlType: SourceUrlType? 
= nil, startTime: Date, status: JobStatus) { self.commitId = commitId self.commitMessage = commitMessage self.commitTime = commitTime @@ -1868,6 +1878,8 @@ extension Amplify { self.jobArn = jobArn self.jobId = jobId self.jobType = jobType + self.sourceUrl = sourceUrl + self.sourceUrlType = sourceUrlType self.startTime = startTime self.status = status } @@ -1880,6 +1892,8 @@ extension Amplify { case jobArn = "jobArn" case jobId = "jobId" case jobType = "jobType" + case sourceUrl = "sourceUrl" + case sourceUrlType = "sourceUrlType" case startTime = "startTime" case status = "status" } @@ -2349,19 +2363,22 @@ extension Amplify { public struct StartDeploymentRequest: AWSEncodableShape { /// The unique ID for an Amplify app. public let appId: String - /// The name of the branch to use for the job. + /// The name of the branch to use for the deployment job. public let branchName: String - /// The job ID for this deployment, generated by the create deployment request. + /// The job ID for this deployment that is generated by the CreateDeployment request. public let jobId: String? - /// The source URL for this deployment, used when calling start deployment without create deployment. The source URL can be any HTTP GET URL that is publicly accessible and downloads a single .zip file. + /// The source URL for the deployment that is used when calling StartDeployment without CreateDeployment. The source URL can be either an HTTP GET URL that is publicly accessible and downloads a single .zip file, or an Amazon S3 bucket and prefix. public let sourceUrl: String? + /// The type of source specified by the sourceURL. If the value is ZIP, the source is a .zip file. If the value is BUCKET_PREFIX, the source is an Amazon S3 bucket and prefix. If no value is specified, the default is ZIP. + public let sourceUrlType: SourceUrlType? @inlinable - public init(appId: String, branchName: String, jobId: String? = nil, sourceUrl: String? = nil) { + public init(appId: String, branchName: String, jobId: String? = nil, sourceUrl: String? = nil, sourceUrlType: SourceUrlType? 
= nil) { self.appId = appId self.branchName = branchName self.jobId = jobId self.sourceUrl = sourceUrl + self.sourceUrlType = sourceUrlType } public func encode(to encoder: Encoder) throws { @@ -2371,6 +2388,7 @@ extension Amplify { request.encodePath(self.branchName, key: "branchName") try container.encodeIfPresent(self.jobId, forKey: .jobId) try container.encodeIfPresent(self.sourceUrl, forKey: .sourceUrl) + try container.encodeIfPresent(self.sourceUrlType, forKey: .sourceUrlType) } public func validate(name: String) throws { @@ -2383,12 +2401,13 @@ extension Amplify { try self.validate(self.jobId, name: "jobId", parent: name, max: 255) try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^[0-9]+$") try self.validate(self.sourceUrl, name: "sourceUrl", parent: name, max: 3000) - try self.validate(self.sourceUrl, name: "sourceUrl", parent: name, pattern: "^(?s)") + try self.validate(self.sourceUrl, name: "sourceUrl", parent: name, pattern: "^(s3|https|http)://") } private enum CodingKeys: String, CodingKey { case jobId = "jobId" case sourceUrl = "sourceUrl" + case sourceUrlType = "sourceUrlType" } } diff --git a/Sources/Soto/Services/AppStream/AppStream_shapes.swift b/Sources/Soto/Services/AppStream/AppStream_shapes.swift index 42c0bfdf59..437fb8b0ac 100644 --- a/Sources/Soto/Services/AppStream/AppStream_shapes.swift +++ b/Sources/Soto/Services/AppStream/AppStream_shapes.swift @@ -32,6 +32,7 @@ extension AppStream { } public enum Action: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case autoTimeZoneRedirection = "AUTO_TIME_ZONE_REDIRECTION" case clipboardCopyFromLocalDevice = "CLIPBOARD_COPY_FROM_LOCAL_DEVICE" case clipboardCopyToLocalDevice = "CLIPBOARD_COPY_TO_LOCAL_DEVICE" case domainPasswordSignin = "DOMAIN_PASSWORD_SIGNIN" diff --git a/Sources/Soto/Services/Appflow/Appflow_shapes.swift b/Sources/Soto/Services/Appflow/Appflow_shapes.swift index 0b3a47a47f..050753551f 100644 --- a/Sources/Soto/Services/Appflow/Appflow_shapes.swift +++ b/Sources/Soto/Services/Appflow/Appflow_shapes.swift @@ -4568,7 +4568,7 @@ extension Appflow { public let clientCredentialsArn: String? /// A JSON web token (JWT) that authorizes Amazon AppFlow to access your Salesforce records. public let jwtToken: String? - /// Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records. You can specify one of the following values: AUTHORIZATION_CODE Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records. CLIENT_CREDENTIALS Amazon AppFlow passes client credentials (a client ID and client secret) when it requests the access token from Salesforce. You provide these credentials to Amazon AppFlow when you define the connection to your Salesforce account. JWT_BEARER Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records. + /// Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an access token from Salesforce. 
Amazon AppFlow requires an access token each time it attempts to access your Salesforce records. You can specify one of the following values: AUTHORIZATION_CODE Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records. JWT_BEARER Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records. The CLIENT_CREDENTIALS value is not supported for Salesforce. public let oAuth2GrantType: OAuth2GrantType? /// The OAuth requirement needed to request security tokens from the connector endpoint. public let oAuthRequest: ConnectorOAuthRequest? @@ -4678,7 +4678,7 @@ extension Appflow { public struct SalesforceMetadata: AWSDecodableShape { /// The Salesforce APIs that you can have Amazon AppFlow use when your flows transfers data to or from Salesforce. public let dataTransferApis: [SalesforceDataTransferApi]? - /// The OAuth 2.0 grant types that Amazon AppFlow can use when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records. AUTHORIZATION_CODE Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records. CLIENT_CREDENTIALS Amazon AppFlow passes client credentials (a client ID and client secret) when it requests the access token from Salesforce. You provide these credentials to Amazon AppFlow when you define the connection to your Salesforce account. JWT_BEARER Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records. + /// The OAuth 2.0 grant types that Amazon AppFlow can use when it requests an access token from Salesforce. Amazon AppFlow requires an access token each time it attempts to access your Salesforce records. AUTHORIZATION_CODE Amazon AppFlow passes an authorization code when it requests the access token from Salesforce. Amazon AppFlow receives the authorization code from Salesforce after you log in to your Salesforce account and authorize Amazon AppFlow to access your records. JWT_BEARER Amazon AppFlow passes a JSON web token (JWT) when it requests the access token from Salesforce. You provide the JWT to Amazon AppFlow when you define the connection to your Salesforce account. When you use this grant type, you don't need to log in to your Salesforce account to authorize Amazon AppFlow to access your records. The CLIENT_CREDENTIALS value is not supported for Salesforce. public let oauth2GrantTypesSupported: [OAuth2GrantType]? /// The desired authorization scope for the Salesforce account. public let oAuthScopes: [String]? 
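A note on the Amplify StartDeployment additions a few hunks above: the new sourceUrlType parameter lets a manually deployed app be deployed straight from an Amazon S3 bucket and prefix instead of a single .zip file. A minimal usage sketch against the generated convenience method follows; the app ID, branch, bucket, and region are placeholders, and the bare AWSClient() construction assumes a recent soto-core where the initializer arguments all have defaults.

```swift
import SotoAmplify

// Placeholders: substitute your own Amplify app ID, branch, and S3 location.
let client = AWSClient()
let amplify = Amplify(client: client, region: .uswest2)

// Deploy a manually deployed app from an S3 bucket and prefix using the new
// sourceUrlType parameter; omitting it (or passing .zip) keeps the previous
// single-.zip behaviour. The URL must match the new ^(s3|https|http):// pattern.
let result = try await amplify.startDeployment(
    appId: "d111abcdef1111",
    branchName: "main",
    sourceUrl: "s3://my-deploy-bucket/site/",
    sourceUrlType: .bucketPrefix
)
print(result)

try await client.shutdown()
```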
diff --git a/Sources/Soto/Services/Athena/Athena_api.swift b/Sources/Soto/Services/Athena/Athena_api.swift index 4178f66118..f2550b4601 100644 --- a/Sources/Soto/Services/Athena/Athena_api.swift +++ b/Sources/Soto/Services/Athena/Athena_api.swift @@ -311,9 +311,9 @@ public struct Athena: AWSService { /// Parameters: /// - description: A description of the data catalog to be created. /// - name: The name of the data catalog to create. The catalog name must be unique for the Amazon Web Services account and can use a maximum of 127 alphanumeric, underscore, at sign, or hyphen characters. The remainder of the length constraint of 256 is reserved for use by Athena. - /// - parameters: Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type. For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. + /// - parameters: Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type. For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. The FEDERATED data catalog type uses one of the following parameters, but not both. Use connection-arn for an existing Glue connection. Use connection-type and connection-properties to specify the configuration setting for a new connection. connection-arn: lambda-role-arn (optional): The execution role to use for the Lambda function. If not provided, one is created. 
connection-type:MYSQL|REDSHIFT|...., connection-properties:"" For , use escaped JSON text, as in the following example. "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" /// - tags: A list of comma separated tags to add to the data catalog that is created. - /// - type: The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. + /// - type: The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass. /// - logger: Logger use during operation @inlinable public func createDataCatalog( diff --git a/Sources/Soto/Services/Athena/Athena_shapes.swift b/Sources/Soto/Services/Athena/Athena_shapes.swift index 2bd88af731..6aae4e7172 100644 --- a/Sources/Soto/Services/Athena/Athena_shapes.swift +++ b/Sources/Soto/Services/Athena/Athena_shapes.swift @@ -67,7 +67,55 @@ extension Athena { public var description: String { return self.rawValue } } + public enum ConnectionType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case bigquery = "BIGQUERY" + case clouderahive = "CLOUDERAHIVE" + case clouderaimpala = "CLOUDERAIMPALA" + case cloudwatch = "CLOUDWATCH" + case cloudwatchmetrics = "CLOUDWATCHMETRICS" + case cmdb = "CMDB" + case datalakegen2 = "DATALAKEGEN2" + case db2 = "DB2" + case db2as400 = "DB2AS400" + case documentdb = "DOCUMENTDB" + case dynamodb = "DYNAMODB" + case googlecloudstorage = "GOOGLECLOUDSTORAGE" + case hbase = "HBASE" + case hortonworkshive = "HORTONWORKSHIVE" + case msk = "MSK" + case mysql = "MYSQL" + case neptune = "NEPTUNE" + case opensearch = "OPENSEARCH" + case oracle = "ORACLE" + case postgresql = "POSTGRESQL" + case redis = "REDIS" + case redshift = "REDSHIFT" + case saphana = "SAPHANA" + case snowflake = "SNOWFLAKE" + case sqlserver = "SQLSERVER" + case synapse = "SYNAPSE" + case teradata = "TERADATA" + case timestream = "TIMESTREAM" + case tpcds = "TPCDS" + case vertica = "VERTICA" + public var description: String { return self.rawValue } + } + + public enum DataCatalogStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case createComplete = "CREATE_COMPLETE" + case createFailed = "CREATE_FAILED" + case createFailedCleanupComplete = "CREATE_FAILED_CLEANUP_COMPLETE" + case createFailedCleanupFailed = "CREATE_FAILED_CLEANUP_FAILED" + case createFailedCleanupInProgress = "CREATE_FAILED_CLEANUP_IN_PROGRESS" + case createInProgress = "CREATE_IN_PROGRESS" + case deleteComplete = "DELETE_COMPLETE" + case deleteFailed = "DELETE_FAILED" + case deleteInProgress = "DELETE_IN_PROGRESS" + public var description: String { return self.rawValue } + } + public enum DataCatalogType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case federated = "FEDERATED" case glue = "GLUE" case hive = "HIVE" case lambda = "LAMBDA" @@ -679,11 +727,11 @@ extension Athena { public let description: String? /// The name of the data catalog to create. 
The catalog name must be unique for the Amazon Web Services account and can use a maximum of 127 alphanumeric, underscore, at sign, or hyphen characters. The remainder of the length constraint of 256 is reserved for use by Athena. public let name: String - /// Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type. For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. + /// Specifies the Lambda function or functions to use for creating the data catalog. This is a mapping whose values depend on the catalog type. For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. The FEDERATED data catalog type uses one of the following parameters, but not both. Use connection-arn for an existing Glue connection. Use connection-type and connection-properties to specify the configuration setting for a new connection. connection-arn: lambda-role-arn (optional): The execution role to use for the Lambda function. If not provided, one is created. connection-type:MYSQL|REDSHIFT|...., connection-properties:"" For , use escaped JSON text, as in the following example. "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" public let parameters: [String: String]? 
/// A list of comma separated tags to add to the data catalog that is created. public let tags: [Tag]? - /// The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. + /// The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass. public let type: DataCatalogType @inlinable @@ -722,7 +770,16 @@ extension Athena { } public struct CreateDataCatalogOutput: AWSDecodableShape { - public init() {} + public let dataCatalog: DataCatalog? + + @inlinable + public init(dataCatalog: DataCatalog? = nil) { + self.dataCatalog = dataCatalog + } + + private enum CodingKeys: String, CodingKey { + case dataCatalog = "DataCatalog" + } } public struct CreateNamedQueryInput: AWSEncodableShape { @@ -975,27 +1032,39 @@ extension Athena { } public struct DataCatalog: AWSDecodableShape { + /// The type of connection for a FEDERATED data catalog (for example, REDSHIFT, MYSQL, or SQLSERVER). For information about individual connectors, see Available data source connectors. + public let connectionType: ConnectionType? /// An optional description of the data catalog. public let description: String? + /// Text of the error that occurred during data catalog creation or deletion. + public let error: String? /// The name of the data catalog. The catalog name must be unique for the Amazon Web Services account and can use a maximum of 127 alphanumeric, underscore, at sign, or hyphen characters. The remainder of the length constraint of 256 is reserved for use by Athena. public let name: String - /// Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type. For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. + /// Specifies the Lambda function or functions to use for the data catalog. This is a mapping whose values depend on the catalog type. For the HIVE data catalog type, use the following syntax. The metadata-function parameter is required. The sdk-version parameter is optional and defaults to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number For the LAMBDA data catalog type, use one of the following sets of required parameters, but not both. 
If you have one Lambda function that processes metadata and another for reading the actual data, use the following syntax. Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn If you have a composite Lambda function that processes both metadata and data, use the following syntax to specify your Lambda function. function=lambda_arn The GLUE type takes a catalog ID parameter and is required. The catalog_id is the account ID of the Amazon Web Services account to which the Glue catalog belongs. catalog-id=catalog_id The GLUE data catalog type also applies to the default AwsDataCatalog that already exists in your account, of which you can have only one and cannot modify. The FEDERATED data catalog type uses one of the following parameters, but not both. Use connection-arn for an existing Glue connection. Use connection-type and connection-properties to specify the configuration setting for a new connection. connection-arn: connection-type:MYSQL|REDSHIFT|...., connection-properties:"" For , use escaped JSON text, as in the following example. "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" public let parameters: [String: String]? - /// The type of data catalog to create: LAMBDA for a federated catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. + /// The status of the creation or deletion of the data catalog. The LAMBDA, GLUE, and HIVE data catalog types are created synchronously. Their status is either CREATE_COMPLETE or CREATE_FAILED. The FEDERATED data catalog type is created asynchronously. Data catalog creation status: CREATE_IN_PROGRESS: Federated data catalog creation in progress. CREATE_COMPLETE: Data catalog creation complete. CREATE_FAILED: Data catalog could not be created. CREATE_FAILED_CLEANUP_IN_PROGRESS: Federated data catalog creation failed and is being removed. CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation failed and was removed. CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation failed but could not be removed. Data catalog deletion status: DELETE_IN_PROGRESS: Federated data catalog deletion in progress. DELETE_COMPLETE: Federated data catalog deleted. DELETE_FAILED: Federated data catalog could not be deleted. + public let status: DataCatalogStatus? + /// The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is a federated catalog for which Athena creates the connection and the Lambda function for you based on the parameters that you pass. public let type: DataCatalogType @inlinable - public init(description: String? = nil, name: String, parameters: [String: String]? = nil, type: DataCatalogType) { + public init(connectionType: ConnectionType? = nil, description: String? = nil, error: String? = nil, name: String, parameters: [String: String]? = nil, status: DataCatalogStatus? 
= nil, type: DataCatalogType) { + self.connectionType = connectionType self.description = description + self.error = error self.name = name self.parameters = parameters + self.status = status self.type = type } private enum CodingKeys: String, CodingKey { + case connectionType = "ConnectionType" case description = "Description" + case error = "Error" case name = "Name" case parameters = "Parameters" + case status = "Status" case type = "Type" } } @@ -1003,17 +1072,29 @@ extension Athena { public struct DataCatalogSummary: AWSDecodableShape { /// The name of the data catalog. The catalog name is unique for the Amazon Web Services account and can use a maximum of 127 alphanumeric, underscore, at sign, or hyphen characters. The remainder of the length constraint of 256 is reserved for use by Athena. public let catalogName: String? + /// The type of connection for a FEDERATED data catalog (for example, REDSHIFT, MYSQL, or SQLSERVER). For information about individual connectors, see Available data source connectors. + public let connectionType: ConnectionType? + /// Text of the error that occurred during data catalog creation or deletion. + public let error: String? + /// The status of the creation or deletion of the data catalog. The LAMBDA, GLUE, and HIVE data catalog types are created synchronously. Their status is either CREATE_COMPLETE or CREATE_FAILED. The FEDERATED data catalog type is created asynchronously. Data catalog creation status: CREATE_IN_PROGRESS: Federated data catalog creation in progress. CREATE_COMPLETE: Data catalog creation complete. CREATE_FAILED: Data catalog could not be created. CREATE_FAILED_CLEANUP_IN_PROGRESS: Federated data catalog creation failed and is being removed. CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation failed and was removed. CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation failed but could not be removed. Data catalog deletion status: DELETE_IN_PROGRESS: Federated data catalog deletion in progress. DELETE_COMPLETE: Federated data catalog deleted. DELETE_FAILED: Federated data catalog could not be deleted. + public let status: DataCatalogStatus? /// The data catalog type. public let type: DataCatalogType? @inlinable - public init(catalogName: String? = nil, type: DataCatalogType? = nil) { + public init(catalogName: String? = nil, connectionType: ConnectionType? = nil, error: String? = nil, status: DataCatalogStatus? = nil, type: DataCatalogType? = nil) { self.catalogName = catalogName + self.connectionType = connectionType + self.error = error + self.status = status self.type = type } private enum CodingKeys: String, CodingKey { case catalogName = "CatalogName" + case connectionType = "ConnectionType" + case error = "Error" + case status = "Status" case type = "Type" } } @@ -1099,7 +1180,16 @@ extension Athena { } public struct DeleteDataCatalogOutput: AWSDecodableShape { - public init() {} + public let dataCatalog: DataCatalog? + + @inlinable + public init(dataCatalog: DataCatalog? 
= nil) { + self.dataCatalog = dataCatalog + } + + private enum CodingKeys: String, CodingKey { + case dataCatalog = "DataCatalog" + } } public struct DeleteNamedQueryInput: AWSEncodableShape { diff --git a/Sources/Soto/Services/B2bi/B2bi_api.swift b/Sources/Soto/Services/B2bi/B2bi_api.swift index a6060bb759..5a5de3a78a 100644 --- a/Sources/Soto/Services/B2bi/B2bi_api.swift +++ b/Sources/Soto/Services/B2bi/B2bi_api.swift @@ -141,6 +141,7 @@ public struct B2bi: AWSService { /// /// Parameters: /// - capabilities: Specifies a list of the capabilities associated with this partnership. + /// - capabilityOptions: Specify the structure that contains the details for the associated capabilities. /// - clientToken: Reserved for future use. /// - email: Specifies the email address associated with this trading partner. /// - name: Specifies a descriptive name for the partnership. @@ -151,6 +152,7 @@ public struct B2bi: AWSService { @inlinable public func createPartnership( capabilities: [String], + capabilityOptions: CapabilityOptions? = nil, clientToken: String? = CreatePartnershipRequest.idempotencyToken(), email: String, name: String, @@ -161,6 +163,7 @@ public struct B2bi: AWSService { ) async throws -> CreatePartnershipResponse { let input = CreatePartnershipRequest( capabilities: capabilities, + capabilityOptions: capabilityOptions, clientToken: clientToken, email: email, name: name, @@ -218,7 +221,42 @@ public struct B2bi: AWSService { return try await self.createProfile(input, logger: logger) } - /// Creates a transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Amazon Web Services B2B Data Interchange uses a mapping template in JSONata or XSLT format to transform a customer input file into a JSON or XML file that can be converted to EDI. If you provide a sample EDI file with the same structure as the EDI files that you wish to generate, then the service can generate a mapping template. The starter template contains placeholder values which you can replace with JSONata or XSLT expressions to take data from your input file and insert it into the JSON or XML file that is used to generate the EDI. If you do not provide a sample EDI file, then the service can generate a mapping template based on the EDI settings in the templateDetails parameter. Currently, we only support generating a template that can generate the input to produce an Outbound X12 EDI file. + @Sendable + @inlinable + public func createStarterMappingTemplate(_ input: CreateStarterMappingTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateStarterMappingTemplateResponse { + try await self.client.execute( + operation: "CreateStarterMappingTemplate", + path: "/createmappingstarttemplate", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Amazon Web Services B2B Data Interchange uses a mapping template in JSONata or XSLT format to transform a customer input file into a JSON or XML file that can be converted to EDI. If you provide a sample EDI file with the same structure as the EDI files that you wish to generate, then the service can generate a mapping template. The starter template contains placeholder values which you can replace with JSONata or XSLT expressions to take data from your input file and insert it into the JSON or XML file that is used to generate the EDI. 
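The Athena hunks above add a FEDERATED catalog type, expose Status, Error, and ConnectionType on DataCatalog, and make CreateDataCatalog and DeleteDataCatalog return the affected DataCatalog. A minimal sketch of driving that surface with Soto follows; the generated createDataCatalog convenience method and the .federated enum case are assumed from the shapes in this diff, and the catalog name, connection properties, and secret ARN are hypothetical placeholders.

import SotoAthena

// Sketch only: create a FEDERATED data catalog. Athena provisions the Glue connection
// and the Lambda function from the connection-type/connection-properties parameters.
func createFederatedCatalog(athena: Athena) async throws {
    let created = try await athena.createDataCatalog(
        name: "sales_mysql", // hypothetical catalog name
        parameters: [
            "connection-type": "MYSQL",
            "connection-properties": #"{"host":"db.example.com","port":"3306","SecretArn":"arn:aws:secretsmanager:us-east-1:111122223333:secret:example"}"#
        ],
        type: .federated // assumed DataCatalogType case backing the FEDERATED type documented above
    )
    // FEDERATED catalogs are created asynchronously, so the returned DataCatalog is
    // typically still CREATE_IN_PROGRESS; Error carries the failure text if creation fails.
    print(created.dataCatalog?.status as Any, created.dataCatalog?.error as Any)
}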
If you do not provide a sample EDI file, then the service can generate a mapping template based on the EDI settings in the templateDetails parameter. Currently, we only support generating a template that can generate the input to produce an Outbound X12 EDI file. + /// + /// Parameters: + /// - mappingType: Specify the format for the mapping template: either JSONATA or XSLT. + /// - outputSampleLocation: Specify the location of the sample EDI file that is used to generate the mapping template. + /// - templateDetails: Describes the details needed for generating the template. Specify the X12 transaction set and version for which the template is used: currently, we only support X12. + /// - logger: Logger use during operation + @inlinable + public func createStarterMappingTemplate( + mappingType: MappingType, + outputSampleLocation: S3Location? = nil, + templateDetails: TemplateDetails, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateStarterMappingTemplateResponse { + let input = CreateStarterMappingTemplateRequest( + mappingType: mappingType, + outputSampleLocation: outputSampleLocation, + templateDetails: templateDetails + ) + return try await self.createStarterMappingTemplate(input, logger: logger) + } + + /// Creates a transformer. Amazon Web Services B2B Data Interchange currently supports two scenarios: Inbound EDI: the Amazon Web Services customer receives an EDI file from their trading partner. Amazon Web Services B2B Data Interchange converts this EDI file into a JSON or XML file with a service-defined structure. A mapping template provided by the customer, in JSONata or XSLT format, is optionally applied to this file to produce a JSON or XML file with the structure the customer requires. Outbound EDI: the Amazon Web Services customer has a JSON or XML file containing data that they wish to use in an EDI file. A mapping template, provided by the customer (in either JSONata or XSLT format) is applied to this file to generate a JSON or XML file in the service-defined structure. This file is then converted to an EDI file. The following fields are provided for backwards compatibility only: fileFormat, mappingTemplate, ediType, and sampleDocument. Use the mapping data type in place of mappingTemplate and fileFormat Use the sampleDocuments data type in place of sampleDocument Use either the inputConversion or outputConversion in place of ediType @Sendable @inlinable public func createTransformer(_ input: CreateTransformerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateTransformerResponse { @@ -231,35 +269,35 @@ public struct B2bi: AWSService { logger: logger ) } - /// Creates a transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Creates a transformer. Amazon Web Services B2B Data Interchange currently supports two scenarios: Inbound EDI: the Amazon Web Services customer receives an EDI file from their trading partner. Amazon Web Services B2B Data Interchange converts this EDI file into a JSON or XML file with a service-defined structure. A mapping template provided by the customer, in JSONata or XSLT format, is optionally applied to this file to produce a JSON or XML file with the structure the customer requires. Outbound EDI: the Amazon Web Services customer has a JSON or XML file containing data that they wish to use in an EDI file. 
A mapping template, provided by the customer (in either JSONata or XSLT format) is applied to this file to generate a JSON or XML file in the service-defined structure. This file is then converted to an EDI file. The following fields are provided for backwards compatibility only: fileFormat, mappingTemplate, ediType, and sampleDocument. Use the mapping data type in place of mappingTemplate and fileFormat Use the sampleDocuments data type in place of sampleDocument Use either the inputConversion or outputConversion in place of ediType /// /// Parameters: /// - clientToken: Reserved for future use. - /// - ediType: Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents. - /// - fileFormat: Specifies that the currently supported file formats for EDI transformations are JSON and XML. - /// - mappingTemplate: Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. + /// - inputConversion: Specify the InputConversion object, which contains the format options for the inbound transformation. + /// - mapping: Specify the structure that contains the mapping template and its language (either XSLT or JSONATA). /// - name: Specifies the name of the transformer, used to identify it. - /// - sampleDocument: Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data. + /// - outputConversion: A structure that contains the OutputConversion object, which contains the format options for the outbound transformation. + /// - sampleDocuments: Specify a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents. /// - tags: Specifies the key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (capabilities, partnerships, and so on) for any purpose. /// - logger: Logger use during operation @inlinable public func createTransformer( clientToken: String? = CreateTransformerRequest.idempotencyToken(), - ediType: EdiType, - fileFormat: FileFormat, - mappingTemplate: String, + inputConversion: InputConversion? = nil, + mapping: Mapping? = nil, name: String, - sampleDocument: String? = nil, + outputConversion: OutputConversion? = nil, + sampleDocuments: SampleDocuments? = nil, tags: [Tag]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateTransformerResponse { let input = CreateTransformerRequest( clientToken: clientToken, - ediType: ediType, - fileFormat: fileFormat, - mappingTemplate: mappingTemplate, + inputConversion: inputConversion, + mapping: mapping, name: name, - sampleDocument: sampleDocument, + outputConversion: outputConversion, + sampleDocuments: sampleDocuments, tags: tags ) return try await self.createTransformer(input, logger: logger) @@ -352,7 +390,7 @@ public struct B2bi: AWSService { return try await self.deleteProfile(input, logger: logger) } - /// Deletes the specified transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Deletes the specified transformer. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file. 
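As a concrete illustration of the new B2bi surface above, here is a hedged sketch that generates a JSONata starter template and then creates a transformer using the new mapping and outputConversion fields instead of the deprecated mappingTemplate, fileFormat, and ediType ones. The TemplateDetails value (X12 transaction set and version) is taken as a parameter because its construction is not part of these hunks, and formatOptions on OutputConversion is omitted for the same reason.

import SotoB2bi

func createOutboundX12Transformer(b2bi: B2bi, templateDetails: B2bi.TemplateDetails) async throws -> String {
    // 1. Ask the service for a starter mapping template in JSONata format.
    //    In practice you would replace the placeholder values in this template
    //    with your own JSONata expressions before using it.
    let starter = try await b2bi.createStarterMappingTemplate(
        mappingType: .jsonata,
        templateDetails: templateDetails
    )

    // 2. Create a transformer with the non-deprecated fields: a Mapping carrying the
    //    template text and its language, plus an OutputConversion targeting X12.
    let created = try await b2bi.createTransformer(
        mapping: B2bi.Mapping(template: starter.mappingTemplate, templateLanguage: .jsonata),
        name: "outbound-x12-transformer", // hypothetical name
        outputConversion: B2bi.OutputConversion(toFormat: .x12) // formatOptions omitted here
    )
    return created.transformerId
}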
@Sendable @inlinable public func deleteTransformer(_ input: DeleteTransformerRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -365,7 +403,7 @@ public struct B2bi: AWSService { logger: logger ) } - /// Deletes the specified transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Deletes the specified transformer. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file. /// /// Parameters: /// - transformerId: Specifies the system-assigned unique identifier for the transformer. @@ -468,7 +506,7 @@ public struct B2bi: AWSService { return try await self.getProfile(input, logger: logger) } - /// Retrieves the details for the transformer specified by the transformer ID. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Retrieves the details for the transformer specified by the transformer ID. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file. @Sendable @inlinable public func getTransformer(_ input: GetTransformerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTransformerResponse { @@ -481,7 +519,7 @@ public struct B2bi: AWSService { logger: logger ) } - /// Retrieves the details for the transformer specified by the transformer ID. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Retrieves the details for the transformer specified by the transformer ID. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file. /// /// Parameters: /// - transformerId: Specifies the system-assigned unique identifier for the transformer. @@ -657,7 +695,7 @@ public struct B2bi: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Lists the available transformers. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Lists the available transformers. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file. @Sendable @inlinable public func listTransformers(_ input: ListTransformersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTransformersResponse { @@ -670,7 +708,7 @@ public struct B2bi: AWSService { logger: logger ) } - /// Lists the available transformers. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Lists the available transformers. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file. /// /// Parameters: /// - maxResults: Specifies the number of items to return for the API response. 
@@ -689,7 +727,7 @@ public struct B2bi: AWSService { return try await self.listTransformers(input, logger: logger) } - /// Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2BI Data Interchange. If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just create and configure a transformer, and then run the StartTransformerJob API to process your files. + /// Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2B Data Interchange. If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just create and configure a transformer, and then run the StartTransformerJob API to process your files. @Sendable @inlinable public func startTransformerJob(_ input: StartTransformerJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartTransformerJobResponse { @@ -702,7 +740,7 @@ public struct B2bi: AWSService { logger: logger ) } - /// Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2BI Data Interchange. If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just create and configure a transformer, and then run the StartTransformerJob API to process your files. + /// Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2B Data Interchange. If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just create and configure a transformer, and then run the StartTransformerJob API to process your files. /// /// Parameters: /// - clientToken: Reserved for future use. @@ -759,6 +797,38 @@ public struct B2bi: AWSService { return try await self.tagResource(input, logger: logger) } + /// This operation mimics the latter half of a typical Outbound EDI request. It takes an input JSON/XML in the B2Bi shape as input, converts it to an X12 EDI string, and return that string. + @Sendable + @inlinable + public func testConversion(_ input: TestConversionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TestConversionResponse { + try await self.client.execute( + operation: "TestConversion", + path: "/testconversion", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This operation mimics the latter half of a typical Outbound EDI request. It takes an input JSON/XML in the B2Bi shape as input, converts it to an X12 EDI string, and return that string. + /// + /// Parameters: + /// - source: Specify the source file for an outbound EDI request. + /// - target: Specify the format (X12 is the only currently supported format), and other details for the conversion target. 
+ /// - logger: Logger use during operation + @inlinable + public func testConversion( + source: ConversionSource, + target: ConversionTarget, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TestConversionResponse { + let input = TestConversionRequest( + source: source, + target: target + ) + return try await self.testConversion(input, logger: logger) + } + /// Maps the input file according to the provided template file. The API call downloads the file contents from the Amazon S3 location, and passes the contents in as a string, to the inputFileContent parameter. @Sendable @inlinable @@ -777,7 +847,7 @@ public struct B2bi: AWSService { /// Parameters: /// - fileFormat: Specifies that the currently supported file formats for EDI transformations are JSON and XML. /// - inputFileContent: Specify the contents of the EDI (electronic data interchange) XML or JSON file that is used as input for the transform. - /// - mappingTemplate: Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. + /// - mappingTemplate: Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. This parameter is available for backwards compatibility. Use the Mapping data type instead. /// - logger: Logger use during operation @inlinable public func testMapping( @@ -916,18 +986,21 @@ public struct B2bi: AWSService { /// /// Parameters: /// - capabilities: List of the capabilities associated with this partnership. + /// - capabilityOptions: To update, specify the structure that contains the details for the associated capabilities. /// - name: The name of the partnership, used to identify it. /// - partnershipId: Specifies the unique, system-generated identifier for a partnership. /// - logger: Logger use during operation @inlinable public func updatePartnership( capabilities: [String]? = nil, + capabilityOptions: CapabilityOptions? = nil, name: String? = nil, partnershipId: String, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdatePartnershipResponse { let input = UpdatePartnershipRequest( capabilities: capabilities, + capabilityOptions: capabilityOptions, name: name, partnershipId: partnershipId ) @@ -975,7 +1048,7 @@ public struct B2bi: AWSService { return try await self.updateProfile(input, logger: logger) } - /// Updates the specified parameters for a transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Updates the specified parameters for a transformer. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file. @Sendable @inlinable public func updateTransformer(_ input: UpdateTransformerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTransformerResponse { @@ -988,34 +1061,34 @@ public struct B2bi: AWSService { logger: logger ) } - /// Updates the specified parameters for a transformer. A transformer describes how to process the incoming EDI documents and extract the necessary information to the output file. + /// Updates the specified parameters for a transformer. A transformer can take an EDI file as input and transform it into a JSON-or XML-formatted document. Alternatively, a transformer can take a JSON-or XML-formatted document as input and transform it into an EDI file. 
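Two of the smaller additions in this range, sketched under the same assumptions: TestConversion previews the outbound conversion of a JSON/XML payload to an X12 string, and UpdatePartnership now accepts CapabilityOptions. The ConversionSource and OutboundEdiOptions values are taken as parameters because their member shapes are not shown in these hunks; the ConversionTarget and CapabilityOptions initializers are the ones added in B2bi_shapes.swift below.

import SotoB2bi

// Sketch: convert a JSON/XML source to X12 without running a transformer job.
// A real call would usually also set formatDetails on the target to pick the
// X12 transaction set and version.
func previewOutboundEdi(b2bi: B2bi, source: B2bi.ConversionSource) async throws -> String {
    let response = try await b2bi.testConversion(
        source: source,
        target: B2bi.ConversionTarget(fileFormat: .x12)
    )
    response.validationMessages?.forEach { print("validation:", $0) }
    return response.convertedFileContent
}

// Sketch: attach outbound-EDI capability options to an existing partnership.
func enableOutboundEdi(b2bi: B2bi, partnershipId: String, outboundEdi: B2bi.OutboundEdiOptions) async throws {
    _ = try await b2bi.updatePartnership(
        capabilityOptions: B2bi.CapabilityOptions(outboundEdi: outboundEdi),
        partnershipId: partnershipId
    )
}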
/// /// Parameters: - /// - ediType: Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents. - /// - fileFormat: Specifies that the currently supported file formats for EDI transformations are JSON and XML. - /// - mappingTemplate: Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. + /// - inputConversion: To update, specify the InputConversion object, which contains the format options for the inbound transformation. + /// - mapping: Specify the structure that contains the mapping template and its language (either XSLT or JSONATA). /// - name: Specify a new name for the transformer, if you want to update it. - /// - sampleDocument: Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data. + /// - outputConversion: To update, specify the OutputConversion object, which contains the format options for the outbound transformation. + /// - sampleDocuments: Specify a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents. /// - status: Specifies the transformer's status. You can update the state of the transformer, from active to inactive, or inactive to active. /// - transformerId: Specifies the system-assigned unique identifier for the transformer. /// - logger: Logger use during operation @inlinable public func updateTransformer( - ediType: EdiType? = nil, - fileFormat: FileFormat? = nil, - mappingTemplate: String? = nil, + inputConversion: InputConversion? = nil, + mapping: Mapping? = nil, name: String? = nil, - sampleDocument: String? = nil, + outputConversion: OutputConversion? = nil, + sampleDocuments: SampleDocuments? = nil, status: TransformerStatus? 
= nil, transformerId: String, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateTransformerResponse { let input = UpdateTransformerRequest( - ediType: ediType, - fileFormat: fileFormat, - mappingTemplate: mappingTemplate, + inputConversion: inputConversion, + mapping: mapping, name: name, - sampleDocument: sampleDocument, + outputConversion: outputConversion, + sampleDocuments: sampleDocuments, status: status, transformerId: transformerId ) diff --git a/Sources/Soto/Services/B2bi/B2bi_shapes.swift b/Sources/Soto/Services/B2bi/B2bi_shapes.swift index 54c6feea89..9260c93bab 100644 --- a/Sources/Soto/Services/B2bi/B2bi_shapes.swift +++ b/Sources/Soto/Services/B2bi/B2bi_shapes.swift @@ -26,23 +26,63 @@ import Foundation extension B2bi { // MARK: Enums + public enum CapabilityDirection: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case inbound = "INBOUND" + case outbound = "OUTBOUND" + public var description: String { return self.rawValue } + } + public enum CapabilityType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case edi = "edi" public var description: String { return self.rawValue } } + public enum ConversionSourceFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case json = "JSON" + case xml = "XML" + public var description: String { return self.rawValue } + } + + public enum ConversionTargetFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case x12 = "X12" + public var description: String { return self.rawValue } + } + public enum FileFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case json = "JSON" + case notUsed = "NOT_USED" case xml = "XML" public var description: String { return self.rawValue } } + public enum FromFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case x12 = "X12" + public var description: String { return self.rawValue } + } + public enum Logging: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" public var description: String { return self.rawValue } } + public enum MappingTemplateLanguage: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case jsonata = "JSONATA" + case xslt = "XSLT" + public var description: String { return self.rawValue } + } + + public enum MappingType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case jsonata = "JSONATA" + case xslt = "XSLT" + public var description: String { return self.rawValue } + } + + public enum ToFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case x12 = "X12" + public var description: String { return self.rawValue } + } + public enum TransformerJobStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case failed = "failed" case running = "running" @@ -145,6 +185,24 @@ extension B2bi { // MARK: Shapes + public struct CapabilityOptions: AWSEncodableShape & AWSDecodableShape { + /// A structure that contains the outbound EDI options. + public let outboundEdi: OutboundEdiOptions? + + @inlinable + public init(outboundEdi: OutboundEdiOptions? 
= nil) { + self.outboundEdi = outboundEdi + } + + public func validate(name: String) throws { + try self.outboundEdi?.validate(name: "\(name).outboundEdi") + } + + private enum CodingKeys: String, CodingKey { + case outboundEdi = "outboundEdi" + } + } + public struct CapabilitySummary: AWSDecodableShape { /// Returns a system-assigned unique identifier for the capability. public let capabilityId: String @@ -177,6 +235,51 @@ extension B2bi { } } + public struct ConversionSource: AWSEncodableShape { + /// The format for the input file: either JSON or XML. + public let fileFormat: ConversionSourceFormat + /// File to be converted + public let inputFile: InputFileSource + + @inlinable + public init(fileFormat: ConversionSourceFormat, inputFile: InputFileSource) { + self.fileFormat = fileFormat + self.inputFile = inputFile + } + + private enum CodingKeys: String, CodingKey { + case fileFormat = "fileFormat" + case inputFile = "inputFile" + } + } + + public struct ConversionTarget: AWSEncodableShape { + /// Currently, only X12 format is supported. + public let fileFormat: ConversionTargetFormat + /// A structure that contains the formatting details for the conversion target. + public let formatDetails: ConversionTargetFormatDetails? + /// Customer uses this to provide a sample on what should file look like after conversion + /// X12 EDI use case around this would be discovering the file syntax + public let outputSampleFile: OutputSampleFileSource? + + @inlinable + public init(fileFormat: ConversionTargetFormat, formatDetails: ConversionTargetFormatDetails? = nil, outputSampleFile: OutputSampleFileSource? = nil) { + self.fileFormat = fileFormat + self.formatDetails = formatDetails + self.outputSampleFile = outputSampleFile + } + + public func validate(name: String) throws { + try self.outputSampleFile?.validate(name: "\(name).outputSampleFile") + } + + private enum CodingKeys: String, CodingKey { + case fileFormat = "fileFormat" + case formatDetails = "formatDetails" + case outputSampleFile = "outputSampleFile" + } + } + public struct CreateCapabilityRequest: AWSEncodableShape { /// Reserved for future use. public let clientToken: String? @@ -267,6 +370,8 @@ extension B2bi { public struct CreatePartnershipRequest: AWSEncodableShape { /// Specifies a list of the capabilities associated with this partnership. public let capabilities: [String] + /// Specify the structure that contains the details for the associated capabilities. + public let capabilityOptions: CapabilityOptions? /// Reserved for future use. public let clientToken: String? /// Specifies the email address associated with this trading partner. @@ -281,8 +386,9 @@ extension B2bi { public let tags: [Tag]? @inlinable - public init(capabilities: [String], clientToken: String? = CreatePartnershipRequest.idempotencyToken(), email: String, name: String, phone: String? = nil, profileId: String, tags: [Tag]? = nil) { + public init(capabilities: [String], capabilityOptions: CapabilityOptions? = nil, clientToken: String? = CreatePartnershipRequest.idempotencyToken(), email: String, name: String, phone: String? = nil, profileId: String, tags: [Tag]? 
= nil) { self.capabilities = capabilities + self.capabilityOptions = capabilityOptions self.clientToken = clientToken self.email = email self.name = name @@ -297,6 +403,7 @@ extension B2bi { try validate($0, name: "capabilities[]", parent: name, min: 1) try validate($0, name: "capabilities[]", parent: name, pattern: "^[a-zA-Z0-9_-]+$") } + try self.capabilityOptions?.validate(name: "\(name).capabilityOptions") try self.validate(self.email, name: "email", parent: name, max: 254) try self.validate(self.email, name: "email", parent: name, min: 5) try self.validate(self.email, name: "email", parent: name, pattern: "^[\\w\\.\\-]+@[\\w\\.\\-]+$") @@ -316,6 +423,7 @@ extension B2bi { private enum CodingKeys: String, CodingKey { case capabilities = "capabilities" + case capabilityOptions = "capabilityOptions" case clientToken = "clientToken" case email = "email" case name = "name" @@ -328,6 +436,8 @@ extension B2bi { public struct CreatePartnershipResponse: AWSDecodableShape { /// Returns one or more capabilities associated with this partnership. public let capabilities: [String]? + /// Returns the structure that contains the details for the associated capabilities. + public let capabilityOptions: CapabilityOptions? /// Returns a timestamp for creation date and time of the partnership. @CustomCoding public var createdAt: Date @@ -347,8 +457,9 @@ extension B2bi { public let tradingPartnerId: String? @inlinable - public init(capabilities: [String]? = nil, createdAt: Date, email: String? = nil, name: String? = nil, partnershipArn: String, partnershipId: String, phone: String? = nil, profileId: String, tradingPartnerId: String? = nil) { + public init(capabilities: [String]? = nil, capabilityOptions: CapabilityOptions? = nil, createdAt: Date, email: String? = nil, name: String? = nil, partnershipArn: String, partnershipId: String, phone: String? = nil, profileId: String, tradingPartnerId: String? = nil) { self.capabilities = capabilities + self.capabilityOptions = capabilityOptions self.createdAt = createdAt self.email = email self.name = name @@ -361,6 +472,7 @@ extension B2bi { private enum CodingKeys: String, CodingKey { case capabilities = "capabilities" + case capabilityOptions = "capabilityOptions" case createdAt = "createdAt" case email = "email" case name = "name" @@ -474,38 +586,109 @@ extension B2bi { } } + public struct CreateStarterMappingTemplateRequest: AWSEncodableShape { + /// Specify the format for the mapping template: either JSONATA or XSLT. + public let mappingType: MappingType + /// Specify the location of the sample EDI file that is used to generate the mapping template. + public let outputSampleLocation: S3Location? + /// Describes the details needed for generating the template. Specify the X12 transaction set and version for which the template is used: currently, we only support X12. + public let templateDetails: TemplateDetails + + @inlinable + public init(mappingType: MappingType, outputSampleLocation: S3Location? 
= nil, templateDetails: TemplateDetails) { + self.mappingType = mappingType + self.outputSampleLocation = outputSampleLocation + self.templateDetails = templateDetails + } + + public func validate(name: String) throws { + try self.outputSampleLocation?.validate(name: "\(name).outputSampleLocation") + } + + private enum CodingKeys: String, CodingKey { + case mappingType = "mappingType" + case outputSampleLocation = "outputSampleLocation" + case templateDetails = "templateDetails" + } + } + + public struct CreateStarterMappingTemplateResponse: AWSDecodableShape { + /// Returns a string that represents the mapping template. + public let mappingTemplate: String + + @inlinable + public init(mappingTemplate: String) { + self.mappingTemplate = mappingTemplate + } + + private enum CodingKeys: String, CodingKey { + case mappingTemplate = "mappingTemplate" + } + } + public struct CreateTransformerRequest: AWSEncodableShape { /// Reserved for future use. public let clientToken: String? /// Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents. - public let ediType: EdiType + public let ediType: EdiType? /// Specifies that the currently supported file formats for EDI transformations are JSON and XML. - public let fileFormat: FileFormat - /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. - public let mappingTemplate: String + public let fileFormat: FileFormat? + /// Specify the InputConversion object, which contains the format options for the inbound transformation. + public let inputConversion: InputConversion? + /// Specify the structure that contains the mapping template and its language (either XSLT or JSONATA). + public let mapping: Mapping? + /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. This parameter is available for backwards compatibility. Use the Mapping data type instead. + public let mappingTemplate: String? /// Specifies the name of the transformer, used to identify it. public let name: String + /// A structure that contains the OutputConversion object, which contains the format options for the outbound transformation. + public let outputConversion: OutputConversion? /// Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data. public let sampleDocument: String? + /// Specify a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents. + public let sampleDocuments: SampleDocuments? /// Specifies the key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (capabilities, partnerships, and so on) for any purpose. public let tags: [Tag]? @inlinable - public init(clientToken: String? = CreateTransformerRequest.idempotencyToken(), ediType: EdiType, fileFormat: FileFormat, mappingTemplate: String, name: String, sampleDocument: String? = nil, tags: [Tag]? = nil) { + public init(clientToken: String? = CreateTransformerRequest.idempotencyToken(), inputConversion: InputConversion? = nil, mapping: Mapping? = nil, name: String, outputConversion: OutputConversion? = nil, sampleDocuments: SampleDocuments? = nil, tags: [Tag]? 
= nil) { + self.clientToken = clientToken + self.ediType = nil + self.fileFormat = nil + self.inputConversion = inputConversion + self.mapping = mapping + self.mappingTemplate = nil + self.name = name + self.outputConversion = outputConversion + self.sampleDocument = nil + self.sampleDocuments = sampleDocuments + self.tags = tags + } + + @available(*, deprecated, message: "Members ediType, fileFormat, mappingTemplate, sampleDocument have been deprecated") + @inlinable + public init(clientToken: String? = CreateTransformerRequest.idempotencyToken(), ediType: EdiType? = nil, fileFormat: FileFormat? = nil, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, mappingTemplate: String? = nil, name: String, outputConversion: OutputConversion? = nil, sampleDocument: String? = nil, sampleDocuments: SampleDocuments? = nil, tags: [Tag]? = nil) { self.clientToken = clientToken self.ediType = ediType self.fileFormat = fileFormat + self.inputConversion = inputConversion + self.mapping = mapping self.mappingTemplate = mappingTemplate self.name = name + self.outputConversion = outputConversion self.sampleDocument = sampleDocument + self.sampleDocuments = sampleDocuments self.tags = tags } public func validate(name: String) throws { + try self.mapping?.validate(name: "\(name).mapping") try self.validate(self.mappingTemplate, name: "mappingTemplate", parent: name, max: 350000) try self.validate(self.name, name: "name", parent: name, max: 254) try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_-]{1,512}$") try self.validate(self.sampleDocument, name: "sampleDocument", parent: name, max: 1024) + try self.sampleDocuments?.validate(name: "\(name).sampleDocuments") try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") } @@ -516,9 +699,13 @@ extension B2bi { case clientToken = "clientToken" case ediType = "ediType" case fileFormat = "fileFormat" + case inputConversion = "inputConversion" + case mapping = "mapping" case mappingTemplate = "mappingTemplate" case name = "name" + case outputConversion = "outputConversion" case sampleDocument = "sampleDocument" + case sampleDocuments = "sampleDocuments" case tags = "tags" } } @@ -528,15 +715,23 @@ extension B2bi { @CustomCoding public var createdAt: Date /// Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents. - public let ediType: EdiType + public let ediType: EdiType? /// Returns that the currently supported file formats for EDI transformations are JSON and XML. - public let fileFormat: FileFormat + public let fileFormat: FileFormat? + /// Returns the InputConversion object, which contains the format options for the inbound transformation. + public let inputConversion: InputConversion? + /// Returns the structure that contains the mapping template and its language (either XSLT or JSONATA). + public let mapping: Mapping? /// Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. - public let mappingTemplate: String + public let mappingTemplate: String? /// Returns the name of the transformer, used to identify it. public let name: String + /// Returns the OutputConversion object, which contains the format options for the outbound transformation. + public let outputConversion: OutputConversion? 
/// Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data. public let sampleDocument: String? + /// Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents. + public let sampleDocuments: SampleDocuments? /// Returns the state of the newly created transformer. The transformer can be either active or inactive. For the transformer to be used in a capability, its status must active. public let status: TransformerStatus /// Returns an Amazon Resource Name (ARN) for a specific Amazon Web Services resource, such as a capability, partnership, profile, or transformer. @@ -545,13 +740,35 @@ public let transformerId: String @inlinable - public init(createdAt: Date, ediType: EdiType, fileFormat: FileFormat, mappingTemplate: String, name: String, sampleDocument: String? = nil, status: TransformerStatus, transformerArn: String, transformerId: String) { + public init(createdAt: Date, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, name: String, outputConversion: OutputConversion? = nil, sampleDocuments: SampleDocuments? = nil, status: TransformerStatus, transformerArn: String, transformerId: String) { + self.createdAt = createdAt + self.ediType = nil + self.fileFormat = nil + self.inputConversion = inputConversion + self.mapping = mapping + self.mappingTemplate = nil + self.name = name + self.outputConversion = outputConversion + self.sampleDocument = nil + self.sampleDocuments = sampleDocuments + self.status = status + self.transformerArn = transformerArn + self.transformerId = transformerId + } + + @available(*, deprecated, message: "Members ediType, fileFormat, mappingTemplate, sampleDocument have been deprecated") + @inlinable + public init(createdAt: Date, ediType: EdiType? = nil, fileFormat: FileFormat? = nil, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, mappingTemplate: String? = nil, name: String, outputConversion: OutputConversion? = nil, sampleDocument: String? = nil, sampleDocuments: SampleDocuments? = nil, status: TransformerStatus, transformerArn: String, transformerId: String) { self.createdAt = createdAt self.ediType = ediType self.fileFormat = fileFormat + self.inputConversion = inputConversion + self.mapping = mapping self.mappingTemplate = mappingTemplate self.name = name + self.outputConversion = outputConversion self.sampleDocument = sampleDocument + self.sampleDocuments = sampleDocuments self.status = status self.transformerArn = transformerArn self.transformerId = transformerId @@ -561,9 +778,13 @@ case createdAt = "createdAt" case ediType = "ediType" case fileFormat = "fileFormat" + case inputConversion = "inputConversion" + case mapping = "mapping" case mappingTemplate = "mappingTemplate" case name = "name" + case outputConversion = "outputConversion" case sampleDocument = "sampleDocument" + case sampleDocuments = "sampleDocuments" case status = "status" case transformerArn = "transformerArn" case transformerId = "transformerId" @@ -667,6 +888,8 @@ extension B2bi { } public struct EdiConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specifies whether this capability is for inbound or outbound transformations. + public let capabilityDirection: CapabilityDirection? /// Contains the Amazon S3 bucket and prefix for the location of the input file, which is contained in an S3Location object.
public let inputLocation: S3Location /// Contains the Amazon S3 bucket and prefix for the location of the output file, which is contained in an S3Location object. @@ -677,7 +900,8 @@ extension B2bi { public let type: EdiType @inlinable - public init(inputLocation: S3Location, outputLocation: S3Location, transformerId: String, type: EdiType) { + public init(capabilityDirection: CapabilityDirection? = nil, inputLocation: S3Location, outputLocation: S3Location, transformerId: String, type: EdiType) { + self.capabilityDirection = capabilityDirection self.inputLocation = inputLocation self.outputLocation = outputLocation self.transformerId = transformerId @@ -693,6 +917,7 @@ extension B2bi { } private enum CodingKeys: String, CodingKey { + case capabilityDirection = "capabilityDirection" case inputLocation = "inputLocation" case outputLocation = "outputLocation" case transformerId = "transformerId" @@ -795,6 +1020,7 @@ extension B2bi { public struct GetPartnershipResponse: AWSDecodableShape { /// Returns one or more capabilities associated with this partnership. public let capabilities: [String]? + public let capabilityOptions: CapabilityOptions? /// Returns a timestamp for creation date and time of the partnership. @CustomCoding public var createdAt: Date @@ -817,8 +1043,9 @@ extension B2bi { public let tradingPartnerId: String? @inlinable - public init(capabilities: [String]? = nil, createdAt: Date, email: String? = nil, modifiedAt: Date? = nil, name: String? = nil, partnershipArn: String, partnershipId: String, phone: String? = nil, profileId: String, tradingPartnerId: String? = nil) { + public init(capabilities: [String]? = nil, capabilityOptions: CapabilityOptions? = nil, createdAt: Date, email: String? = nil, modifiedAt: Date? = nil, name: String? = nil, partnershipArn: String, partnershipId: String, phone: String? = nil, profileId: String, tradingPartnerId: String? = nil) { self.capabilities = capabilities + self.capabilityOptions = capabilityOptions self.createdAt = createdAt self.email = email self.modifiedAt = modifiedAt @@ -832,6 +1059,7 @@ extension B2bi { private enum CodingKeys: String, CodingKey { case capabilities = "capabilities" + case capabilityOptions = "capabilityOptions" case createdAt = "createdAt" case email = "email" case modifiedAt = "modifiedAt" @@ -1002,18 +1230,26 @@ extension B2bi { @CustomCoding public var createdAt: Date /// Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents. - public let ediType: EdiType + public let ediType: EdiType? /// Returns that the currently supported file formats for EDI transformations are JSON and XML. - public let fileFormat: FileFormat + public let fileFormat: FileFormat? + /// Returns the InputConversion object, which contains the format options for the inbound transformation. + public let inputConversion: InputConversion? + /// Returns the structure that contains the mapping template and its language (either XSLT or JSONATA). + public let mapping: Mapping? /// Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. - public let mappingTemplate: String + public let mappingTemplate: String? /// Returns a timestamp for last time the transformer was modified. @OptionalCustomCoding public var modifiedAt: Date? /// Returns the name of the transformer, used to identify it. 
public let name: String + /// Returns the OutputConversion object, which contains the format options for the outbound transformation. + public let outputConversion: OutputConversion? /// Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data. public let sampleDocument: String? + /// Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents. + public let sampleDocuments: SampleDocuments? /// Returns the state of the newly created transformer. The transformer can be either active or inactive. For the transformer to be used in a capability, its status must active. public let status: TransformerStatus /// Returns an Amazon Resource Name (ARN) for a specific Amazon Web Services resource, such as a capability, partnership, profile, or transformer. @@ -1022,14 +1258,37 @@ extension B2bi { public let transformerId: String @inlinable - public init(createdAt: Date, ediType: EdiType, fileFormat: FileFormat, mappingTemplate: String, modifiedAt: Date? = nil, name: String, sampleDocument: String? = nil, status: TransformerStatus, transformerArn: String, transformerId: String) { + public init(createdAt: Date, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, modifiedAt: Date? = nil, name: String, outputConversion: OutputConversion? = nil, sampleDocuments: SampleDocuments? = nil, status: TransformerStatus, transformerArn: String, transformerId: String) { + self.createdAt = createdAt + self.ediType = nil + self.fileFormat = nil + self.inputConversion = inputConversion + self.mapping = mapping + self.mappingTemplate = nil + self.modifiedAt = modifiedAt + self.name = name + self.outputConversion = outputConversion + self.sampleDocument = nil + self.sampleDocuments = sampleDocuments + self.status = status + self.transformerArn = transformerArn + self.transformerId = transformerId + } + + @available(*, deprecated, message: "Members ediType, fileFormat, mappingTemplate, sampleDocument have been deprecated") + @inlinable + public init(createdAt: Date, ediType: EdiType? = nil, fileFormat: FileFormat? = nil, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, mappingTemplate: String? = nil, modifiedAt: Date? = nil, name: String, outputConversion: OutputConversion? = nil, sampleDocument: String? = nil, sampleDocuments: SampleDocuments? 
= nil, status: TransformerStatus, transformerArn: String, transformerId: String) { self.createdAt = createdAt self.ediType = ediType self.fileFormat = fileFormat + self.inputConversion = inputConversion + self.mapping = mapping self.mappingTemplate = mappingTemplate self.modifiedAt = modifiedAt self.name = name + self.outputConversion = outputConversion self.sampleDocument = sampleDocument + self.sampleDocuments = sampleDocuments self.status = status self.transformerArn = transformerArn self.transformerId = transformerId @@ -1039,16 +1298,38 @@ extension B2bi { case createdAt = "createdAt" case ediType = "ediType" case fileFormat = "fileFormat" + case inputConversion = "inputConversion" + case mapping = "mapping" case mappingTemplate = "mappingTemplate" case modifiedAt = "modifiedAt" case name = "name" + case outputConversion = "outputConversion" case sampleDocument = "sampleDocument" + case sampleDocuments = "sampleDocuments" case status = "status" case transformerArn = "transformerArn" case transformerId = "transformerId" } } + public struct InputConversion: AWSEncodableShape & AWSDecodableShape { + /// A structure that contains the formatting options for an inbound transformer. + public let formatOptions: FormatOptions? + /// The format for the transformer input: currently on X12 is supported. + public let fromFormat: FromFormat + + @inlinable + public init(formatOptions: FormatOptions? = nil, fromFormat: FromFormat) { + self.formatOptions = formatOptions + self.fromFormat = fromFormat + } + + private enum CodingKeys: String, CodingKey { + case formatOptions = "formatOptions" + case fromFormat = "fromFormat" + } + } + public struct ListCapabilitiesRequest: AWSEncodableShape { /// Specifies the maximum number of capabilities to return. public let maxResults: Int? @@ -1281,9 +1562,50 @@ extension B2bi { } } + public struct Mapping: AWSEncodableShape & AWSDecodableShape { + /// A string that represents the mapping template, in the transformation language specified in templateLanguage. + public let template: String? + /// The transformation language for the template, either XSLT or JSONATA. + public let templateLanguage: MappingTemplateLanguage + + @inlinable + public init(template: String? = nil, templateLanguage: MappingTemplateLanguage) { + self.template = template + self.templateLanguage = templateLanguage + } + + public func validate(name: String) throws { + try self.validate(self.template, name: "template", parent: name, max: 350000) + } + + private enum CodingKeys: String, CodingKey { + case template = "template" + case templateLanguage = "templateLanguage" + } + } + + public struct OutputConversion: AWSEncodableShape & AWSDecodableShape { + /// A structure that contains the X12 transaction set and version for the transformer output. + public let formatOptions: FormatOptions? + /// The format for the output from an outbound transformer: only X12 is currently supported. + public let toFormat: ToFormat + + @inlinable + public init(formatOptions: FormatOptions? = nil, toFormat: ToFormat) { + self.formatOptions = formatOptions + self.toFormat = toFormat + } + + private enum CodingKeys: String, CodingKey { + case formatOptions = "formatOptions" + case toFormat = "toFormat" + } + } + public struct PartnershipSummary: AWSDecodableShape { /// Returns one or more capabilities associated with this partnership. public let capabilities: [String]? + public let capabilityOptions: CapabilityOptions? /// Returns a timestamp for creation date and time of the partnership. 
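The Mapping, InputConversion, and OutputConversion shapes above replace the deprecated mappingTemplate, fileFormat, and ediType fields on transformers. A brief sketch of moving an existing transformer onto the new fields, assuming the updateTransformer convenience method shown earlier in this diff; formatOptions is left nil because its shape is not part of these hunks, and the template is taken as a parameter.

import SotoB2bi

// Sketch: configure an inbound transformer that parses X12 into the service-defined
// JSON/XML structure and then applies an XSLT mapping to the caller's target shape.
func migrateTransformer(b2bi: B2bi, transformerId: String, xsltTemplate: String) async throws {
    _ = try await b2bi.updateTransformer(
        inputConversion: B2bi.InputConversion(fromFormat: .x12),
        mapping: B2bi.Mapping(template: xsltTemplate, templateLanguage: .xslt),
        transformerId: transformerId
    )
}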
@CustomCoding public var createdAt: Date @@ -1300,8 +1622,9 @@ extension B2bi { public let tradingPartnerId: String? @inlinable - public init(capabilities: [String]? = nil, createdAt: Date, modifiedAt: Date? = nil, name: String? = nil, partnershipId: String, profileId: String, tradingPartnerId: String? = nil) { + public init(capabilities: [String]? = nil, capabilityOptions: CapabilityOptions? = nil, createdAt: Date, modifiedAt: Date? = nil, name: String? = nil, partnershipId: String, profileId: String, tradingPartnerId: String? = nil) { self.capabilities = capabilities + self.capabilityOptions = capabilityOptions self.createdAt = createdAt self.modifiedAt = modifiedAt self.name = name @@ -1312,6 +1635,7 @@ extension B2bi { private enum CodingKeys: String, CodingKey { case capabilities = "capabilities" + case capabilityOptions = "capabilityOptions" case createdAt = "createdAt" case modifiedAt = "modifiedAt" case name = "name" @@ -1385,6 +1709,55 @@ extension B2bi { } } + public struct SampleDocumentKeys: AWSEncodableShape & AWSDecodableShape { + /// An array of keys for your input sample documents. + public let input: String? + /// An array of keys for your output sample documents. + public let output: String? + + @inlinable + public init(input: String? = nil, output: String? = nil) { + self.input = input + self.output = output + } + + public func validate(name: String) throws { + try self.validate(self.input, name: "input", parent: name, max: 1024) + try self.validate(self.output, name: "output", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case input = "input" + case output = "output" + } + } + + public struct SampleDocuments: AWSEncodableShape & AWSDecodableShape { + /// Contains the Amazon S3 bucket that is used to hold your sample documents. + public let bucketName: String + /// Contains an array of the Amazon S3 keys used to identify the location for your sample documents. + public let keys: [SampleDocumentKeys] + + @inlinable + public init(bucketName: String, keys: [SampleDocumentKeys]) { + self.bucketName = bucketName + self.keys = keys + } + + public func validate(name: String) throws { + try self.validate(self.bucketName, name: "bucketName", parent: name, max: 63) + try self.validate(self.bucketName, name: "bucketName", parent: name, min: 3) + try self.keys.forEach { + try $0.validate(name: "\(name).keys[]") + } + } + + private enum CodingKeys: String, CodingKey { + case bucketName = "bucketName" + case keys = "keys" + } + } + public struct StartTransformerJobRequest: AWSEncodableShape { /// Reserved for future use. public let clientToken: String? @@ -1490,12 +1863,52 @@ extension B2bi { } } + public struct TestConversionRequest: AWSEncodableShape { + /// Specify the source file for an outbound EDI request. + public let source: ConversionSource + /// Specify the format (X12 is the only currently supported format), and other details for the conversion target. + public let target: ConversionTarget + + @inlinable + public init(source: ConversionSource, target: ConversionTarget) { + self.source = source + self.target = target + } + + public func validate(name: String) throws { + try self.target.validate(name: "\(name).target") + } + + private enum CodingKeys: String, CodingKey { + case source = "source" + case target = "target" + } + } + + public struct TestConversionResponse: AWSDecodableShape { + /// Returns the converted file content. 
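SampleDocuments and SampleDocumentKeys above replace the single sampleDocument string with an S3 bucket plus input/output key pairs. A hedged sketch of attaching sample documents to an existing transformer; the bucket and keys are hypothetical placeholders.

import SotoB2bi

func attachSampleDocuments(b2bi: B2bi, transformerId: String) async throws {
    let samples = B2bi.SampleDocuments(
        bucketName: "my-b2bi-samples", // hypothetical bucket (3-63 characters, per the validation above)
        keys: [
            B2bi.SampleDocumentKeys(input: "samples/inbound-850.edi", output: "samples/outbound-850.json")
        ]
    )
    _ = try await b2bi.updateTransformer(sampleDocuments: samples, transformerId: transformerId)
}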
+ public let convertedFileContent: String + /// Returns an array of strings, each containing a message that Amazon Web Services B2B Data Interchange generates during the conversion. + public let validationMessages: [String]? + + @inlinable + public init(convertedFileContent: String, validationMessages: [String]? = nil) { + self.convertedFileContent = convertedFileContent + self.validationMessages = validationMessages + } + + private enum CodingKeys: String, CodingKey { + case convertedFileContent = "convertedFileContent" + case validationMessages = "validationMessages" + } + } + public struct TestMappingRequest: AWSEncodableShape { /// Specifies that the currently supported file formats for EDI transformations are JSON and XML. public let fileFormat: FileFormat /// Specify the contents of the EDI (electronic data interchange) XML or JSON file that is used as input for the transform. public let inputFileContent: String - /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. + /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. This parameter is available for backwards compatibility. Use the Mapping data type instead. public let mappingTemplate: String @inlinable @@ -1576,32 +1989,62 @@ extension B2bi { @CustomCoding public var createdAt: Date /// Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents. - public let ediType: EdiType + public let ediType: EdiType? /// Returns that the currently supported file formats for EDI transformations are JSON and XML. - public let fileFormat: FileFormat + public let fileFormat: FileFormat? + /// Returns a structure that contains the format options for the transformation. + public let inputConversion: InputConversion? + /// Returns the structure that contains the mapping template and its language (either XSLT or JSONATA). + public let mapping: Mapping? /// Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. - public let mappingTemplate: String + public let mappingTemplate: String? /// Returns a timestamp representing the date and time for the most recent change for the transformer object. @OptionalCustomCoding public var modifiedAt: Date? /// Returns the descriptive name for the transformer. public let name: String + /// Returns the OutputConversion object, which contains the format options for the outbound transformation. + public let outputConversion: OutputConversion? /// Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data. public let sampleDocument: String? + /// Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents. + public let sampleDocuments: SampleDocuments? /// Returns the state of the newly created transformer. The transformer can be either active or inactive. For the transformer to be used in a capability, its status must active. public let status: TransformerStatus /// Returns the system-assigned unique identifier for the transformer. public let transformerId: String @inlinable - public init(createdAt: Date, ediType: EdiType, fileFormat: FileFormat, mappingTemplate: String, modifiedAt: Date? = nil, name: String, sampleDocument: String? 
= nil, status: TransformerStatus, transformerId: String) { + public init(createdAt: Date, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, modifiedAt: Date? = nil, name: String, outputConversion: OutputConversion? = nil, sampleDocuments: SampleDocuments? = nil, status: TransformerStatus, transformerId: String) { + self.createdAt = createdAt + self.ediType = nil + self.fileFormat = nil + self.inputConversion = inputConversion + self.mapping = mapping + self.mappingTemplate = nil + self.modifiedAt = modifiedAt + self.name = name + self.outputConversion = outputConversion + self.sampleDocument = nil + self.sampleDocuments = sampleDocuments + self.status = status + self.transformerId = transformerId + } + + @available(*, deprecated, message: "Members ediType, fileFormat, mappingTemplate, sampleDocument have been deprecated") + @inlinable + public init(createdAt: Date, ediType: EdiType? = nil, fileFormat: FileFormat? = nil, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, mappingTemplate: String? = nil, modifiedAt: Date? = nil, name: String, outputConversion: OutputConversion? = nil, sampleDocument: String? = nil, sampleDocuments: SampleDocuments? = nil, status: TransformerStatus, transformerId: String) { self.createdAt = createdAt self.ediType = ediType self.fileFormat = fileFormat + self.inputConversion = inputConversion + self.mapping = mapping self.mappingTemplate = mappingTemplate self.modifiedAt = modifiedAt self.name = name + self.outputConversion = outputConversion self.sampleDocument = sampleDocument + self.sampleDocuments = sampleDocuments self.status = status self.transformerId = transformerId } @@ -1610,10 +2053,14 @@ extension B2bi { case createdAt = "createdAt" case ediType = "ediType" case fileFormat = "fileFormat" + case inputConversion = "inputConversion" + case mapping = "mapping" case mappingTemplate = "mappingTemplate" case modifiedAt = "modifiedAt" case name = "name" + case outputConversion = "outputConversion" case sampleDocument = "sampleDocument" + case sampleDocuments = "sampleDocuments" case status = "status" case transformerId = "transformerId" } @@ -1745,14 +2192,17 @@ extension B2bi { public struct UpdatePartnershipRequest: AWSEncodableShape { /// List of the capabilities associated with this partnership. public let capabilities: [String]? + /// To update, specify the structure that contains the details for the associated capabilities. + public let capabilityOptions: CapabilityOptions? /// The name of the partnership, used to identify it. public let name: String? /// Specifies the unique, system-generated identifier for a partnership. public let partnershipId: String @inlinable - public init(capabilities: [String]? = nil, name: String? = nil, partnershipId: String) { + public init(capabilities: [String]? = nil, capabilityOptions: CapabilityOptions? = nil, name: String? = nil, partnershipId: String) { self.capabilities = capabilities + self.capabilityOptions = capabilityOptions self.name = name self.partnershipId = partnershipId } @@ -1761,6 +2211,7 @@ extension B2bi { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.capabilities, forKey: .capabilities) + try container.encodeIfPresent(self.capabilityOptions, forKey: .capabilityOptions) try container.encodeIfPresent(self.name, forKey: .name) request.encodePath(self.partnershipId, key: "partnershipId") } @@ -1771,6 +2222,7 @@ extension B2bi { try validate($0, name: "capabilities[]", parent: name, min: 1) try validate($0, name: "capabilities[]", parent: name, pattern: "^[a-zA-Z0-9_-]+$") } + try self.capabilityOptions?.validate(name: "\(name).capabilityOptions") try self.validate(self.name, name: "name", parent: name, max: 254) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.partnershipId, name: "partnershipId", parent: name, max: 64) @@ -1780,6 +2232,7 @@ extension B2bi { private enum CodingKeys: String, CodingKey { case capabilities = "capabilities" + case capabilityOptions = "capabilityOptions" case name = "name" } } @@ -1787,6 +2240,8 @@ extension B2bi { public struct UpdatePartnershipResponse: AWSDecodableShape { /// Returns one or more capabilities associated with this partnership. public let capabilities: [String]? + /// Returns the structure that contains the details for the associated capabilities. + public let capabilityOptions: CapabilityOptions? /// Returns a timestamp that identifies the most recent date and time that the partnership was modified. @CustomCoding public var createdAt: Date @@ -1809,8 +2264,9 @@ extension B2bi { public let tradingPartnerId: String? @inlinable - public init(capabilities: [String]? = nil, createdAt: Date, email: String? = nil, modifiedAt: Date? = nil, name: String? = nil, partnershipArn: String, partnershipId: String, phone: String? = nil, profileId: String, tradingPartnerId: String? = nil) { + public init(capabilities: [String]? = nil, capabilityOptions: CapabilityOptions? = nil, createdAt: Date, email: String? = nil, modifiedAt: Date? = nil, name: String? = nil, partnershipArn: String, partnershipId: String, phone: String? = nil, profileId: String, tradingPartnerId: String? = nil) { self.capabilities = capabilities + self.capabilityOptions = capabilityOptions self.createdAt = createdAt self.email = email self.modifiedAt = modifiedAt @@ -1824,6 +2280,7 @@ extension B2bi { private enum CodingKeys: String, CodingKey { case capabilities = "capabilities" + case capabilityOptions = "capabilityOptions" case createdAt = "createdAt" case email = "email" case modifiedAt = "modifiedAt" @@ -1948,24 +2405,52 @@ extension B2bi { public let ediType: EdiType? /// Specifies that the currently supported file formats for EDI transformations are JSON and XML. public let fileFormat: FileFormat? - /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. + /// To update, specify the InputConversion object, which contains the format options for the inbound transformation. + public let inputConversion: InputConversion? + /// Specify the structure that contains the mapping template and its language (either XSLT or JSONATA). + public let mapping: Mapping? + /// Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. This parameter is available for backwards compatibility. Use the Mapping data type instead. public let mappingTemplate: String? /// Specify a new name for the transformer, if you want to update it. public let name: String? 
+ /// To update, specify the OutputConversion object, which contains the format options for the outbound transformation. + public let outputConversion: OutputConversion? /// Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data. public let sampleDocument: String? + /// Specify a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents. + public let sampleDocuments: SampleDocuments? /// Specifies the transformer's status. You can update the state of the transformer, from active to inactive, or inactive to active. public let status: TransformerStatus? /// Specifies the system-assigned unique identifier for the transformer. public let transformerId: String @inlinable - public init(ediType: EdiType? = nil, fileFormat: FileFormat? = nil, mappingTemplate: String? = nil, name: String? = nil, sampleDocument: String? = nil, status: TransformerStatus? = nil, transformerId: String) { + public init(inputConversion: InputConversion? = nil, mapping: Mapping? = nil, name: String? = nil, outputConversion: OutputConversion? = nil, sampleDocuments: SampleDocuments? = nil, status: TransformerStatus? = nil, transformerId: String) { + self.ediType = nil + self.fileFormat = nil + self.inputConversion = inputConversion + self.mapping = mapping + self.mappingTemplate = nil + self.name = name + self.outputConversion = outputConversion + self.sampleDocument = nil + self.sampleDocuments = sampleDocuments + self.status = status + self.transformerId = transformerId + } + + @available(*, deprecated, message: "Members ediType, fileFormat, mappingTemplate, sampleDocument have been deprecated") + @inlinable + public init(ediType: EdiType? = nil, fileFormat: FileFormat? = nil, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, mappingTemplate: String? = nil, name: String? = nil, outputConversion: OutputConversion? = nil, sampleDocument: String? = nil, sampleDocuments: SampleDocuments? = nil, status: TransformerStatus? 
= nil, transformerId: String) { self.ediType = ediType self.fileFormat = fileFormat + self.inputConversion = inputConversion + self.mapping = mapping self.mappingTemplate = mappingTemplate self.name = name + self.outputConversion = outputConversion self.sampleDocument = sampleDocument + self.sampleDocuments = sampleDocuments self.status = status self.transformerId = transformerId } @@ -1975,18 +2460,25 @@ extension B2bi { var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.ediType, forKey: .ediType) try container.encodeIfPresent(self.fileFormat, forKey: .fileFormat) + try container.encodeIfPresent(self.inputConversion, forKey: .inputConversion) + try container.encodeIfPresent(self.mapping, forKey: .mapping) try container.encodeIfPresent(self.mappingTemplate, forKey: .mappingTemplate) try container.encodeIfPresent(self.name, forKey: .name) + try container.encodeIfPresent(self.outputConversion, forKey: .outputConversion) try container.encodeIfPresent(self.sampleDocument, forKey: .sampleDocument) + try container.encodeIfPresent(self.sampleDocuments, forKey: .sampleDocuments) try container.encodeIfPresent(self.status, forKey: .status) request.encodePath(self.transformerId, key: "transformerId") } public func validate(name: String) throws { + try self.mapping?.validate(name: "\(name).mapping") try self.validate(self.mappingTemplate, name: "mappingTemplate", parent: name, max: 350000) try self.validate(self.name, name: "name", parent: name, max: 254) try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_-]{1,512}$") try self.validate(self.sampleDocument, name: "sampleDocument", parent: name, max: 1024) + try self.sampleDocuments?.validate(name: "\(name).sampleDocuments") try self.validate(self.transformerId, name: "transformerId", parent: name, max: 64) try self.validate(self.transformerId, name: "transformerId", parent: name, min: 1) try self.validate(self.transformerId, name: "transformerId", parent: name, pattern: "^[a-zA-Z0-9_-]+$") @@ -1995,9 +2487,13 @@ extension B2bi { private enum CodingKeys: String, CodingKey { case ediType = "ediType" case fileFormat = "fileFormat" + case inputConversion = "inputConversion" + case mapping = "mapping" case mappingTemplate = "mappingTemplate" case name = "name" + case outputConversion = "outputConversion" case sampleDocument = "sampleDocument" + case sampleDocuments = "sampleDocuments" case status = "status" } } @@ -2007,18 +2503,26 @@ extension B2bi { @CustomCoding public var createdAt: Date /// Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents. - public let ediType: EdiType + public let ediType: EdiType? /// Returns that the currently supported file formats for EDI transformations are JSON and XML. - public let fileFormat: FileFormat + public let fileFormat: FileFormat? + /// Returns the InputConversion object, which contains the format options for the inbound transformation. + public let inputConversion: InputConversion? + /// Returns the structure that contains the mapping template and its language (either XSLT or JSONATA). + public let mapping: Mapping? /// Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT. - public let mappingTemplate: String + public let mappingTemplate: String? 
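// --- Illustrative sketch (not part of the generated diff) ---------------------
// Building an UpdateTransformerRequest with the new, non-deprecated initializer
// shown above, which takes mapping/inputConversion/outputConversion/sampleDocuments
// instead of the legacy ediType/fileFormat/mappingTemplate/sampleDocument members.
// Assumes `import SotoB2bi`; the identifier is a placeholder and the `.xslt` case
// name is an assumption about the generated MappingTemplateLanguage enum.
import SotoB2bi

let exampleUpdateRequest = B2bi.UpdateTransformerRequest(
    mapping: B2bi.Mapping(
        template: "<xsl:stylesheet version=\"1.0\"/>", // hypothetical XSLT template
        templateLanguage: .xslt // assumed case name
    ),
    name: "orders-850-transformer",        // must match ^[a-zA-Z0-9_-]{1,512}$
    transformerId: "tr-1234567890abcdef0"  // placeholder transformer ID
)
// ------------------------------------------------------------------------------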
/// Returns a timestamp for last time the transformer was modified. @CustomCoding public var modifiedAt: Date /// Returns the name of the transformer. public let name: String + /// Returns the OutputConversion object, which contains the format options for the outbound transformation. + public let outputConversion: OutputConversion? /// Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data. public let sampleDocument: String? + /// Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents. + public let sampleDocuments: SampleDocuments? /// Returns the state of the newly created transformer. The transformer can be either active or inactive. For the transformer to be used in a capability, its status must active. public let status: TransformerStatus /// Returns an Amazon Resource Name (ARN) for a specific Amazon Web Services resource, such as a capability, partnership, profile, or transformer. @@ -2027,14 +2531,37 @@ extension B2bi { public let transformerId: String @inlinable - public init(createdAt: Date, ediType: EdiType, fileFormat: FileFormat, mappingTemplate: String, modifiedAt: Date, name: String, sampleDocument: String? = nil, status: TransformerStatus, transformerArn: String, transformerId: String) { + public init(createdAt: Date, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, modifiedAt: Date, name: String, outputConversion: OutputConversion? = nil, sampleDocuments: SampleDocuments? = nil, status: TransformerStatus, transformerArn: String, transformerId: String) { + self.createdAt = createdAt + self.ediType = nil + self.fileFormat = nil + self.inputConversion = inputConversion + self.mapping = mapping + self.mappingTemplate = nil + self.modifiedAt = modifiedAt + self.name = name + self.outputConversion = outputConversion + self.sampleDocument = nil + self.sampleDocuments = sampleDocuments + self.status = status + self.transformerArn = transformerArn + self.transformerId = transformerId + } + + @available(*, deprecated, message: "Members ediType, fileFormat, mappingTemplate, sampleDocument have been deprecated") + @inlinable + public init(createdAt: Date, ediType: EdiType? = nil, fileFormat: FileFormat? = nil, inputConversion: InputConversion? = nil, mapping: Mapping? = nil, mappingTemplate: String? = nil, modifiedAt: Date, name: String, outputConversion: OutputConversion? = nil, sampleDocument: String? = nil, sampleDocuments: SampleDocuments? 
= nil, status: TransformerStatus, transformerArn: String, transformerId: String) { self.createdAt = createdAt self.ediType = ediType self.fileFormat = fileFormat + self.inputConversion = inputConversion + self.mapping = mapping self.mappingTemplate = mappingTemplate self.modifiedAt = modifiedAt self.name = name + self.outputConversion = outputConversion self.sampleDocument = sampleDocument + self.sampleDocuments = sampleDocuments self.status = status self.transformerArn = transformerArn self.transformerId = transformerId @@ -2044,16 +2571,54 @@ extension B2bi { case createdAt = "createdAt" case ediType = "ediType" case fileFormat = "fileFormat" + case inputConversion = "inputConversion" + case mapping = "mapping" case mappingTemplate = "mappingTemplate" case modifiedAt = "modifiedAt" case name = "name" + case outputConversion = "outputConversion" case sampleDocument = "sampleDocument" + case sampleDocuments = "sampleDocuments" case status = "status" case transformerArn = "transformerArn" case transformerId = "transformerId" } } + public struct X12Delimiters: AWSEncodableShape & AWSDecodableShape { + /// The component, or sub-element, separator. The default value is : (colon). + public let componentSeparator: String? + /// The data element separator. The default value is * (asterisk). + public let dataElementSeparator: String? + /// The segment terminator. The default value is ~ (tilde). + public let segmentTerminator: String? + + @inlinable + public init(componentSeparator: String? = nil, dataElementSeparator: String? = nil, segmentTerminator: String? = nil) { + self.componentSeparator = componentSeparator + self.dataElementSeparator = dataElementSeparator + self.segmentTerminator = segmentTerminator + } + + public func validate(name: String) throws { + try self.validate(self.componentSeparator, name: "componentSeparator", parent: name, max: 1) + try self.validate(self.componentSeparator, name: "componentSeparator", parent: name, min: 1) + try self.validate(self.componentSeparator, name: "componentSeparator", parent: name, pattern: "^[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]$") + try self.validate(self.dataElementSeparator, name: "dataElementSeparator", parent: name, max: 1) + try self.validate(self.dataElementSeparator, name: "dataElementSeparator", parent: name, min: 1) + try self.validate(self.dataElementSeparator, name: "dataElementSeparator", parent: name, pattern: "^[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]$") + try self.validate(self.segmentTerminator, name: "segmentTerminator", parent: name, max: 1) + try self.validate(self.segmentTerminator, name: "segmentTerminator", parent: name, min: 1) + try self.validate(self.segmentTerminator, name: "segmentTerminator", parent: name, pattern: "^[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]$") + } + + private enum CodingKeys: String, CodingKey { + case componentSeparator = "componentSeparator" + case dataElementSeparator = "dataElementSeparator" + case segmentTerminator = "segmentTerminator" + } + } + public struct X12Details: AWSEncodableShape & AWSDecodableShape { /// Returns an enumerated type where each value identifies an X12 transaction set. Transaction sets are maintained by the X12 Accredited Standards Committee. public let transactionSet: X12TransactionSet? @@ -2072,6 +2637,151 @@ extension B2bi { } } + public struct X12Envelope: AWSEncodableShape & AWSDecodableShape { + /// A container for the X12 outbound EDI headers. + public let common: X12OutboundEdiHeaders? + + @inlinable + public init(common: X12OutboundEdiHeaders? 
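// --- Illustrative sketch (not part of the generated diff) ---------------------
// Assembling the outbound X12 envelope from the shapes added in this hunk:
// X12Delimiters (above) plus the X12FunctionalGroupHeaders, X12InterchangeControlHeaders
// and X12OutboundEdiHeaders defined just below, wrapped in an X12Envelope.
// Assumes `import SotoB2bi`; all header values are placeholders.
import SotoB2bi

let exampleX12Envelope = B2bi.X12Envelope(
    common: B2bi.X12OutboundEdiHeaders(
        delimiters: B2bi.X12Delimiters(
            componentSeparator: ":",   // default component (sub-element) separator
            dataElementSeparator: "*", // default data element separator
            segmentTerminator: "~"     // default segment terminator
        ),
        functionalGroupHeaders: B2bi.X12FunctionalGroupHeaders(
            applicationReceiverCode: "RECEIVERGS", // placeholder GS-03 value
            applicationSenderCode: "SENDERGS",     // placeholder GS-02 value
            responsibleAgencyCode: "X"             // placeholder GS-07 value
        ),
        interchangeControlHeaders: B2bi.X12InterchangeControlHeaders(
            receiverId: "RECEIVER15CHARS",  // placeholder ISA-08, exactly 15 characters
            receiverIdQualifier: "ZZ",      // placeholder ISA-07 qualifier
            senderId: "SENDER15CHARACT",    // placeholder ISA-06, exactly 15 characters
            senderIdQualifier: "ZZ",        // placeholder ISA-05 qualifier
            usageIndicatorCode: "T"         // ISA-15: T marks a test interchange
        ),
        validateEdi: true
    )
)
// ------------------------------------------------------------------------------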
= nil) { + self.common = common + } + + public func validate(name: String) throws { + try self.common?.validate(name: "\(name).common") + } + + private enum CodingKeys: String, CodingKey { + case common = "common" + } + } + + public struct X12FunctionalGroupHeaders: AWSEncodableShape & AWSDecodableShape { + /// A value representing the code used to identify the party receiving a message, at position GS-03. + public let applicationReceiverCode: String? + /// A value representing the code used to identify the party transmitting a message, at position GS-02. + public let applicationSenderCode: String? + /// A code that identifies the issuer of the standard, at position GS-07. + public let responsibleAgencyCode: String? + + @inlinable + public init(applicationReceiverCode: String? = nil, applicationSenderCode: String? = nil, responsibleAgencyCode: String? = nil) { + self.applicationReceiverCode = applicationReceiverCode + self.applicationSenderCode = applicationSenderCode + self.responsibleAgencyCode = responsibleAgencyCode + } + + public func validate(name: String) throws { + try self.validate(self.applicationReceiverCode, name: "applicationReceiverCode", parent: name, max: 15) + try self.validate(self.applicationReceiverCode, name: "applicationReceiverCode", parent: name, min: 2) + try self.validate(self.applicationReceiverCode, name: "applicationReceiverCode", parent: name, pattern: "^[a-zA-Z0-9]*$") + try self.validate(self.applicationSenderCode, name: "applicationSenderCode", parent: name, max: 15) + try self.validate(self.applicationSenderCode, name: "applicationSenderCode", parent: name, min: 2) + try self.validate(self.applicationSenderCode, name: "applicationSenderCode", parent: name, pattern: "^[a-zA-Z0-9]*$") + try self.validate(self.responsibleAgencyCode, name: "responsibleAgencyCode", parent: name, max: 2) + try self.validate(self.responsibleAgencyCode, name: "responsibleAgencyCode", parent: name, min: 1) + try self.validate(self.responsibleAgencyCode, name: "responsibleAgencyCode", parent: name, pattern: "^[a-zA-Z0-9]*$") + } + + private enum CodingKeys: String, CodingKey { + case applicationReceiverCode = "applicationReceiverCode" + case applicationSenderCode = "applicationSenderCode" + case responsibleAgencyCode = "responsibleAgencyCode" + } + } + + public struct X12InterchangeControlHeaders: AWSEncodableShape & AWSDecodableShape { + /// Located at position ISA-14 in the header. The value "1" indicates that the sender is requesting an interchange acknowledgment at receipt of the interchange. The value "0" is used otherwise. + public let acknowledgmentRequestedCode: String? + /// Located at position ISA-08 in the header. This value (along with the receiverIdQualifier) identifies the intended recipient of the interchange. + public let receiverId: String? + /// Located at position ISA-07 in the header. Qualifier for the receiver ID. Together, the ID and qualifier uniquely identify the receiving trading partner. + public let receiverIdQualifier: String? + /// Located at position ISA-11 in the header. This string makes it easier when you need to group similar adjacent element values together without using extra segments. This parameter is only honored for version greater than 401 (VERSION_4010 and higher). For versions less than 401, this field is called StandardsId, in which case our service sets the value to U. + public let repetitionSeparator: String? + /// Located at position ISA-06 in the header. 
This value (along with the senderIdQualifier) identifies the sender of the interchange. + public let senderId: String? + /// Located at position ISA-05 in the header. Qualifier for the sender ID. Together, the ID and qualifier uniquely identify the sending trading partner. + public let senderIdQualifier: String? + /// Located at position ISA-15 in the header. Specifies how this interchange is being used: T indicates this interchange is for testing. P indicates this interchange is for production. I indicates this interchange is informational. + public let usageIndicatorCode: String? + + @inlinable + public init(acknowledgmentRequestedCode: String? = nil, receiverId: String? = nil, receiverIdQualifier: String? = nil, repetitionSeparator: String? = nil, senderId: String? = nil, senderIdQualifier: String? = nil, usageIndicatorCode: String? = nil) { + self.acknowledgmentRequestedCode = acknowledgmentRequestedCode + self.receiverId = receiverId + self.receiverIdQualifier = receiverIdQualifier + self.repetitionSeparator = repetitionSeparator + self.senderId = senderId + self.senderIdQualifier = senderIdQualifier + self.usageIndicatorCode = usageIndicatorCode + } + + public func validate(name: String) throws { + try self.validate(self.acknowledgmentRequestedCode, name: "acknowledgmentRequestedCode", parent: name, max: 1) + try self.validate(self.acknowledgmentRequestedCode, name: "acknowledgmentRequestedCode", parent: name, min: 1) + try self.validate(self.acknowledgmentRequestedCode, name: "acknowledgmentRequestedCode", parent: name, pattern: "^[a-zA-Z0-9]*$") + try self.validate(self.receiverId, name: "receiverId", parent: name, max: 15) + try self.validate(self.receiverId, name: "receiverId", parent: name, min: 15) + try self.validate(self.receiverId, name: "receiverId", parent: name, pattern: "^[a-zA-Z0-9]*$") + try self.validate(self.receiverIdQualifier, name: "receiverIdQualifier", parent: name, max: 2) + try self.validate(self.receiverIdQualifier, name: "receiverIdQualifier", parent: name, min: 2) + try self.validate(self.receiverIdQualifier, name: "receiverIdQualifier", parent: name, pattern: "^[a-zA-Z0-9]*$") + try self.validate(self.repetitionSeparator, name: "repetitionSeparator", parent: name, max: 1) + try self.validate(self.repetitionSeparator, name: "repetitionSeparator", parent: name, min: 1) + try self.validate(self.senderId, name: "senderId", parent: name, max: 15) + try self.validate(self.senderId, name: "senderId", parent: name, min: 15) + try self.validate(self.senderId, name: "senderId", parent: name, pattern: "^[a-zA-Z0-9]*$") + try self.validate(self.senderIdQualifier, name: "senderIdQualifier", parent: name, max: 2) + try self.validate(self.senderIdQualifier, name: "senderIdQualifier", parent: name, min: 2) + try self.validate(self.senderIdQualifier, name: "senderIdQualifier", parent: name, pattern: "^[a-zA-Z0-9]*$") + try self.validate(self.usageIndicatorCode, name: "usageIndicatorCode", parent: name, max: 1) + try self.validate(self.usageIndicatorCode, name: "usageIndicatorCode", parent: name, min: 1) + try self.validate(self.usageIndicatorCode, name: "usageIndicatorCode", parent: name, pattern: "^[a-zA-Z0-9]*$") + } + + private enum CodingKeys: String, CodingKey { + case acknowledgmentRequestedCode = "acknowledgmentRequestedCode" + case receiverId = "receiverId" + case receiverIdQualifier = "receiverIdQualifier" + case repetitionSeparator = "repetitionSeparator" + case senderId = "senderId" + case senderIdQualifier = "senderIdQualifier" + case usageIndicatorCode = 
"usageIndicatorCode" + } + } + + public struct X12OutboundEdiHeaders: AWSEncodableShape & AWSDecodableShape { + /// The delimiters, for example semicolon (;), that separates sections of the headers for the X12 object. + public let delimiters: X12Delimiters? + /// The functional group headers for the X12 object. + public let functionalGroupHeaders: X12FunctionalGroupHeaders? + /// In X12 EDI messages, delimiters are used to mark the end of segments or elements, and are defined in the interchange control header. + public let interchangeControlHeaders: X12InterchangeControlHeaders? + /// Specifies whether or not to validate the EDI for this X12 object: TRUE or FALSE. + public let validateEdi: Bool? + + @inlinable + public init(delimiters: X12Delimiters? = nil, functionalGroupHeaders: X12FunctionalGroupHeaders? = nil, interchangeControlHeaders: X12InterchangeControlHeaders? = nil, validateEdi: Bool? = nil) { + self.delimiters = delimiters + self.functionalGroupHeaders = functionalGroupHeaders + self.interchangeControlHeaders = interchangeControlHeaders + self.validateEdi = validateEdi + } + + public func validate(name: String) throws { + try self.delimiters?.validate(name: "\(name).delimiters") + try self.functionalGroupHeaders?.validate(name: "\(name).functionalGroupHeaders") + try self.interchangeControlHeaders?.validate(name: "\(name).interchangeControlHeaders") + } + + private enum CodingKeys: String, CodingKey { + case delimiters = "delimiters" + case functionalGroupHeaders = "functionalGroupHeaders" + case interchangeControlHeaders = "interchangeControlHeaders" + case validateEdi = "validateEdi" + } + } + public struct CapabilityConfiguration: AWSEncodableShape & AWSDecodableShape { /// An EDI (electronic data interchange) configuration object. public let edi: EdiConfiguration? @@ -2090,6 +2800,19 @@ extension B2bi { } } + public struct ConversionTargetFormatDetails: AWSEncodableShape { + public let x12: X12Details? + + @inlinable + public init(x12: X12Details? = nil) { + self.x12 = x12 + } + + private enum CodingKeys: String, CodingKey { + case x12 = "x12" + } + } + public struct EdiType: AWSEncodableShape & AWSDecodableShape { /// Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents. public let x12Details: X12Details? @@ -2103,6 +2826,81 @@ extension B2bi { case x12Details = "x12Details" } } + + public struct FormatOptions: AWSEncodableShape & AWSDecodableShape { + public let x12: X12Details? + + @inlinable + public init(x12: X12Details? = nil) { + self.x12 = x12 + } + + private enum CodingKeys: String, CodingKey { + case x12 = "x12" + } + } + + public struct InputFileSource: AWSEncodableShape { + /// Specify the input contents, as a string, for the source of an outbound transformation. + public let fileContent: String? + + @inlinable + public init(fileContent: String? = nil) { + self.fileContent = fileContent + } + + private enum CodingKeys: String, CodingKey { + case fileContent = "fileContent" + } + } + + public struct OutboundEdiOptions: AWSEncodableShape & AWSDecodableShape { + /// A structure that contains an X12 envelope structure. + public let x12: X12Envelope? + + @inlinable + public init(x12: X12Envelope? 
= nil) { + self.x12 = x12 + } + + public func validate(name: String) throws { + try self.x12?.validate(name: "\(name).x12") + } + + private enum CodingKeys: String, CodingKey { + case x12 = "x12" + } + } + + public struct OutputSampleFileSource: AWSEncodableShape { + public let fileLocation: S3Location? + + @inlinable + public init(fileLocation: S3Location? = nil) { + self.fileLocation = fileLocation + } + + public func validate(name: String) throws { + try self.fileLocation?.validate(name: "\(name).fileLocation") + } + + private enum CodingKeys: String, CodingKey { + case fileLocation = "fileLocation" + } + } + + public struct TemplateDetails: AWSEncodableShape { + public let x12: X12Details? + + @inlinable + public init(x12: X12Details? = nil) { + self.x12 = x12 + } + + private enum CodingKeys: String, CodingKey { + case x12 = "x12" + } + } } // MARK: - Errors diff --git a/Sources/Soto/Services/Bedrock/Bedrock_api.swift b/Sources/Soto/Services/Bedrock/Bedrock_api.swift index 666b844638..dfa0f8bc9d 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_api.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_api.swift @@ -78,6 +78,7 @@ public struct Bedrock: AWSService { /// custom endpoints for regions static var serviceEndpoints: [String: String] {[ "bedrock-ap-northeast-1": "bedrock.ap-northeast-1.amazonaws.com", + "bedrock-ap-northeast-2": "bedrock.ap-northeast-2.amazonaws.com", "bedrock-ap-south-1": "bedrock.ap-south-1.amazonaws.com", "bedrock-ap-southeast-1": "bedrock.ap-southeast-1.amazonaws.com", "bedrock-ap-southeast-2": "bedrock.ap-southeast-2.amazonaws.com", @@ -88,9 +89,11 @@ public struct Bedrock: AWSService { "bedrock-eu-west-3": "bedrock.eu-west-3.amazonaws.com", "bedrock-fips-ca-central-1": "bedrock-fips.ca-central-1.amazonaws.com", "bedrock-fips-us-east-1": "bedrock-fips.us-east-1.amazonaws.com", + "bedrock-fips-us-east-2": "bedrock-fips.us-east-2.amazonaws.com", "bedrock-fips-us-gov-west-1": "bedrock-fips.us-gov-west-1.amazonaws.com", "bedrock-fips-us-west-2": "bedrock-fips.us-west-2.amazonaws.com", "bedrock-runtime-ap-northeast-1": "bedrock-runtime.ap-northeast-1.amazonaws.com", + "bedrock-runtime-ap-northeast-2": "bedrock-runtime.ap-northeast-2.amazonaws.com", "bedrock-runtime-ap-south-1": "bedrock-runtime.ap-south-1.amazonaws.com", "bedrock-runtime-ap-southeast-1": "bedrock-runtime.ap-southeast-1.amazonaws.com", "bedrock-runtime-ap-southeast-2": "bedrock-runtime.ap-southeast-2.amazonaws.com", @@ -101,14 +104,17 @@ public struct Bedrock: AWSService { "bedrock-runtime-eu-west-3": "bedrock-runtime.eu-west-3.amazonaws.com", "bedrock-runtime-fips-ca-central-1": "bedrock-runtime-fips.ca-central-1.amazonaws.com", "bedrock-runtime-fips-us-east-1": "bedrock-runtime-fips.us-east-1.amazonaws.com", + "bedrock-runtime-fips-us-east-2": "bedrock-runtime-fips.us-east-2.amazonaws.com", "bedrock-runtime-fips-us-gov-west-1": "bedrock-runtime-fips.us-gov-west-1.amazonaws.com", "bedrock-runtime-fips-us-west-2": "bedrock-runtime-fips.us-west-2.amazonaws.com", "bedrock-runtime-sa-east-1": "bedrock-runtime.sa-east-1.amazonaws.com", "bedrock-runtime-us-east-1": "bedrock-runtime.us-east-1.amazonaws.com", + "bedrock-runtime-us-east-2": "bedrock-runtime.us-east-2.amazonaws.com", "bedrock-runtime-us-gov-west-1": "bedrock-runtime.us-gov-west-1.amazonaws.com", "bedrock-runtime-us-west-2": "bedrock-runtime.us-west-2.amazonaws.com", "bedrock-sa-east-1": "bedrock.sa-east-1.amazonaws.com", "bedrock-us-east-1": "bedrock.us-east-1.amazonaws.com", + "bedrock-us-east-2": "bedrock.us-east-2.amazonaws.com", 
"bedrock-us-gov-west-1": "bedrock.us-gov-west-1.amazonaws.com", "bedrock-us-west-2": "bedrock.us-west-2.amazonaws.com" ]} @@ -165,7 +171,7 @@ public struct Bedrock: AWSService { /// - clientRequestToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. /// - customerEncryptionKeyId: Specify your customer managed key ARN that will be used to encrypt your model evaluation job. /// - evaluationConfig: Specifies whether the model evaluation job is automatic or uses human worker. - /// - inferenceConfig: Specify the models you want to use in your model evaluation job. Automatic model evaluation jobs support a single model, and model evaluation job that use human workers support two models. + /// - inferenceConfig: Specify the models you want to use in your model evaluation job. Automatic model evaluation jobs support a single model or inference profile, and model evaluation job that use human workers support two models or inference profiles. /// - jobDescription: A description of the model evaluation job. /// - jobName: The name of the model evaluation job. Model evaluation job names must unique with your AWS account, and your account's AWS region. /// - jobTags: Tags to attach to the model evaluation job. @@ -366,7 +372,7 @@ public struct Bedrock: AWSService { /// - roleArn: The Amazon Resource Name (ARN) of an IAM service role that Amazon Bedrock can assume to perform tasks on your behalf. For example, during model training, Amazon Bedrock needs your permission to read input data from an S3 bucket, write model artifacts to an S3 bucket. To pass this role to Amazon Bedrock, the caller of this API must have the iam:PassRole permission. /// - trainingDataConfig: Information about the training dataset. /// - validationDataConfig: Information about the validation dataset. - /// - vpcConfig: VPC configuration (optional). Configuration parameters for the private Virtual Private Cloud (VPC) that contains the resources you are using for this job. + /// - vpcConfig: The configuration of the Virtual Private Cloud (VPC) that contains the resources that you're using for this job. For more information, see Protect your model customization jobs using a VPC. /// - logger: Logger use during operation @inlinable public func createModelCustomizationJob( @@ -482,6 +488,7 @@ public struct Bedrock: AWSService { /// - roleArn: The Amazon Resource Name (ARN) of the service role with permissions to carry out and manage batch inference. You can use the console to create a default service role or follow the steps at Create a service role for batch inference. /// - tags: Any tags to associate with the batch inference job. For more information, see Tagging Amazon Bedrock resources. /// - timeoutDurationInHours: The number of hours after which to force the batch inference job to time out. + /// - vpcConfig: The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC. /// - logger: Logger use during operation @inlinable public func createModelInvocationJob( @@ -493,6 +500,7 @@ public struct Bedrock: AWSService { roleArn: String, tags: [Tag]? = nil, timeoutDurationInHours: Int? = nil, + vpcConfig: VpcConfig? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateModelInvocationJobResponse { let input = CreateModelInvocationJobRequest( @@ -503,7 +511,8 @@ public struct Bedrock: AWSService { outputDataConfig: outputDataConfig, roleArn: roleArn, tags: tags, - timeoutDurationInHours: timeoutDurationInHours + timeoutDurationInHours: timeoutDurationInHours, + vpcConfig: vpcConfig ) return try await self.createModelInvocationJob(input, logger: logger) } diff --git a/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift b/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift index 904a7bd11a..c1f9874f66 100644 --- a/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift +++ b/Sources/Soto/Services/Bedrock/Bedrock_shapes.swift @@ -455,7 +455,7 @@ extension Bedrock { public let customerEncryptionKeyId: String? /// Specifies whether the model evaluation job is automatic or uses human worker. public let evaluationConfig: EvaluationConfig - /// Specify the models you want to use in your model evaluation job. Automatic model evaluation jobs support a single model, and model evaluation job that use human workers support two models. + /// Specify the models you want to use in your model evaluation job. Automatic model evaluation jobs support a single model or inference profile, and model evaluation job that use human workers support two models or inference profiles. public let inferenceConfig: EvaluationInferenceConfig /// A description of the model evaluation job. public let jobDescription: String? @@ -791,7 +791,7 @@ extension Bedrock { public let trainingDataConfig: TrainingDataConfig /// Information about the validation dataset. public let validationDataConfig: ValidationDataConfig? - /// VPC configuration (optional). Configuration parameters for the private Virtual Private Cloud (VPC) that contains the resources you are using for this job. + /// The configuration of the Virtual Private Cloud (VPC) that contains the resources that you're using for this job. For more information, see Protect your model customization jobs using a VPC. public let vpcConfig: VpcConfig? @inlinable @@ -980,9 +980,11 @@ extension Bedrock { public let tags: [Tag]? /// The number of hours after which to force the batch inference job to time out. public let timeoutDurationInHours: Int? + /// The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC. + public let vpcConfig: VpcConfig? @inlinable - public init(clientRequestToken: String? = CreateModelInvocationJobRequest.idempotencyToken(), inputDataConfig: ModelInvocationJobInputDataConfig, jobName: String, modelId: String, outputDataConfig: ModelInvocationJobOutputDataConfig, roleArn: String, tags: [Tag]? = nil, timeoutDurationInHours: Int? = nil) { + public init(clientRequestToken: String? = CreateModelInvocationJobRequest.idempotencyToken(), inputDataConfig: ModelInvocationJobInputDataConfig, jobName: String, modelId: String, outputDataConfig: ModelInvocationJobOutputDataConfig, roleArn: String, tags: [Tag]? = nil, timeoutDurationInHours: Int? = nil, vpcConfig: VpcConfig? 
= nil) { self.clientRequestToken = clientRequestToken self.inputDataConfig = inputDataConfig self.jobName = jobName @@ -991,6 +993,7 @@ extension Bedrock { self.roleArn = roleArn self.tags = tags self.timeoutDurationInHours = timeoutDurationInHours + self.vpcConfig = vpcConfig } public func validate(name: String) throws { @@ -1013,6 +1016,7 @@ extension Bedrock { try self.validate(self.tags, name: "tags", parent: name, max: 200) try self.validate(self.timeoutDurationInHours, name: "timeoutDurationInHours", parent: name, max: 168) try self.validate(self.timeoutDurationInHours, name: "timeoutDurationInHours", parent: name, min: 24) + try self.vpcConfig?.validate(name: "\(name).vpcConfig") } private enum CodingKeys: String, CodingKey { @@ -1024,6 +1028,7 @@ extension Bedrock { case roleArn = "roleArn" case tags = "tags" case timeoutDurationInHours = "timeoutDurationInHours" + case vpcConfig = "vpcConfig" } } @@ -1270,7 +1275,7 @@ extension Bedrock { public struct EvaluationBedrockModel: AWSEncodableShape & AWSDecodableShape { /// Each Amazon Bedrock support different inference parameters that change how the model behaves during inference. public let inferenceParams: String - /// The ARN of the Amazon Bedrock model specified. + /// The ARN of the Amazon Bedrock model or inference profile specified. public let modelIdentifier: String @inlinable @@ -1284,7 +1289,7 @@ extension Bedrock { try self.validate(self.inferenceParams, name: "inferenceParams", parent: name, min: 1) try self.validate(self.modelIdentifier, name: "modelIdentifier", parent: name, max: 2048) try self.validate(self.modelIdentifier, name: "modelIdentifier", parent: name, min: 1) - try self.validate(self.modelIdentifier, name: "modelIdentifier", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$") + try self.validate(self.modelIdentifier, name: "modelIdentifier", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:((:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:inference-profile/(([a-z]{2}.)[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))))|(([a-z]{2}[.]{1})([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))$") } private enum CodingKeys: String, CodingKey { @@ -2333,9 +2338,11 @@ extension Bedrock { public var submitTime: Date /// The number of hours after which batch inference job was set to time out. public let timeoutDurationInHours: Int? + /// The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC. + public let vpcConfig: VpcConfig? @inlinable - public init(clientRequestToken: String? = nil, endTime: Date? = nil, inputDataConfig: ModelInvocationJobInputDataConfig, jobArn: String, jobExpirationTime: Date? = nil, jobName: String? = nil, lastModifiedTime: Date? = nil, message: String? = nil, modelId: String, outputDataConfig: ModelInvocationJobOutputDataConfig, roleArn: String, status: ModelInvocationJobStatus? = nil, submitTime: Date, timeoutDurationInHours: Int? = nil) { + public init(clientRequestToken: String? 
= nil, endTime: Date? = nil, inputDataConfig: ModelInvocationJobInputDataConfig, jobArn: String, jobExpirationTime: Date? = nil, jobName: String? = nil, lastModifiedTime: Date? = nil, message: String? = nil, modelId: String, outputDataConfig: ModelInvocationJobOutputDataConfig, roleArn: String, status: ModelInvocationJobStatus? = nil, submitTime: Date, timeoutDurationInHours: Int? = nil, vpcConfig: VpcConfig? = nil) { self.clientRequestToken = clientRequestToken self.endTime = endTime self.inputDataConfig = inputDataConfig @@ -2350,6 +2357,7 @@ extension Bedrock { self.status = status self.submitTime = submitTime self.timeoutDurationInHours = timeoutDurationInHours + self.vpcConfig = vpcConfig } private enum CodingKeys: String, CodingKey { @@ -2367,6 +2375,7 @@ extension Bedrock { case status = "status" case submitTime = "submitTime" case timeoutDurationInHours = "timeoutDurationInHours" + case vpcConfig = "vpcConfig" } } @@ -4189,42 +4198,51 @@ extension Bedrock { } public struct ModelInvocationJobS3InputDataConfig: AWSEncodableShape & AWSDecodableShape { + /// The ID of the Amazon Web Services account that owns the S3 bucket containing the input data. + public let s3BucketOwner: String? /// The format of the input data. public let s3InputFormat: S3InputFormat? /// The S3 location of the input data. public let s3Uri: String @inlinable - public init(s3InputFormat: S3InputFormat? = nil, s3Uri: String) { + public init(s3BucketOwner: String? = nil, s3InputFormat: S3InputFormat? = nil, s3Uri: String) { + self.s3BucketOwner = s3BucketOwner self.s3InputFormat = s3InputFormat self.s3Uri = s3Uri } public func validate(name: String) throws { + try self.validate(self.s3BucketOwner, name: "s3BucketOwner", parent: name, pattern: "^[0-9]{12}$") try self.validate(self.s3Uri, name: "s3Uri", parent: name, max: 1024) try self.validate(self.s3Uri, name: "s3Uri", parent: name, min: 1) try self.validate(self.s3Uri, name: "s3Uri", parent: name, pattern: "^s3://[a-z0-9][-.a-z0-9]{1,61}(?:/[-!_*'().a-z0-9A-Z]+(?:/[-!_*'().a-z0-9A-Z]+)*)?/?$") } private enum CodingKeys: String, CodingKey { + case s3BucketOwner = "s3BucketOwner" case s3InputFormat = "s3InputFormat" case s3Uri = "s3Uri" } } public struct ModelInvocationJobS3OutputDataConfig: AWSEncodableShape & AWSDecodableShape { + /// The ID of the Amazon Web Services account that owns the S3 bucket containing the output data. + public let s3BucketOwner: String? /// The unique identifier of the key that encrypts the S3 location of the output data. public let s3EncryptionKeyId: String? /// The S3 location of the output data. public let s3Uri: String @inlinable - public init(s3EncryptionKeyId: String? = nil, s3Uri: String) { + public init(s3BucketOwner: String? = nil, s3EncryptionKeyId: String? 
= nil, s3Uri: String) { + self.s3BucketOwner = s3BucketOwner self.s3EncryptionKeyId = s3EncryptionKeyId self.s3Uri = s3Uri } public func validate(name: String) throws { + try self.validate(self.s3BucketOwner, name: "s3BucketOwner", parent: name, pattern: "^[0-9]{12}$") try self.validate(self.s3EncryptionKeyId, name: "s3EncryptionKeyId", parent: name, max: 2048) try self.validate(self.s3EncryptionKeyId, name: "s3EncryptionKeyId", parent: name, min: 1) try self.validate(self.s3EncryptionKeyId, name: "s3EncryptionKeyId", parent: name, pattern: "^(arn:aws(-[^:]+)?:kms:[a-zA-Z0-9-]*:[0-9]{12}:((key/[a-zA-Z0-9-]{36})|(alias/[a-zA-Z0-9-_/]+)))|([a-zA-Z0-9-]{36})|(alias/[a-zA-Z0-9-_/]+)$") @@ -4234,6 +4252,7 @@ extension Bedrock { } private enum CodingKeys: String, CodingKey { + case s3BucketOwner = "s3BucketOwner" case s3EncryptionKeyId = "s3EncryptionKeyId" case s3Uri = "s3Uri" } @@ -4272,9 +4291,11 @@ extension Bedrock { public var submitTime: Date /// The number of hours after which the batch inference job was set to time out. public let timeoutDurationInHours: Int? + /// The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC. + public let vpcConfig: VpcConfig? @inlinable - public init(clientRequestToken: String? = nil, endTime: Date? = nil, inputDataConfig: ModelInvocationJobInputDataConfig, jobArn: String, jobExpirationTime: Date? = nil, jobName: String, lastModifiedTime: Date? = nil, message: String? = nil, modelId: String, outputDataConfig: ModelInvocationJobOutputDataConfig, roleArn: String, status: ModelInvocationJobStatus? = nil, submitTime: Date, timeoutDurationInHours: Int? = nil) { + public init(clientRequestToken: String? = nil, endTime: Date? = nil, inputDataConfig: ModelInvocationJobInputDataConfig, jobArn: String, jobExpirationTime: Date? = nil, jobName: String, lastModifiedTime: Date? = nil, message: String? = nil, modelId: String, outputDataConfig: ModelInvocationJobOutputDataConfig, roleArn: String, status: ModelInvocationJobStatus? = nil, submitTime: Date, timeoutDurationInHours: Int? = nil, vpcConfig: VpcConfig? = nil) { self.clientRequestToken = clientRequestToken self.endTime = endTime self.inputDataConfig = inputDataConfig @@ -4289,6 +4310,7 @@ extension Bedrock { self.status = status self.submitTime = submitTime self.timeoutDurationInHours = timeoutDurationInHours + self.vpcConfig = vpcConfig } private enum CodingKeys: String, CodingKey { @@ -4306,6 +4328,7 @@ extension Bedrock { case status = "status" case submitTime = "submitTime" case timeoutDurationInHours = "timeoutDurationInHours" + case vpcConfig = "vpcConfig" } } @@ -4880,9 +4903,9 @@ extension Bedrock { } public struct VpcConfig: AWSEncodableShape & AWSDecodableShape { - /// VPC configuration security group Ids. + /// An array of IDs for each security group in the VPC to use. public let securityGroupIds: [String] - /// VPC configuration subnets. + /// An array of IDs for each subnet in the VPC to use. public let subnetIds: [String] @inlinable @@ -4955,7 +4978,7 @@ extension Bedrock { } public struct EvaluationModelConfig: AWSEncodableShape & AWSDecodableShape { - /// Defines the Amazon Bedrock model and inference parameters you want used. + /// Defines the Amazon Bedrock model or inference profile and inference parameters you want used. public let bedrockModel: EvaluationBedrockModel? 
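// --- Illustrative sketch (not part of the generated diff) ---------------------
// Submitting a batch inference job with the new vpcConfig parameter and the new
// s3BucketOwner members on the S3 input/output configs. Assumptions: the union
// case names .s3InputDataConfig / .s3OutputDataConfig match the generated
// ModelInvocationJob*DataConfig enums; all account IDs, ARNs, bucket names, VPC
// resource IDs and the model ID are placeholders.
import SotoBedrock

func submitBatchInferenceJob(bedrock: Bedrock) async throws {
    let response = try await bedrock.createModelInvocationJob(
        inputDataConfig: .s3InputDataConfig(Bedrock.ModelInvocationJobS3InputDataConfig(
            s3BucketOwner: "111122223333",                  // account that owns the input bucket
            s3Uri: "s3://example-batch-input/records.jsonl"
        )),
        jobName: "nightly-batch-inference",
        modelId: "anthropic.claude-3-haiku-20240307-v1:0",  // placeholder model ID
        outputDataConfig: .s3OutputDataConfig(Bedrock.ModelInvocationJobS3OutputDataConfig(
            s3BucketOwner: "111122223333",                  // account that owns the output bucket
            s3Uri: "s3://example-batch-output/results/"
        )),
        roleArn: "arn:aws:iam::111122223333:role/BedrockBatchRole",
        vpcConfig: Bedrock.VpcConfig(
            securityGroupIds: ["sg-0123456789abcdef0"],     // placeholder security group
            subnetIds: ["subnet-0123456789abcdef0"]         // placeholder subnet
        )
    )
    print(response)
}
// ------------------------------------------------------------------------------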
@inlinable diff --git a/Sources/Soto/Services/BedrockAgent/BedrockAgent_api.swift b/Sources/Soto/Services/BedrockAgent/BedrockAgent_api.swift index cdf66ab436..fb52449cc0 100644 --- a/Sources/Soto/Services/BedrockAgent/BedrockAgent_api.swift +++ b/Sources/Soto/Services/BedrockAgent/BedrockAgent_api.swift @@ -142,7 +142,7 @@ public struct BedrockAgent: AWSService { /// - clientToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. /// - customerEncryptionKeyArn: The Amazon Resource Name (ARN) of the KMS key with which to encrypt the agent. /// - description: A description of the agent. - /// - foundationModel: The foundation model to be used for orchestration by the agent you create. + /// - foundationModel: The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create. /// - guardrailConfiguration: The unique Guardrail configuration assigned to the agent when it is created. /// - idleSessionTTLInSeconds: The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent. A user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout. /// - instruction: Instructions that tell the agent what it should do and how it should interact with users. @@ -459,7 +459,7 @@ public struct BedrockAgent: AWSService { return try await self.createFlowVersion(input, logger: logger) } - /// Creates a knowledge base that contains data sources from which information can be queried and used by LLMs. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up your data for ingestion. If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base. Provide the name and an optional description. Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field. Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object. Provide the configuration for your vector store in the storageConfiguration object. For an Amazon OpenSearch Service database, use the opensearchServerlessConfiguration object. For more information, see Create a vector store in Amazon OpenSearch Service. For an Amazon Aurora database, use the RdsConfiguration object. For more information, see Create a vector store in Amazon Aurora. For a Pinecone database, use the pineconeConfiguration object. For more information, see Create a vector store in Pinecone. For a Redis Enterprise Cloud database, use the redisEnterpriseCloudConfiguration object. For more information, see Create a vector store in Redis Enterprise Cloud. + /// Creates a knowledge base. A knowledge base contains your data sources so that Large Language Models (LLMs) can use your data. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up a knowledge base. If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. 
For more information, see Create a knowledge base. Provide the name and an optional description. Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field. Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object. Provide the configuration for your vector store in the storageConfiguration object. For an Amazon OpenSearch Service database, use the opensearchServerlessConfiguration object. For more information, see Create a vector store in Amazon OpenSearch Service. For an Amazon Aurora database, use the RdsConfiguration object. For more information, see Create a vector store in Amazon Aurora. For a Pinecone database, use the pineconeConfiguration object. For more information, see Create a vector store in Pinecone. For a Redis Enterprise Cloud database, use the redisEnterpriseCloudConfiguration object. For more information, see Create a vector store in Redis Enterprise Cloud. @Sendable @inlinable public func createKnowledgeBase(_ input: CreateKnowledgeBaseRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateKnowledgeBaseResponse { @@ -472,7 +472,7 @@ public struct BedrockAgent: AWSService { logger: logger ) } - /// Creates a knowledge base that contains data sources from which information can be queried and used by LLMs. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up your data for ingestion. If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base. Provide the name and an optional description. Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field. Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object. Provide the configuration for your vector store in the storageConfiguration object. For an Amazon OpenSearch Service database, use the opensearchServerlessConfiguration object. For more information, see Create a vector store in Amazon OpenSearch Service. For an Amazon Aurora database, use the RdsConfiguration object. For more information, see Create a vector store in Amazon Aurora. For a Pinecone database, use the pineconeConfiguration object. For more information, see Create a vector store in Pinecone. For a Redis Enterprise Cloud database, use the redisEnterpriseCloudConfiguration object. For more information, see Create a vector store in Redis Enterprise Cloud. + /// Creates a knowledge base. A knowledge base contains your data sources so that Large Language Models (LLMs) can use your data. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up a knowledge base. If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base. Provide the name and an optional description. Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field. Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object. Provide the configuration for your vector store in the storageConfiguration object. For an Amazon OpenSearch Service database, use the opensearchServerlessConfiguration object. 
For more information, see Create a vector store in Amazon OpenSearch Service. For an Amazon Aurora database, use the RdsConfiguration object. For more information, see Create a vector store in Amazon Aurora. For a Pinecone database, use the pineconeConfiguration object. For more information, see Create a vector store in Pinecone. For a Redis Enterprise Cloud database, use the redisEnterpriseCloudConfiguration object. For more information, see Create a vector store in Redis Enterprise Cloud. /// /// Parameters: /// - clientToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. @@ -1135,7 +1135,7 @@ public struct BedrockAgent: AWSService { /// /// Parameters: /// - dataSourceId: The unique identifier of the data source. - /// - knowledgeBaseId: The unique identifier of the knowledge base that the data source was added to. + /// - knowledgeBaseId: The unique identifier of the knowledge base for the data source. /// - logger: Logger use during operation @inlinable public func getDataSource( @@ -1243,7 +1243,7 @@ public struct BedrockAgent: AWSService { return try await self.getFlowVersion(input, logger: logger) } - /// Gets information about a ingestion job, in which a data source is added to a knowledge base. + /// Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data. @Sendable @inlinable public func getIngestionJob(_ input: GetIngestionJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetIngestionJobResponse { @@ -1256,12 +1256,12 @@ public struct BedrockAgent: AWSService { logger: logger ) } - /// Gets information about a ingestion job, in which a data source is added to a knowledge base. + /// Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data. /// /// Parameters: - /// - dataSourceId: The unique identifier of the data source in the ingestion job. - /// - ingestionJobId: The unique identifier of the ingestion job. - /// - knowledgeBaseId: The unique identifier of the knowledge base for which the ingestion job applies. + /// - dataSourceId: The unique identifier of the data source for the data ingestion job you want to get information on. + /// - ingestionJobId: The unique identifier of the data ingestion job you want to get information on. + /// - knowledgeBaseId: The unique identifier of the knowledge base for the data ingestion job you want to get information on. /// - logger: Logger use during operation @inlinable public func getIngestionJob( @@ -1294,7 +1294,7 @@ public struct BedrockAgent: AWSService { /// Gets information about a knoweldge base. /// /// Parameters: - /// - knowledgeBaseId: The unique identifier of the knowledge base for which to get information. + /// - knowledgeBaseId: The unique identifier of the knowledge base you want to get information on. /// - logger: Logger use during operation @inlinable public func getKnowledgeBase( @@ -1654,7 +1654,7 @@ public struct BedrockAgent: AWSService { return try await self.listFlows(input, logger: logger) } - /// Lists the ingestion jobs for a data source and information about each of them. + /// Lists the data ingestion jobs for a data source. The list also includes information about each job.
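A minimal usage sketch of the reworded GetIngestionJob call above, using the generated convenience method. It assumes an AWSClient configured elsewhere; the region and the three identifiers are placeholders, not real resources.

```swift
import SotoBedrockAgent

// Hedged sketch: fetch a data ingestion job's status via the convenience
// method documented above. `awsClient`, the region, and the identifiers
// are assumptions/placeholders.
func printIngestionJobStatus(awsClient: AWSClient) async throws {
    let bedrockAgent = BedrockAgent(client: awsClient, region: .useast1)
    let response = try await bedrockAgent.getIngestionJob(
        dataSourceId: "DS12345678",
        ingestionJobId: "IJ12345678",
        knowledgeBaseId: "KB12345678"
    )
    print("ingestion job status: \(response.ingestionJob.status)")
}
```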
@Sendable @inlinable public func listIngestionJobs(_ input: ListIngestionJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListIngestionJobsResponse { @@ -1667,15 +1667,15 @@ public struct BedrockAgent: AWSService { logger: logger ) } - /// Lists the ingestion jobs for a data source and information about each of them. + /// Lists the data ingestion jobs for a data source. The list also includes information about each job. /// /// Parameters: - /// - dataSourceId: The unique identifier of the data source for which to return ingestion jobs. - /// - filters: Contains a definition of a filter for which to filter the results. - /// - knowledgeBaseId: The unique identifier of the knowledge base for which to return ingestion jobs. + /// - dataSourceId: The unique identifier of the data source for the list of data ingestion jobs. + /// - filters: Contains information about the filters for filtering the data. + /// - knowledgeBaseId: The unique identifier of the knowledge base for the list of data ingestion jobs. /// - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. /// - nextToken: If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. - /// - sortBy: Contains details about how to sort the results. + /// - sortBy: Contains details about how to sort the data. /// - logger: Logger use during operation @inlinable public func listIngestionJobs( @@ -1698,7 +1698,7 @@ public struct BedrockAgent: AWSService { return try await self.listIngestionJobs(input, logger: logger) } - /// Lists the knowledge bases in an account and information about each of them. + /// Lists the knowledge bases in an account. The list also includes information about each knowledge base. @Sendable @inlinable public func listKnowledgeBases(_ input: ListKnowledgeBasesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListKnowledgeBasesResponse { @@ -1711,7 +1711,7 @@ public struct BedrockAgent: AWSService { logger: logger ) } - /// Lists the knowledge bases in an account and information about each of them. + /// Lists the knowledge bases in an account. The list also includes information about each knowledge base. /// /// Parameters: /// - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. @@ -1852,7 +1852,7 @@ public struct BedrockAgent: AWSService { return try await self.prepareFlow(input, logger: logger) } - /// Begins an ingestion job, in which a data source is added to a knowledge base. + /// Begins a data ingestion job. Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data. @Sendable @inlinable public func startIngestionJob(_ input: StartIngestionJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartIngestionJobResponse { @@ -1865,13 +1865,13 @@ public struct BedrockAgent: AWSService { logger: logger ) } - /// Begins an ingestion job, in which a data source is added to a knowledge base. + /// Begins a data ingestion job.
Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data. /// /// Parameters: /// - clientToken: A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. - /// - dataSourceId: The unique identifier of the data source to ingest. - /// - description: A description of the ingestion job. - /// - knowledgeBaseId: The unique identifier of the knowledge base to which to add the data source. + /// - dataSourceId: The unique identifier of the data source you want to ingest into your knowledge base. + /// - description: A description of the data ingestion job. + /// - knowledgeBaseId: The unique identifier of the knowledge base for the data ingestion job. /// - logger: Logger use during operation @inlinable public func startIngestionJob( @@ -1890,6 +1890,41 @@ public struct BedrockAgent: AWSService { return try await self.startIngestionJob(input, logger: logger) } + /// Stops a currently running data ingestion job. You can send a StartIngestionJob request again to ingest the rest of your data when you are ready. + @Sendable + @inlinable + public func stopIngestionJob(_ input: StopIngestionJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopIngestionJobResponse { + try await self.client.execute( + operation: "StopIngestionJob", + path: "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/{ingestionJobId}/stop", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Stops a currently running data ingestion job. You can send a StartIngestionJob request again to ingest the rest of your data when you are ready. + /// + /// Parameters: + /// - dataSourceId: The unique identifier of the data source for the data ingestion job you want to stop. + /// - ingestionJobId: The unique identifier of the data ingestion job you want to stop. + /// - knowledgeBaseId: The unique identifier of the knowledge base for the data ingestion job you want to stop. + /// - logger: Logger use during operation + @inlinable + public func stopIngestionJob( + dataSourceId: String, + ingestionJobId: String, + knowledgeBaseId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> StopIngestionJobResponse { + let input = StopIngestionJobRequest( + dataSourceId: dataSourceId, + ingestionJobId: ingestionJobId, + knowledgeBaseId: knowledgeBaseId + ) + return try await self.stopIngestionJob(input, logger: logger) + } + /// Associate tags with a resource. For more information, see Tagging resources in the Amazon Bedrock User Guide. @Sendable @inlinable @@ -2742,11 +2777,11 @@ extension BedrockAgent { /// Return PaginatorSequence for operation ``listIngestionJobs(_:logger:)``. /// /// - Parameters: - /// - dataSourceId: The unique identifier of the data source for which to return ingestion jobs. - /// - filters: Contains a definition of a filter for which to filter the results. - /// - knowledgeBaseId: The unique identifier of the knowledge base for which to return ingestion jobs. + /// - dataSourceId: The unique identifier of the data source for the list of data ingestion jobs. + /// - filters: Contains information about the filters for filtering the data. + /// - knowledgeBaseId: The unique identifier of the knowledge base for the list of data ingestion jobs. 
/// - maxResults: The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. - /// - sortBy: Contains details about how to sort the results. + /// - sortBy: Contains details about how to sort the data. /// - logger: Logger used for logging @inlinable public func listIngestionJobsPaginator( diff --git a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift index 121ee34a2d..cf59995337 100644 --- a/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift +++ b/Sources/Soto/Services/BedrockAgent/BedrockAgent_shapes.swift @@ -182,6 +182,8 @@ extension BedrockAgent { case failed = "FAILED" case inProgress = "IN_PROGRESS" case starting = "STARTING" + case stopped = "STOPPED" + case stopping = "STOPPING" public var description: String { return self.rawValue } } @@ -1665,7 +1667,7 @@ extension BedrockAgent { public let customerEncryptionKeyArn: String? /// A description of the agent. public let description: String? - /// The foundation model to be used for orchestration by the agent you create. + /// The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create. public let foundationModel: String? /// The unique Guardrail configuration assigned to the agent when it is created. public let guardrailConfiguration: GuardrailConfiguration? @@ -3773,7 +3775,7 @@ extension BedrockAgent { public struct GetDataSourceRequest: AWSEncodableShape { /// The unique identifier of the data source. public let dataSourceId: String - /// The unique identifier of the knowledge base that the data source was added to. + /// The unique identifier of the knowledge base for the data source. public let knowledgeBaseId: String @inlinable @@ -4043,11 +4045,11 @@ extension BedrockAgent { } public struct GetIngestionJobRequest: AWSEncodableShape { - /// The unique identifier of the data source in the ingestion job. + /// The unique identifier of the data source for the data ingestion job you want to get information on. public let dataSourceId: String - /// The unique identifier of the ingestion job. + /// The unique identifier of the data ingestion job you want to get information on. public let ingestionJobId: String - /// The unique identifier of the knowledge base for which the ingestion job applies. + /// The unique identifier of the knowledge base for the data ingestion job you want to get information on. public let knowledgeBaseId: String @inlinable @@ -4075,7 +4077,7 @@ extension BedrockAgent { } public struct GetIngestionJobResponse: AWSDecodableShape { - /// Contains details about the ingestion job. + /// Contains details about the data ingestion job. public let ingestionJob: IngestionJob @inlinable @@ -4089,7 +4091,7 @@ extension BedrockAgent { } public struct GetKnowledgeBaseRequest: AWSEncodableShape { - /// The unique identifier of the knowledge base for which to get information. + /// The unique identifier of the knowledge base you want to get information on. public let knowledgeBaseId: String @inlinable @@ -4309,24 +4311,24 @@ extension BedrockAgent { } public struct IngestionJob: AWSDecodableShape { - /// The unique identifier of the ingested data source. + /// The unique identifier of the data source for the data ingestion job. public let dataSourceId: String - /// The description of the ingestion job. 
+ /// The description of the data ingestion job. public let description: String? - /// A list of reasons that the ingestion job failed. + /// A list of reasons that the data ingestion job failed. public let failureReasons: [String]? - /// The unique identifier of the ingestion job. + /// The unique identifier of the data ingestion job. public let ingestionJobId: String - /// The unique identifier of the knowledge base to which the data source is being added. + /// The unique identifier of the knowledge base for the data ingestion job. public let knowledgeBaseId: String - /// The time at which the ingestion job started. + /// The time the data ingestion job started. If you stop a data ingestion job, the startedAt time is the time the job was started before the job was stopped. @CustomCoding public var startedAt: Date - /// Contains statistics about the ingestion job. + /// Contains statistics about the data ingestion job. public let statistics: IngestionJobStatistics? - /// The status of the ingestion job. + /// The status of the data ingestion job. public let status: IngestionJobStatus - /// The time at which the ingestion job was last updated. + /// The time the data ingestion job was last updated. If you stop a data ingestion job, the updatedAt time is the time the job was stopped. @CustomCoding public var updatedAt: Date @@ -4357,11 +4359,11 @@ extension BedrockAgent { } public struct IngestionJobFilter: AWSEncodableShape { - /// The attribute by which to filter the results. + /// The name of the field or attribute to apply the filter to. public let attribute: IngestionJobFilterAttribute - /// The operation to carry out between the attribute and the values. + /// The operation to apply to the field or attribute. public let `operator`: IngestionJobFilterOperator - /// A list of values for the attribute. + /// A list of values that belong to the field or attribute. public let values: [String] @inlinable @@ -4387,9 +4389,9 @@ extension BedrockAgent { } public struct IngestionJobSortBy: AWSEncodableShape { - /// The attribute by which to sort the results. + /// The name of the field or attribute to sort the data by. public let attribute: IngestionJobSortByAttribute - /// The order by which to sort the results. + /// The order for sorting the data. public let order: SortOrder @inlinable @@ -4405,7 +4407,7 @@ extension BedrockAgent { } public struct IngestionJobStatistics: AWSDecodableShape { - /// The number of source documents that was deleted. + /// The number of source documents that were deleted. public let numberOfDocumentsDeleted: Int64? /// The number of source documents that failed to be ingested. public let numberOfDocumentsFailed: Int64? @@ -4443,22 +4445,22 @@ extension BedrockAgent { } public struct IngestionJobSummary: AWSDecodableShape { - /// The unique identifier of the data source in the ingestion job. + /// The unique identifier of the data source for the data ingestion job. public let dataSourceId: String - /// The description of the ingestion job. + /// The description of the data ingestion job. public let description: String? - /// The unique identifier of the ingestion job. + /// The unique identifier of the data ingestion job. public let ingestionJobId: String - /// The unique identifier of the knowledge base to which the data source is added. + /// The unique identifier of the knowledge base for the data ingestion job. public let knowledgeBaseId: String - /// The time at which the ingestion job was started. + /// The time the data ingestion job started.
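A hedged sketch of the new StopIngestionJob operation paired with StartIngestionJob, based on the convenience signatures added earlier in this diff. The client is assumed to be configured elsewhere and the identifiers are placeholders; the job then surfaces the new STOPPING/STOPPED statuses added to the enum above.

```swift
import SotoBedrockAgent

// Hedged sketch: start a data ingestion job, then stop it with the new
// StopIngestionJob operation. `bedrockAgent` is assumed to be a configured
// client; identifiers are placeholders.
func startThenStopIngestion(bedrockAgent: BedrockAgent) async throws {
    let started = try await bedrockAgent.startIngestionJob(
        dataSourceId: "DS12345678",
        knowledgeBaseId: "KB12345678"
    )
    let stopped = try await bedrockAgent.stopIngestionJob(
        dataSourceId: "DS12345678",
        ingestionJobId: started.ingestionJob.ingestionJobId,
        knowledgeBaseId: "KB12345678"
    )
    // The job passes through the new STOPPING/STOPPED statuses.
    switch stopped.ingestionJob.status {
    case .stopping, .stopped:
        print("ingestion job is being stopped")
    default:
        print("unexpected status: \(stopped.ingestionJob.status)")
    }
}
```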
@CustomCoding public var startedAt: Date - /// Contains statistics for the ingestion job. + /// Contains statistics for the data ingestion job. public let statistics: IngestionJobStatistics? - /// The status of the ingestion job. + /// The status of the data ingestion job. public let status: IngestionJobStatus - /// The time at which the ingestion job was last updated. + /// The time the data ingestion job was last updated. @CustomCoding public var updatedAt: Date @@ -4513,7 +4515,7 @@ extension BedrockAgent { } public struct KnowledgeBase: AWSDecodableShape { - /// The time at which the knowledge base was created. + /// The time the knowledge base was created. @CustomCoding public var createdAt: Date /// The description of the knowledge base. @@ -4534,7 +4536,7 @@ extension BedrockAgent { public let status: KnowledgeBaseStatus /// Contains details about the storage configuration of the knowledge base. public let storageConfiguration: StorageConfiguration - /// The time at which the knowledge base was last updated. + /// The time the knowledge base was last updated. @CustomCoding public var updatedAt: Date @@ -4571,7 +4573,7 @@ extension BedrockAgent { public struct KnowledgeBaseConfiguration: AWSEncodableShape & AWSDecodableShape { /// The type of data that the data source is converted into for the knowledge base. public let type: KnowledgeBaseType - /// Contains details about the embeddings model that'sused to convert the data source. + /// Contains details about the model that's used to convert the data source into vector embeddings. public let vectorKnowledgeBaseConfiguration: VectorKnowledgeBaseConfiguration? @inlinable @@ -4593,7 +4595,7 @@ extension BedrockAgent { public struct KnowledgeBaseFlowNodeConfiguration: AWSEncodableShape & AWSDecodableShape { /// The unique identifier of the knowledge base to query. public let knowledgeBaseId: String - /// The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array. + /// The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array. public let modelId: String? 
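The reworded modelId documentation above, together with the widened validation pattern that follows, indicates that this field now also accepts inference profile identifiers. A hedged construction sketch; the memberwise initializer is assumed from the properties shown, and the ARN is a placeholder.

```swift
import SotoBedrockAgent

// Hedged sketch: a knowledge base flow node pointing at an inference
// profile ARN instead of a plain model ID. The initializer shape is
// assumed; the identifiers are placeholders.
let nodeConfiguration = BedrockAgent.KnowledgeBaseFlowNodeConfiguration(
    knowledgeBaseId: "KB12345678",
    modelId: "arn:aws:bedrock:us-east-1:123456789012:inference-profile/us.anthropic.claude-3-5-sonnet-20240620-v1:0"
)
```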
@inlinable @@ -4607,7 +4609,7 @@ extension BedrockAgent { try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]+$") try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) try self.validate(self.modelId, name: "modelId", parent: name, min: 1) - try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$") + try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$") } private enum CodingKeys: String, CodingKey { @@ -4625,7 +4627,7 @@ extension BedrockAgent { public let name: String /// The status of the knowledge base. public let status: KnowledgeBaseStatus - /// The time at which the knowledge base was last updated. + /// The time the knowledge base was last updated. @CustomCoding public var updatedAt: Date @@ -5184,17 +5186,17 @@ extension BedrockAgent { } public struct ListIngestionJobsRequest: AWSEncodableShape { - /// The unique identifier of the data source for which to return ingestion jobs. + /// The unique identifier of the data source for the list of data ingestion jobs. public let dataSourceId: String - /// Contains a definition of a filter for which to filter the results. + /// Contains information about the filters for filtering the data. public let filters: [IngestionJobFilter]? - /// The unique identifier of the knowledge base for which to return ingestion jobs. + /// The unique identifier of the knowledge base for the list of data ingestion jobs. public let knowledgeBaseId: String /// The maximum number of results to return in the response. If the total number of results is greater than this value, use the token returned in the response in the nextToken field when making another request to return the next batch of results. public let maxResults: Int? /// If the total number of results is greater than the maxResults value provided in the request, enter the token returned in the nextToken field in the response in this field to return the next batch of results. public let nextToken: String? - /// Contains details about how to sort the results. + /// Contains details about how to sort the data. public let sortBy: IngestionJobSortBy? @inlinable @@ -5242,7 +5244,7 @@ extension BedrockAgent { } public struct ListIngestionJobsResponse: AWSDecodableShape { - /// A list of objects, each of which contains information about an ingestion job. + /// A list of data ingestion jobs with information about each job. public let ingestionJobSummaries: [IngestionJobSummary] /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. 
public let nextToken: String? @@ -5286,7 +5288,7 @@ extension BedrockAgent { } public struct ListKnowledgeBasesResponse: AWSDecodableShape { - /// A list of objects, each of which contains information about a knowledge base. + /// A list of knowledge bases with information about each knowledge base. public let knowledgeBaseSummaries: [KnowledgeBaseSummary] /// If the total number of results is greater than the maxResults value provided in the request, use this token when making another request in the nextToken field to return the next batch of results. public let nextToken: String? @@ -5908,7 +5910,7 @@ extension BedrockAgent { public struct PromptFlowNodeInlineConfiguration: AWSEncodableShape & AWSDecodableShape { /// Contains inference configurations for the prompt. public let inferenceConfiguration: PromptInferenceConfiguration? - /// The unique identifier of the model to run inference with. + /// The unique identifier of the model or inference profile to run inference with. public let modelId: String /// Contains a prompt and variables in the prompt that can be replaced with values at runtime. public let templateConfiguration: PromptTemplateConfiguration @@ -5927,7 +5929,7 @@ extension BedrockAgent { try self.inferenceConfiguration?.validate(name: "\(name).inferenceConfiguration") try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) try self.validate(self.modelId, name: "modelId", parent: name, min: 1) - try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$") + try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$") try self.templateConfiguration.validate(name: "\(name).templateConfiguration") } @@ -6044,7 +6046,7 @@ extension BedrockAgent { } public struct PromptOverrideConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Agents for Amazon Bedrock. + /// The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Amazon Bedrock Agents. public let overrideLambda: String? /// Contains configurations to override a prompt template in one part of an agent sequence. For more information, see Advanced prompts. 
public let promptConfigurations: [PromptConfiguration] @@ -6115,7 +6117,7 @@ extension BedrockAgent { public let inferenceConfiguration: PromptInferenceConfiguration? /// An array of objects, each containing a key-value pair that defines a metadata tag and value to attach to a prompt variant. For more information, see Create a prompt using Prompt management. public let metadata: [PromptMetadataEntry]? - /// The unique identifier of the model with which to run inference on the prompt. + /// The unique identifier of the model or inference profile with which to run inference on the prompt. public let modelId: String? /// The name of the prompt variant. public let name: String @@ -6142,7 +6144,7 @@ extension BedrockAgent { try self.validate(self.metadata, name: "metadata", parent: name, max: 50) try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) try self.validate(self.modelId, name: "modelId", parent: name, min: 1) - try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$") + try self.validate(self.modelId, name: "modelId", parent: name, pattern: "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$") try self.validate(self.name, name: "name", parent: name, pattern: "^([0-9a-zA-Z][_-]?){1,100}$") try self.templateConfiguration?.validate(name: "\(name).templateConfiguration") } @@ -6642,11 +6644,11 @@ extension BedrockAgent { public struct StartIngestionJobRequest: AWSEncodableShape { /// A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. public let clientToken: String? - /// The unique identifier of the data source to ingest. + /// The unique identifier of the data source you want to ingest into your knowledge base. public let dataSourceId: String - /// A description of the ingestion job. + /// A description of the data ingestion job. public let description: String? - /// The unique identifier of the knowledge base to which to add the data source. + /// The unique identifier of the knowledge base for the data ingestion job. public let knowledgeBaseId: String @inlinable @@ -6683,7 +6685,53 @@ extension BedrockAgent { } public struct StartIngestionJobResponse: AWSDecodableShape { - /// An object containing information about the ingestion job. + /// Contains information about the data ingestion job. 
+ public let ingestionJob: IngestionJob + + @inlinable + public init(ingestionJob: IngestionJob) { + self.ingestionJob = ingestionJob + } + + private enum CodingKeys: String, CodingKey { + case ingestionJob = "ingestionJob" + } + } + + public struct StopIngestionJobRequest: AWSEncodableShape { + /// The unique identifier of the data source for the data ingestion job you want to stop. + public let dataSourceId: String + /// The unique identifier of the data ingestion job you want to stop. + public let ingestionJobId: String + /// The unique identifier of the knowledge base for the data ingestion job you want to stop. + public let knowledgeBaseId: String + + @inlinable + public init(dataSourceId: String, ingestionJobId: String, knowledgeBaseId: String) { + self.dataSourceId = dataSourceId + self.ingestionJobId = ingestionJobId + self.knowledgeBaseId = knowledgeBaseId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.dataSourceId, key: "dataSourceId") + request.encodePath(self.ingestionJobId, key: "ingestionJobId") + request.encodePath(self.knowledgeBaseId, key: "knowledgeBaseId") + } + + public func validate(name: String) throws { + try self.validate(self.dataSourceId, name: "dataSourceId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.ingestionJobId, name: "ingestionJobId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + try self.validate(self.knowledgeBaseId, name: "knowledgeBaseId", parent: name, pattern: "^[0-9a-zA-Z]{10}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct StopIngestionJobResponse: AWSDecodableShape { + /// Contains information about the stopped data ingestion job. public let ingestionJob: IngestionJob @inlinable diff --git a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift index b86e36f2d1..bf33496104 100644 --- a/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockAgentRuntime/BedrockAgentRuntime_shapes.swift @@ -1233,9 +1233,9 @@ extension BedrockAgentRuntime { public struct ExternalSourcesRetrieveAndGenerateConfiguration: AWSEncodableShape { /// The prompt used with the external source wrapper object with the retrieveAndGenerate function. public let generationConfiguration: ExternalSourcesGenerationConfiguration? - /// The modelArn used with the external source wrapper object in the retrieveAndGenerate function. + /// The model Amazon Resource Name (ARN) for the external source wrapper object in the retrieveAndGenerate function. public let modelArn: String - /// The document used with the external source wrapper object in the retrieveAndGenerate function. + /// The document for the external source wrapper object in the retrieveAndGenerate function. public let sources: [ExternalSource] @inlinable @@ -2561,19 +2561,25 @@ extension BedrockAgentRuntime { } public struct PostProcessingModelInvocationOutput: AWSDecodableShape { + public let metadata: Metadata? /// Details about the response from the Lambda parsing of the output of the post-processing step. public let parsedResponse: PostProcessingParsedResponse? + public let rawResponse: RawResponse? /// The unique identifier of the trace. public let traceId: String? @inlinable - public init(parsedResponse: PostProcessingParsedResponse? = nil, traceId: String? 
= nil) { + public init(metadata: Metadata? = nil, parsedResponse: PostProcessingParsedResponse? = nil, rawResponse: RawResponse? = nil, traceId: String? = nil) { + self.metadata = metadata self.parsedResponse = parsedResponse + self.rawResponse = rawResponse self.traceId = traceId } private enum CodingKeys: String, CodingKey { + case metadata = "metadata" case parsedResponse = "parsedResponse" + case rawResponse = "rawResponse" case traceId = "traceId" } } @@ -2593,19 +2599,25 @@ extension BedrockAgentRuntime { } public struct PreProcessingModelInvocationOutput: AWSDecodableShape { + public let metadata: Metadata? /// Details about the response from the Lambda parsing of the output of the pre-processing step. public let parsedResponse: PreProcessingParsedResponse? + public let rawResponse: RawResponse? /// The unique identifier of the trace. public let traceId: String? @inlinable - public init(parsedResponse: PreProcessingParsedResponse? = nil, traceId: String? = nil) { + public init(metadata: Metadata? = nil, parsedResponse: PreProcessingParsedResponse? = nil, rawResponse: RawResponse? = nil, traceId: String? = nil) { + self.metadata = metadata self.parsedResponse = parsedResponse + self.rawResponse = rawResponse self.traceId = traceId } private enum CodingKeys: String, CodingKey { + case metadata = "metadata" case parsedResponse = "parsedResponse" + case rawResponse = "rawResponse" case traceId = "traceId" } } @@ -2871,11 +2883,11 @@ extension BedrockAgentRuntime { } public struct RetrieveAndGenerateConfiguration: AWSEncodableShape { - /// The configuration used with the external source wrapper object in the retrieveAndGenerate function. + /// The configuration for the external source wrapper object in the retrieveAndGenerate function. public let externalSourcesConfiguration: ExternalSourcesRetrieveAndGenerateConfiguration? - /// Contains details about the resource being queried. + /// Contains details about the knowledge base for retrieving information and generating responses. public let knowledgeBaseConfiguration: KnowledgeBaseRetrieveAndGenerateConfiguration? - /// The type of resource that is queried by the request. + /// The type of resource that contains your data for retrieving information and generating responses. If you choose to use EXTERNAL_SOURCES, then currently only Claude 3 Sonnet models for knowledge bases are supported. public let type: RetrieveAndGenerateType @inlinable diff --git a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift index 3a6479e159..0d8dd23030 100644 --- a/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift +++ b/Sources/Soto/Services/BedrockRuntime/BedrockRuntime_shapes.swift @@ -59,6 +59,14 @@ extension BedrockRuntime { public var description: String { return self.rawValue } } + public enum GuardrailContentFilterStrength: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case high = "HIGH" + case low = "LOW" + case medium = "MEDIUM" + case none = "NONE" + public var description: String { return self.rawValue } + } + public enum GuardrailContentFilterType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case hate = "HATE" case insults = "INSULTS" @@ -644,15 +652,18 @@ extension BedrockRuntime { public let action: GuardrailAction /// The assessment details in the response from the guardrail.
public let assessments: [GuardrailAssessment] + /// The guardrail coverage details in the apply guardrail response. + public let guardrailCoverage: GuardrailCoverage? /// The output details in the response from the guardrail. public let outputs: [GuardrailOutputContent] /// The usage details in the response from the guardrail. public let usage: GuardrailUsage @inlinable - public init(action: GuardrailAction, assessments: [GuardrailAssessment], outputs: [GuardrailOutputContent], usage: GuardrailUsage) { + public init(action: GuardrailAction, assessments: [GuardrailAssessment], guardrailCoverage: GuardrailCoverage? = nil, outputs: [GuardrailOutputContent], usage: GuardrailUsage) { self.action = action self.assessments = assessments + self.guardrailCoverage = guardrailCoverage self.outputs = outputs self.usage = usage } @@ -660,6 +671,7 @@ extension BedrockRuntime { private enum CodingKeys: String, CodingKey { case action = "action" case assessments = "assessments" + case guardrailCoverage = "guardrailCoverage" case outputs = "outputs" case usage = "usage" } @@ -1016,6 +1028,8 @@ extension BedrockRuntime { public let contentPolicy: GuardrailContentPolicyAssessment? /// The contextual grounding policy used for the guardrail assessment. public let contextualGroundingPolicy: GuardrailContextualGroundingPolicyAssessment? + /// The invocation metrics for the guardrail assessment. + public let invocationMetrics: GuardrailInvocationMetrics? /// The sensitive information policy. public let sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? /// The topic policy. @@ -1024,9 +1038,10 @@ extension BedrockRuntime { public let wordPolicy: GuardrailWordPolicyAssessment? @inlinable - public init(contentPolicy: GuardrailContentPolicyAssessment? = nil, contextualGroundingPolicy: GuardrailContextualGroundingPolicyAssessment? = nil, sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? = nil, topicPolicy: GuardrailTopicPolicyAssessment? = nil, wordPolicy: GuardrailWordPolicyAssessment? = nil) { + public init(contentPolicy: GuardrailContentPolicyAssessment? = nil, contextualGroundingPolicy: GuardrailContextualGroundingPolicyAssessment? = nil, invocationMetrics: GuardrailInvocationMetrics? = nil, sensitiveInformationPolicy: GuardrailSensitiveInformationPolicyAssessment? = nil, topicPolicy: GuardrailTopicPolicyAssessment? = nil, wordPolicy: GuardrailWordPolicyAssessment? = nil) { self.contentPolicy = contentPolicy self.contextualGroundingPolicy = contextualGroundingPolicy + self.invocationMetrics = invocationMetrics self.sensitiveInformationPolicy = sensitiveInformationPolicy self.topicPolicy = topicPolicy self.wordPolicy = wordPolicy @@ -1035,6 +1050,7 @@ extension BedrockRuntime { private enum CodingKeys: String, CodingKey { case contentPolicy = "contentPolicy" case contextualGroundingPolicy = "contextualGroundingPolicy" + case invocationMetrics = "invocationMetrics" case sensitiveInformationPolicy = "sensitiveInformationPolicy" case topicPolicy = "topicPolicy" case wordPolicy = "wordPolicy" @@ -1074,19 +1090,23 @@ extension BedrockRuntime { public let action: GuardrailContentPolicyAction /// The guardrail confidence. public let confidence: GuardrailContentFilterConfidence + /// The filter strength setting for the guardrail content filter. + public let filterStrength: GuardrailContentFilterStrength? /// The guardrail type. 
public let type: GuardrailContentFilterType @inlinable - public init(action: GuardrailContentPolicyAction, confidence: GuardrailContentFilterConfidence, type: GuardrailContentFilterType) { + public init(action: GuardrailContentPolicyAction, confidence: GuardrailContentFilterConfidence, filterStrength: GuardrailContentFilterStrength? = nil, type: GuardrailContentFilterType) { self.action = action self.confidence = confidence + self.filterStrength = filterStrength self.type = type } private enum CodingKeys: String, CodingKey { case action = "action" case confidence = "confidence" + case filterStrength = "filterStrength" case type = "type" } } @@ -1163,6 +1183,20 @@ extension BedrockRuntime { } } + public struct GuardrailCoverage: AWSDecodableShape { + /// The text characters of the guardrail coverage details. + public let textCharacters: GuardrailTextCharactersCoverage? + + @inlinable + public init(textCharacters: GuardrailTextCharactersCoverage? = nil) { + self.textCharacters = textCharacters + } + + private enum CodingKeys: String, CodingKey { + case textCharacters = "textCharacters" + } + } + public struct GuardrailCustomWord: AWSDecodableShape { /// The action for the custom word. public let action: GuardrailWordPolicyAction @@ -1181,6 +1215,28 @@ extension BedrockRuntime { } } + public struct GuardrailInvocationMetrics: AWSDecodableShape { + /// The coverage details for the guardrail invocation metrics. + public let guardrailCoverage: GuardrailCoverage? + /// The processing latency details for the guardrail invocation metrics. + public let guardrailProcessingLatency: Int64? + /// The usage details for the guardrail invocation metrics. + public let usage: GuardrailUsage? + + @inlinable + public init(guardrailCoverage: GuardrailCoverage? = nil, guardrailProcessingLatency: Int64? = nil, usage: GuardrailUsage? = nil) { + self.guardrailCoverage = guardrailCoverage + self.guardrailProcessingLatency = guardrailProcessingLatency + self.usage = usage + } + + private enum CodingKeys: String, CodingKey { + case guardrailCoverage = "guardrailCoverage" + case guardrailProcessingLatency = "guardrailProcessingLatency" + case usage = "usage" + } + } + public struct GuardrailManagedWord: AWSDecodableShape { /// The action for the managed word. public let action: GuardrailWordPolicyAction @@ -1333,6 +1389,24 @@ extension BedrockRuntime { } } + public struct GuardrailTextCharactersCoverage: AWSDecodableShape { + /// The text characters that were guarded by the guardrail coverage. + public let guarded: Int? + /// The total text characters by the guardrail coverage. + public let total: Int? + + @inlinable + public init(guarded: Int? = nil, total: Int? = nil) { + self.guarded = guarded + self.total = total + } + + private enum CodingKeys: String, CodingKey { + case guarded = "guarded" + case total = "total" + } + } + public struct GuardrailTopic: AWSDecodableShape { /// The action the guardrail should take when it intervenes on a topic. 
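A hedged sketch of reading the new coverage and invocation-metric fields. The response value is assumed to come from an ApplyGuardrail call made elsewhere, and the response type name is an assumption since this hunk only shows the added members.

```swift
import SotoBedrockRuntime

// Hedged sketch: summarize the new guardrail coverage and invocation
// metrics. `output` is assumed to be an ApplyGuardrail response obtained
// elsewhere.
func summarizeGuardrailRun(_ output: BedrockRuntime.ApplyGuardrailResponse) {
    if let characters = output.guardrailCoverage?.textCharacters {
        print("guarded \(characters.guarded ?? 0) of \(characters.total ?? 0) text characters")
    }
    for assessment in output.assessments {
        if let metrics = assessment.invocationMetrics {
            print("guardrail processing latency: \(metrics.guardrailProcessingLatency ?? 0)")
        }
    }
}
```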
public let action: GuardrailTopicPolicyAction diff --git a/Sources/Soto/Services/Budgets/Budgets_shapes.swift b/Sources/Soto/Services/Budgets/Budgets_shapes.swift index 5d13d334cf..a31a2cd2a6 100644 --- a/Sources/Soto/Services/Budgets/Budgets_shapes.swift +++ b/Sources/Soto/Services/Budgets/Budgets_shapes.swift @@ -575,7 +575,7 @@ extension Budgets { try self.definition.validate(name: "\(name).definition") try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 618) try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, min: 32) - try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|us-iso-east-1|us-isob-east-1):iam::\\d{12}:role(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$") + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:aws(-cn|-us-gov|-iso|-iso-[a-z]{1})?:iam::\\d{12}:role(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$") try self.resourceTags?.forEach { try $0.validate(name: "\(name).resourceTags[]") } @@ -1593,7 +1593,7 @@ extension Budgets { try self.validate(self.groups, name: "groups", parent: name, min: 1) try self.validate(self.policyArn, name: "policyArn", parent: name, max: 684) try self.validate(self.policyArn, name: "policyArn", parent: name, min: 25) - try self.validate(self.policyArn, name: "policyArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|us-iso-east-1|us-isob-east-1):iam::(\\d{12}|aws):policy(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$") + try self.validate(self.policyArn, name: "policyArn", parent: name, pattern: "^arn:aws(-cn|-us-gov|-iso|-iso-[a-z]{1})?:iam::(\\d{12}|aws):policy(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$") try self.roles?.forEach { try validate($0, name: "roles[]", parent: name, max: 576) try validate($0, name: "roles[]", parent: name, min: 1) @@ -2002,7 +2002,7 @@ extension Budgets { try self.definition?.validate(name: "\(name).definition") try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 618) try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, min: 32) - try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-us-gov|us-iso-east-1|us-isob-east-1):iam::\\d{12}:role(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$") + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:aws(-cn|-us-gov|-iso|-iso-[a-z]{1})?:iam::\\d{12}:role(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$") try self.subscribers?.forEach { try $0.validate(name: "\(name).subscribers[]") } diff --git a/Sources/Soto/Services/Chatbot/Chatbot_shapes.swift b/Sources/Soto/Services/Chatbot/Chatbot_shapes.swift index 9976fe7d04..0402d0f964 100644 --- a/Sources/Soto/Services/Chatbot/Chatbot_shapes.swift +++ b/Sources/Soto/Services/Chatbot/Chatbot_shapes.swift @@ -57,18 +57,25 @@ extension Chatbot { public let loggingLevel: String? /// The Amazon Resource Names (ARNs) of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String] + /// Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration. + /// For example, if Amazon Chime is disabled. + public let state: String? + /// Provided if State is DISABLED. Provides context as to why the resource is disabled. + public let stateReason: String? 
/// A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs. public let tags: [Tag]? /// A description of the webhook. We recommend using the convention RoomName/WebhookName. For more information, see Tutorial: Get started with Amazon Chime in the AWS Chatbot Administrator Guide. public let webhookDescription: String @inlinable - public init(chatConfigurationArn: String, configurationName: String? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], tags: [Tag]? = nil, webhookDescription: String) { + public init(chatConfigurationArn: String, configurationName: String? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], state: String? = nil, stateReason: String? = nil, tags: [Tag]? = nil, webhookDescription: String) { self.chatConfigurationArn = chatConfigurationArn self.configurationName = configurationName self.iamRoleArn = iamRoleArn self.loggingLevel = loggingLevel self.snsTopicArns = snsTopicArns + self.state = state + self.stateReason = stateReason self.tags = tags self.webhookDescription = webhookDescription } @@ -79,12 +86,19 @@ extension Chatbot { case iamRoleArn = "IamRoleArn" case loggingLevel = "LoggingLevel" case snsTopicArns = "SnsTopicArns" + case state = "State" + case stateReason = "StateReason" case tags = "Tags" case webhookDescription = "WebhookDescription" } } public struct ConfiguredTeam: AWSDecodableShape { + /// Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration. + /// For example, if Amazon Chime is disabled. + public let state: String? + /// Provided if State is DISABLED. Provides context as to why the resource is disabled. + public let stateReason: String? /// The ID of the Microsoft Teams authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more information, see Step 1: Configure a Microsoft Teams client in the AWS Chatbot Administrator Guide. public let teamId: String /// The name of the Microsoft Teams Team. @@ -93,13 +107,17 @@ extension Chatbot { public let tenantId: String @inlinable - public init(teamId: String, teamName: String? = nil, tenantId: String) { + public init(state: String? = nil, stateReason: String? = nil, teamId: String, teamName: String? = nil, tenantId: String) { + self.state = state + self.stateReason = stateReason self.teamId = teamId self.teamName = teamName self.tenantId = tenantId } private enum CodingKeys: String, CodingKey { + case state = "State" + case stateReason = "StateReason" case teamId = "TeamId" case teamName = "TeamName" case tenantId = "TenantId" @@ -1031,13 +1049,18 @@ extension Chatbot { public let slackTeamName: String /// The ARNs of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String] + /// Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration. + /// For example, if Amazon Chime is disabled. + public let state: String? + /// Provided if State is DISABLED. Provides context as to why the resource is disabled. + public let stateReason: String? /// A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs. public let tags: [Tag]? /// Enables use of a user role requirement in your chat configuration. public let userAuthorizationRequired: Bool? 
@inlinable - public init(chatConfigurationArn: String, configurationName: String? = nil, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, slackChannelId: String, slackChannelName: String, slackTeamId: String, slackTeamName: String, snsTopicArns: [String], tags: [Tag]? = nil, userAuthorizationRequired: Bool? = nil) { + public init(chatConfigurationArn: String, configurationName: String? = nil, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, slackChannelId: String, slackChannelName: String, slackTeamId: String, slackTeamName: String, snsTopicArns: [String], state: String? = nil, stateReason: String? = nil, tags: [Tag]? = nil, userAuthorizationRequired: Bool? = nil) { self.chatConfigurationArn = chatConfigurationArn self.configurationName = configurationName self.guardrailPolicyArns = guardrailPolicyArns @@ -1048,6 +1071,8 @@ extension Chatbot { self.slackTeamId = slackTeamId self.slackTeamName = slackTeamName self.snsTopicArns = snsTopicArns + self.state = state + self.stateReason = stateReason self.tags = tags self.userAuthorizationRequired = userAuthorizationRequired } @@ -1063,6 +1088,8 @@ extension Chatbot { case slackTeamId = "SlackTeamId" case slackTeamName = "SlackTeamName" case snsTopicArns = "SnsTopicArns" + case state = "State" + case stateReason = "StateReason" case tags = "Tags" case userAuthorizationRequired = "UserAuthorizationRequired" } @@ -1103,16 +1130,25 @@ extension Chatbot { public let slackTeamId: String /// The name of the Slack workspace. public let slackTeamName: String + /// Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration. + /// For example, if Amazon Chime is disabled. + public let state: String? + /// Provided if State is DISABLED. Provides context as to why the resource is disabled. + public let stateReason: String? @inlinable - public init(slackTeamId: String, slackTeamName: String) { + public init(slackTeamId: String, slackTeamName: String, state: String? = nil, stateReason: String? = nil) { self.slackTeamId = slackTeamId self.slackTeamName = slackTeamName + self.state = state + self.stateReason = stateReason } private enum CodingKeys: String, CodingKey { case slackTeamId = "SlackTeamId" case slackTeamName = "SlackTeamName" + case state = "State" + case stateReason = "StateReason" } } @@ -1189,6 +1225,11 @@ extension Chatbot { public let loggingLevel: String? /// The Amazon Resource Names (ARNs) of the SNS topics that deliver notifications to AWS Chatbot. public let snsTopicArns: [String] + /// Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration. + /// For example, if Amazon Chime is disabled. + public let state: String? + /// Provided if State is DISABLED. Provides context as to why the resource is disabled. + public let stateReason: String? /// A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs. public let tags: [Tag]? /// The ID of the Microsoft Teams authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more information, see Step 1: Configure a Microsoft Teams client in the AWS Chatbot Administrator Guide. @@ -1201,7 +1242,7 @@ extension Chatbot { public let userAuthorizationRequired: Bool? 
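A hedged sketch of the new State/StateReason fields. The surrounding struct name is elided in this hunk, so SlackChannelConfiguration is an assumption, and the configurations array is assumed to come from a describe call made elsewhere.

```swift
import SotoChatbot

// Hedged sketch: report Slack channel configurations that the organization's
// Chatbot policy has disabled, using the new State/StateReason fields.
func reportDisabledConfigurations(_ configurations: [Chatbot.SlackChannelConfiguration]) {
    for configuration in configurations where configuration.state == "DISABLED" {
        print("\(configuration.slackChannelName): \(configuration.stateReason ?? "no reason provided")")
    }
}
```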
@inlinable - public init(channelId: String, channelName: String? = nil, chatConfigurationArn: String, configurationName: String? = nil, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], tags: [Tag]? = nil, teamId: String, teamName: String? = nil, tenantId: String, userAuthorizationRequired: Bool? = nil) { + public init(channelId: String, channelName: String? = nil, chatConfigurationArn: String, configurationName: String? = nil, guardrailPolicyArns: [String]? = nil, iamRoleArn: String, loggingLevel: String? = nil, snsTopicArns: [String], state: String? = nil, stateReason: String? = nil, tags: [Tag]? = nil, teamId: String, teamName: String? = nil, tenantId: String, userAuthorizationRequired: Bool? = nil) { self.channelId = channelId self.channelName = channelName self.chatConfigurationArn = chatConfigurationArn @@ -1210,6 +1251,8 @@ extension Chatbot { self.iamRoleArn = iamRoleArn self.loggingLevel = loggingLevel self.snsTopicArns = snsTopicArns + self.state = state + self.stateReason = stateReason self.tags = tags self.teamId = teamId self.teamName = teamName @@ -1226,6 +1269,8 @@ extension Chatbot { case iamRoleArn = "IamRoleArn" case loggingLevel = "LoggingLevel" case snsTopicArns = "SnsTopicArns" + case state = "State" + case stateReason = "StateReason" case tags = "Tags" case teamId = "TeamId" case teamName = "TeamName" diff --git a/Sources/Soto/Services/CloudFormation/CloudFormation_api.swift b/Sources/Soto/Services/CloudFormation/CloudFormation_api.swift index 2ecef25693..731537f07b 100644 --- a/Sources/Soto/Services/CloudFormation/CloudFormation_api.swift +++ b/Sources/Soto/Services/CloudFormation/CloudFormation_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS CloudFormation service. /// -/// CloudFormation CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure. With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you. For more information about CloudFormation, see the CloudFormation product page. CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com. +/// CloudFormation CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Amazon EC2 Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure. 
With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you. For more information about CloudFormation, see the CloudFormation product page. CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com. public struct CloudFormation: AWSService { // MARK: Member variables @@ -124,7 +124,7 @@ public struct CloudFormation: AWSService { return try await self.activateOrganizationsAccess(input, logger: logger) } - /// Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide. Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. + /// Activates a public third-party extension, making it available for use in stack templates. Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Using public extensions in the CloudFormation User Guide. @Sendable @inlinable public func activateType(_ input: ActivateTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ActivateTypeOutput { @@ -137,7 +137,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Activates a public third-party extension, making it available for use in stack templates. For more information, see Using public extensions in the CloudFormation User Guide. Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. + /// Activates a public third-party extension, making it available for use in stack templates. Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Using public extensions in the CloudFormation User Guide. /// /// Parameters: /// - autoUpdate: Whether to automatically update the extension in this account and Region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated. The default is true. @@ -180,7 +180,7 @@ public struct CloudFormation: AWSService { return try await self.activateType(input, logger: logger) } - /// Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and Region. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. + /// Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and Region. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide. 
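For reference, a minimal sketch of calling BatchDescribeTypeConfigurations with the input-struct overload shown in this diff. The TypeConfigurationIdentifier and TypeConfigurationDetails member names are assumed from the CloudFormation API shapes and should be checked against the generated Soto code:

import SotoCloudFormation

/// Hypothetical helper: fetch the account-and-Region configuration data for a
/// registered extension, given its type ARN.
func printTypeConfigurations(cloudFormation: CloudFormation, typeArn: String) async throws {
    let input = CloudFormation.BatchDescribeTypeConfigurationsInput(
        typeConfigurationIdentifiers: [CloudFormation.TypeConfigurationIdentifier(typeArn: typeArn)]
    )
    let output = try await cloudFormation.batchDescribeTypeConfigurations(input)
    for details in output.typeConfigurations ?? [] {
        // `configuration` holds the JSON configuration document, if one is set.
        print("\(details.typeName ?? typeArn): \(details.configuration ?? "<no configuration>")")
    }
}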
@Sendable @inlinable public func batchDescribeTypeConfigurations(_ input: BatchDescribeTypeConfigurationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchDescribeTypeConfigurationsOutput { @@ -193,7 +193,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and Region. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. + /// Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry for the account and Region. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide. /// /// Parameters: /// - typeConfigurationIdentifiers: The list of identifiers for the desired extension configurations. @@ -241,7 +241,7 @@ public struct CloudFormation: AWSService { return try await self.cancelUpdateStack(input, logger: logger) } - /// For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again. A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll back all changes after a failed stack update. For example, you might have a stack that's rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail. + /// For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again. A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll back all changes after a failed stack update. For example, you might have a stack that's rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail. @Sendable @inlinable public func continueUpdateRollback(_ input: ContinueUpdateRollbackInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ContinueUpdateRollbackOutput { @@ -254,12 +254,12 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again. A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll back all changes after a failed stack update. 
For example, you might have a stack that's rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail. + /// For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again. A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll back all changes after a failed stack update. For example, you might have a stack that's rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail. /// /// Parameters: /// - clientRequestToken: A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to continue the rollback to a stack with the same name. You might retry ContinueUpdateRollback requests to ensure that CloudFormation successfully received them. - /// - resourcesToSkip: A list of the logical IDs of the resources that CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason. Specify this property to skip rolling back resources that CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable. Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources. To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED. Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy. 
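A minimal sketch of issuing the continue-rollback call described here, using the ContinueUpdateRollbackInput overload shown in this diff. The helper is hypothetical; resourcesToSkip entries use the NestedStackName.ResourceLogicalID form documented above:

import SotoCloudFormation

/// Hypothetical helper: resume a rollback for a stack stuck in
/// UPDATE_ROLLBACK_FAILED, optionally skipping resources that cannot be
/// rolled back (see the ResourcesToSkip caveats above).
func resumeRollback(
    cloudFormation: CloudFormation,
    stackName: String,
    skipping resourcesToSkip: [String] = []
) async throws {
    let input = CloudFormation.ContinueUpdateRollbackInput(
        resourcesToSkip: resourcesToSkip.isEmpty ? nil : resourcesToSkip,
        stackName: stackName
    )
    _ = try await cloudFormation.continueUpdateRollback(input)
}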
- /// - roleARN: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to roll back the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials. + /// - resourcesToSkip: A list of the logical IDs of the resources that CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason. Specify this property to skip rolling back resources that CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable. Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources. To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED. Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Continue rolling back from failed nested stack updates. + /// - roleARN: The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to roll back the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials. /// - stackName: The name or the unique ID of the stack that you want to continue rolling back. Don't specify the name of a nested stack (a stack that was created by using the AWS::CloudFormation::Stack resource). 
Instead, use this operation on the parent stack (the stack that contains the AWS::CloudFormation::Stack resource). /// - logger: Logger use during operation @inlinable @@ -295,19 +295,19 @@ public struct CloudFormation: AWSService { /// Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that CloudFormation will create. If you create a change set for an existing stack, CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack. To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE. To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. To create a change set for an import operation, specify IMPORT for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action. When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. CloudFormation doesn't make changes until you execute the change set. To create a change set for the entire stack hierarchy, set IncludeNestedStacks to True. /// /// Parameters: - /// - capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. 
If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. This capacity doesn't apply to creating change sets, and specifying it when creating change sets has no effect. If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability. For more information about macros, see Using CloudFormation macros to perform custom processing on templates. Only one of the Capabilities and ResourceType parameters can be specified. + /// - capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. This capacity doesn't apply to creating change sets, and specifying it when creating change sets has no effect. If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability. For more information about macros, see Perform custom processing on CloudFormation templates with template macros. Only one of the Capabilities and ResourceType parameters can be specified. /// - changeSetName: The name of the change set. The name must be unique among all change sets that are associated with the specified stack. 
A change set name can contain only alphanumeric, case sensitive characters, and hyphens. It must start with an alphabetical character and can't exceed 128 characters. /// - changeSetType: The type of change set operation. To create a change set for a new stack, specify CREATE. To create a change set for an existing stack, specify UPDATE. To create a change set for an import operation, specify IMPORT. If you create a change set for a new stack, CloudFormation creates a stack with a unique stack ID, but no template or resources. The stack will be in the REVIEW_IN_PROGRESS state until you execute the change set. By default, CloudFormation specifies UPDATE. You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack. /// - clientToken: A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that CloudFormation successfully received them. /// - description: A description to help you identify this change set. - /// - importExistingResources: Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Bringing existing resources into CloudFormation management in the CloudFormation User Guide. + /// - importExistingResources: Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Import Amazon Web Services resources into a CloudFormation stack with a resource import in the CloudFormation User Guide. /// - includeNestedStacks: Creates a change set for the all nested stacks specified in the template. The default behavior of this action is set to False. To include nested sets in a change set, specify True. - /// - notificationARNs: The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list. + /// - notificationARNs: The Amazon Resource Names (ARNs) of Amazon SNS topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list. /// - onStackFailure: Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values: DELETE - Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED. DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true for the DisableRollback parameter to the ExecuteChangeSet API operation. ROLLBACK - if the stack creation fails, roll back the stack. 
This is equivalent to specifying false for the DisableRollback parameter to the ExecuteChangeSet API operation. For nested stacks, when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted. /// - parameters: A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type. /// - resourcesToImport: The resources to import into your stack. - /// - resourceTypes: The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Controlling access with Identity and Access Management in the CloudFormation User Guide. Only one of the Capabilities and ResourceType parameters can be specified. - /// - roleARN: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes when executing the change set. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials. + /// - resourceTypes: The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Control access with Identity and Access Management in the CloudFormation User Guide. Only one of the Capabilities and ResourceType parameters can be specified. + /// - roleARN: The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes when executing the change set. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials. /// - rollbackConfiguration: The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards. /// - stackName: The name or the unique ID of the stack for which you are creating a change set. 
CloudFormation generates the change set by comparing this stack's information with the information that you submit, such as a modified template or different parameter input values. /// - tags: Key-value pairs to associate with this stack. CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 50 tags. @@ -400,7 +400,7 @@ public struct CloudFormation: AWSService { return try await self.createGeneratedTemplate(input, logger: logger) } - /// Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacks operation. + /// Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacks operation. For more information about creating a stack and monitoring stack progress, see Managing Amazon Web Services resources as a single unit with CloudFormation stacks in the CloudFormation User Guide. @Sendable @inlinable public func createStack(_ input: CreateStackInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateStackOutput { @@ -413,26 +413,26 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacks operation. + /// Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacks operation. For more information about creating a stack and monitoring stack progress, see Managing Amazon Web Services resources as a single unit with CloudFormation stacks in the CloudFormation User Guide. /// /// Parameters: - /// - capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. 
If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability. You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Using CloudFormation macros to perform custom processing on templates. Only one of the Capabilities and ResourceType parameters can be specified. + /// - capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability. You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Perform custom processing on CloudFormation templates with template macros. 
Only one of the Capabilities and ResourceType parameters can be specified. /// - clientRequestToken: A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that CloudFormation successfully received them. All events initiated by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1. In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002. /// - disableRollback: Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure, but not both. Default: false - /// - enableTerminationProtection: Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. - /// - notificationARNs: The Amazon Simple Notification Service (Amazon SNS) topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI). + /// - enableTerminationProtection: Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protect CloudFormation stacks from being deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. + /// - notificationARNs: The Amazon SNS topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI). /// - onFailure: Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback, but not both. Default: ROLLBACK /// - parameters: A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type. - /// - resourceTypes: The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. 
Use the following syntax to describe template resource types: AWS::* (for all Amazon Web Services resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular Amazon Web Services service), and AWS::service_name::resource_logical_ID (for a specific Amazon Web Services resource). If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified. + /// - resourceTypes: The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all Amazon Web Services resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular Amazon Web Services service), and AWS::service_name::resource_logical_ID (for a specific Amazon Web Services resource). If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Control access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified. /// - retainExceptOnCreate: When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain. Default: false - /// - roleARN: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials. + /// - roleARN: The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials. /// - rollbackConfiguration: The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards. 
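As a usage sketch for the CreateStack parameters documented above (an illustration only; the stack name and inline template are placeholders, and checking stack progress afterwards via DescribeStacks is left to the caller):

import SotoCloudFormation

/// Hypothetical helper: create a stack from an inline template body.
/// The placeholder template declares a single SNS topic, so no IAM
/// capabilities need to be acknowledged.
func createDemoStack(cloudFormation: CloudFormation) async throws -> String? {
    let templateBody = """
    {
      "Resources": {
        "DemoTopic": { "Type": "AWS::SNS::Topic" }
      }
    }
    """
    let input = CloudFormation.CreateStackInput(
        stackName: "soto-demo-stack",
        templateBody: templateBody
    )
    let output = try await cloudFormation.createStack(input)
    return output.stackId
}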
/// - stackName: The name that's associated with the stack. The name must be unique in the Region in which you are creating the stack. A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetical character and can't be longer than 128 characters. - /// - stackPolicyBody: Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. + /// - stackPolicyBody: Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. /// - stackPolicyURL: Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. /// - tags: Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified. - /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. - /// - templateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to the Template anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. + /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. + /// - templateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. /// - timeoutInMinutes: The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false, the stack will be rolled back. /// - logger: Logger use during operation @inlinable @@ -549,7 +549,7 @@ public struct CloudFormation: AWSService { /// - administrationRoleARN: The Amazon Resource Name (ARN) of the IAM role to use to create this stack set. Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting Permissions for Stack Set Operations in the CloudFormation User Guide. /// - autoDeployment: Describes whether StackSets automatically deploys to Organizations accounts that are added to the target organization or organizational unit (OU). 
Specify only if PermissionModel is SERVICE_MANAGED. /// - callAs: [Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. By default, SELF is specified. Use SELF for stack sets with self-managed permissions. To create a stack set with service-managed permissions while signed in to the management account, specify SELF. To create a stack set with service-managed permissions while signed in to a delegated administrator account, specify DELEGATED_ADMIN. Your Amazon Web Services account must be registered as a delegated admin in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide. Stack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated administrators. - /// - capabilities: In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail. + /// - capabilities: In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. 
The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail. /// - clientRequestToken: A unique identifier for this CreateStackSet request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create another stack set with the same name. You might retry CreateStackSet requests to ensure that CloudFormation successfully received them. If you don't specify an operation ID, the SDK generates one automatically. /// - description: A description of the stack set. You can use the description to identify the stack set's purpose or other important information. /// - executionRoleName: The name of the IAM execution role to use to create the stack set. If you do not specify an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation. Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets. @@ -559,8 +559,8 @@ public struct CloudFormation: AWSService { /// - stackId: The stack ID you are importing into a new stack set. Specify the Amazon Resource Name (ARN) of the stack. /// - stackSetName: The name to associate with the stack set. The name must be unique in the Region where you create your stack set. A stack name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an alphabetic character and can't be longer than 128 characters. /// - tags: The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates these tags to supported resources that are created in the stacks. A maximum number of 50 tags can be specified. If you specify tags as part of a CreateStackSet action, CloudFormation checks to see if you have the required IAM permission to tag resources. If you don't, the entire CreateStackSet action fails with an access denied error, and the stack set is not created. 
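A minimal sketch of creating a stack set from a template stored in Amazon S3. The CreateStackSetInput shape and its member names are assumed to follow the CreateStackSet parameters documented above and should be verified against the generated code:

import SotoCloudFormation

/// Hypothetical helper: create a self-managed-permissions stack set from a
/// template URL and return the new stack set ID.
func createDemoStackSet(cloudFormation: CloudFormation, templateURL: String) async throws -> String? {
    let input = CloudFormation.CreateStackSetInput(
        description: "Demo stack set created with Soto",
        stackSetName: "soto-demo-stack-set",
        templateURL: templateURL
    )
    let output = try await cloudFormation.createStackSet(input)
    return output.stackSetId
}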
- /// - templateBody: The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. - /// - templateURL: The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. + /// - templateBody: The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. + /// - templateURL: The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. /// - logger: Logger use during operation @inlinable public func createStackSet( @@ -723,7 +723,7 @@ public struct CloudFormation: AWSService { return try await self.deleteGeneratedTemplate(input, logger: logger) } - /// Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks don't show up in the DescribeStacks operation if the deletion has been completed successfully. + /// Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks don't show up in the DescribeStacks operation if the deletion has been completed successfully. For more information about deleting a stack, see Delete a stack from the CloudFormation console in the CloudFormation User Guide. @Sendable @inlinable public func deleteStack(_ input: DeleteStackInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -736,13 +736,13 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks don't show up in the DescribeStacks operation if the deletion has been completed successfully. + /// Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks don't show up in the DescribeStacks operation if the deletion has been completed successfully. For more information about deleting a stack, see Delete a stack from the CloudFormation console in the CloudFormation User Guide. /// /// Parameters: /// - clientRequestToken: A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry DeleteStack requests to ensure that CloudFormation successfully received them. All events initiated by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1. In the console, stack operations display the client request token on the Events tab. 
Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002. /// - deletionMode: Specifies the deletion mode for the stack. Possible values are: STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter. FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure. /// - retainResources: For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. During deletion, CloudFormation deletes the stack but doesn't delete the retained resources. Retaining resources is useful when you can't delete a resource, such as a non-empty S3 bucket, but you want to delete the stack. - /// - roleARN: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to delete the stack. CloudFormation uses the role's credentials to make calls on your behalf. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials. + /// - roleARN: The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to delete the stack. CloudFormation uses the role's credentials to make calls on your behalf. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials. /// - stackName: The name or the unique stack ID that's associated with the stack. /// - logger: Logger use during operation @inlinable @@ -884,7 +884,7 @@ public struct CloudFormation: AWSService { return try await self.deregisterType(input, logger: logger) } - /// Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see CloudFormation Quotas in the CloudFormation User Guide. + /// Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see Understand CloudFormation quotas in the CloudFormation User Guide. @Sendable @inlinable public func describeAccountLimits(_ input: DescribeAccountLimitsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAccountLimitsOutput { @@ -897,7 +897,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see CloudFormation Quotas in the CloudFormation User Guide. + /// Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see Understand CloudFormation quotas in the CloudFormation User Guide. /// /// Parameters: /// - nextToken: A string that identifies the next page of limits that you want to retrieve. 
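A minimal sketch of paging through the account limits using the nextToken parameter described above. The output member names (accountLimits, name, value) are assumed from the AccountLimit shape in the CloudFormation API:

import SotoCloudFormation

/// Hypothetical helper: collect all CloudFormation account limits by
/// following `nextToken` until the service stops returning one.
func fetchAccountLimits(cloudFormation: CloudFormation) async throws -> [String: Int] {
    var limits: [String: Int] = [:]
    var nextToken: String?
    repeat {
        let output = try await cloudFormation.describeAccountLimits(
            CloudFormation.DescribeAccountLimitsInput(nextToken: nextToken)
        )
        for limit in output.accountLimits ?? [] {
            if let name = limit.name, let value = limit.value {
                limits[name] = value
            }
        }
        nextToken = output.nextToken
    } while nextToken != nil
    return limits
}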
@@ -913,7 +913,7 @@ public struct CloudFormation: AWSService { return try await self.describeAccountLimits(input, logger: logger) } - /// Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the CloudFormation User Guide. + /// Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set. For more information, see Update CloudFormation stacks using change sets in the CloudFormation User Guide. @Sendable @inlinable public func describeChangeSet(_ input: DescribeChangeSetInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeChangeSetOutput { @@ -926,7 +926,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the CloudFormation User Guide. + /// Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set. For more information, see Update CloudFormation stacks using change sets in the CloudFormation User Guide. /// /// Parameters: /// - changeSetName: The name or Amazon Resource Name (ARN) of the change set that you want to describe. @@ -1047,7 +1047,7 @@ public struct CloudFormation: AWSService { return try await self.describeOrganizationsAccess(input, logger: logger) } - /// Returns information about a CloudFormation extension publisher. If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account. For more information about registering as a publisher, see: RegisterPublisher Publishing extensions to make them available for public use in the CloudFormation CLI User Guide + /// Returns information about a CloudFormation extension publisher. If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account. For more information about registering as a publisher, see: RegisterPublisher Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide @Sendable @inlinable public func describePublisher(_ input: DescribePublisherInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribePublisherOutput { @@ -1060,7 +1060,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns information about a CloudFormation extension publisher. If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account. For more information about registering as a publisher, see: RegisterPublisher Publishing extensions to make them available for public use in the CloudFormation CLI User Guide + /// Returns information about a CloudFormation extension publisher. If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account. For more information about registering as a publisher, see: RegisterPublisher Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide /// /// Parameters: /// - publisherId: The ID of the extension publisher. 
If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account. @@ -1105,7 +1105,7 @@ public struct CloudFormation: AWSService { return try await self.describeResourceScan(input, logger: logger) } - /// Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. + /// Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information about stack and resource drift, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. @Sendable @inlinable public func describeStackDriftDetectionStatus(_ input: DescribeStackDriftDetectionStatusInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeStackDriftDetectionStatusOutput { @@ -1118,7 +1118,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. + /// Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. 
For more information about stack and resource drift, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. /// /// Parameters: /// - stackDriftDetectionId: The ID of the drift detection results of this operation. CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number of drift results CloudFormation retains for any given stack, and for how long, may vary. @@ -1134,7 +1134,7 @@ public struct CloudFormation: AWSService { return try await self.describeStackDriftDetectionStatus(input, logger: logger) } - /// Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, see CloudFormation stack creation events in the CloudFormation User Guide. You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID). + /// Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, see Understand CloudFormation stack creation events in the CloudFormation User Guide. You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID). @Sendable @inlinable public func describeStackEvents(_ input: DescribeStackEventsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeStackEventsOutput { @@ -1147,7 +1147,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, see CloudFormation stack creation events in the CloudFormation User Guide. You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID). + /// Returns all stack related events for a specified stack in reverse chronological order. For more information about a stack's event history, see Understand CloudFormation stack creation events in the CloudFormation User Guide. You can list events for stacks that have failed to create or have been deleted by specifying the unique stack identifier (stack ID). /// /// Parameters: /// - nextToken: A string that identifies the next page of events that you want to retrieve. @@ -1236,7 +1236,7 @@ public struct CloudFormation: AWSService { return try await self.describeStackResource(input, logger: logger) } - /// Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects configuration drift. For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that haven't yet been checked for drift aren't included. Resources that don't currently support drift detection aren't checked, and so not included. For a list of resources that support drift detection, see Resources that Support Drift Detection. 
Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack. + /// Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects configuration drift. For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that haven't yet been checked for drift aren't included. Resources that don't currently support drift detection aren't checked, and so not included. For a list of resources that support drift detection, see Resource type support for imports and drift detection. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack. @Sendable @inlinable public func describeStackResourceDrifts(_ input: DescribeStackResourceDriftsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeStackResourceDriftsOutput { @@ -1249,7 +1249,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects configuration drift. For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that haven't yet been checked for drift aren't included. Resources that don't currently support drift detection aren't checked, and so not included. For a list of resources that support drift detection, see Resources that Support Drift Detection. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack. + /// Returns drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects configuration drift. For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that haven't yet been checked for drift aren't included. Resources that don't currently support drift detection aren't checked, and so not included. For a list of resources that support drift detection, see Resource type support for imports and drift detection. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack. /// /// Parameters: /// - maxResults: The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results. @@ -1274,7 +1274,7 @@ public struct CloudFormation: AWSService { return try await self.describeStackResourceDrifts(input, logger: logger) } - /// Returns Amazon Web Services resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned. Only the first 100 resources will be returned. 
If your stack has more resources than this, you should use ListStackResources instead. For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted. You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the CloudFormation User Guide. A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request. + /// Returns Amazon Web Services resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned. Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead. For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted. You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, see the CloudFormation User Guide. A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request. @Sendable @inlinable public func describeStackResources(_ input: DescribeStackResourcesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeStackResourcesOutput { @@ -1287,7 +1287,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns Amazon Web Services resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned. Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead. For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted. You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, go to the CloudFormation User Guide. A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request. + /// Returns Amazon Web Services resource descriptions for running and deleted stacks. If StackName is specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId is specified, the associated resources of the stack that the resource belongs to are returned. Only the first 100 resources will be returned. If your stack has more resources than this, you should use ListStackResources instead. For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the stack has been deleted. You must specify either StackName or PhysicalResourceId, but not both. In addition, you can specify LogicalResourceId to filter the returned result. For more information about resources, the LogicalResourceId and PhysicalResourceId, see the CloudFormation User Guide. 
A ValidationError is returned if you specify both StackName and PhysicalResourceId in the same request. /// /// Parameters: /// - logicalResourceId: The logical name of the resource as specified in the template. Default: There is no default value. @@ -1376,7 +1376,7 @@ public struct CloudFormation: AWSService { return try await self.describeStackSetOperation(input, logger: logger) } - /// Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created. For more information about a stack's event history, see CloudFormation stack creation events in the CloudFormation User Guide. If the stack doesn't exist, a ValidationError is returned. + /// Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created. For more information about a stack's event history, see Understand CloudFormation stack creation events in the CloudFormation User Guide. If the stack doesn't exist, a ValidationError is returned. @Sendable @inlinable public func describeStacks(_ input: DescribeStacksInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeStacksOutput { @@ -1389,7 +1389,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created. For more information about a stack's event history, see CloudFormation stack creation events in the CloudFormation User Guide. If the stack doesn't exist, a ValidationError is returned. + /// Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created. For more information about a stack's event history, see Understand CloudFormation stack creation events in the CloudFormation User Guide. If the stack doesn't exist, a ValidationError is returned. /// /// Parameters: /// - nextToken: A string that identifies the next page of stacks that you want to retrieve. @@ -1481,7 +1481,7 @@ public struct CloudFormation: AWSService { return try await self.describeTypeRegistration(input, logger: logger) } - /// Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources. For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection. DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. 
When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself. + /// Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources. For a list of stack resources that currently support drift detection, see Resource type support for imports and drift detection. DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself. @Sendable @inlinable public func detectStackDrift(_ input: DetectStackDriftInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DetectStackDriftOutput { @@ -1494,7 +1494,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources. For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection. DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself. 
+ /// Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources. For a list of stack resources that currently support drift detection, see Resource type support for imports and drift detection. DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources. When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself. /// /// Parameters: /// - logicalResourceIds: The logical names of any resources you want to use as filters. @@ -1513,7 +1513,7 @@ public struct CloudFormation: AWSService { return try await self.detectStackDrift(input, logger: logger) } - /// Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection. Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection. + /// Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection. Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resource type support for imports and drift detection. 
@Sendable @inlinable public func detectStackResourceDrift(_ input: DetectStackResourceDriftInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DetectStackResourceDriftOutput { @@ -1526,7 +1526,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection. Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection. + /// Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detect unmanaged configuration changes to stacks and resources with drift detection. Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection. Resources that don't currently support drift detection can't be checked. For a list of resources that support drift detection, see Resource type support for imports and drift detection. /// /// Parameters: /// - logicalResourceId: The logical name of the resource for which to return drift information. @@ -1600,8 +1600,8 @@ public struct CloudFormation: AWSService { /// /// Parameters: /// - parameters: A list of Parameter structures that specify input parameters. - /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the CloudFormation User Guide.) Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used. - /// - templateURL: Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. + /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used. + /// - templateURL: Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. 
Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. /// - logger: Logger use during operation @inlinable public func estimateTemplateCost( @@ -1774,9 +1774,9 @@ public struct CloudFormation: AWSService { /// - callAs: [Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. By default, SELF is specified. Use SELF for stack sets with self-managed permissions. If you are signed in to the management account, specify SELF. If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN. Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide. /// - stackName: The name or the stack ID that's associated with the stack, which aren't always interchangeable. For running stacks, you can specify either the stack's name or its unique stack ID. For deleted stack, you must specify the unique stack ID. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. /// - stackSetName: The name or unique ID of the stack set from which the stack was created. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. - /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. + /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. /// - templateSummaryConfig: Specifies options for the GetTemplateSummary API action. - /// - templateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see Template anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. + /// - templateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. /// - logger: Logger use during operation @inlinable public func getTemplateSummary( @@ -1878,7 +1878,7 @@ public struct CloudFormation: AWSService { return try await self.listChangeSets(input, logger: logger) } - /// Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function. For more information, see CloudFormation export stack output values. 
+ /// Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function. For more information, see Get exported outputs from a deployed CloudFormation stack. @Sendable @inlinable public func listExports(_ input: ListExportsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListExportsOutput { @@ -1891,7 +1891,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function. For more information, see CloudFormation export stack output values. + /// Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function. For more information, see Get exported outputs from a deployed CloudFormation stack. /// /// Parameters: /// - nextToken: A string (provided by the ListExports response output) that identifies the next page of exported output values that you asked to retrieve. @@ -2545,7 +2545,7 @@ public struct CloudFormation: AWSService { return try await self.listTypes(input, logger: logger) } - /// Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide. To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher. + /// Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide. To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher. @Sendable @inlinable public func publishType(_ input: PublishTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PublishTypeOutput { @@ -2558,7 +2558,7 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide. To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher. + /// Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public extensions are available for use by all CloudFormation users. For more information about publishing extensions, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide. 
To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher. /// /// Parameters: /// - arn: The Amazon Resource Name (ARN) of the extension. Conditional: You must specify Arn, or TypeName and Type. @@ -2630,7 +2630,7 @@ public struct CloudFormation: AWSService { return try await self.recordHandlerProgress(input, logger: logger) } - /// Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all Amazon Web Services Regions. For information about requirements for registering as a public extension publisher, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide. + /// Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all Amazon Web Services Regions. For information about requirements for registering as a public extension publisher, see Prerequisite: Registering your account to publish CloudFormation extensions in the CloudFormation Command Line Interface (CLI) User Guide. @Sendable @inlinable public func registerPublisher(_ input: RegisterPublisherInput, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterPublisherOutput { @@ -2643,11 +2643,11 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all Amazon Web Services Regions. For information about requirements for registering as a public extension publisher, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide. + /// Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are available for use by all CloudFormation users. This publisher ID applies to your account in all Amazon Web Services Regions. For information about requirements for registering as a public extension publisher, see Prerequisite: Registering your account to publish CloudFormation extensions in the CloudFormation Command Line Interface (CLI) User Guide. /// /// Parameters: /// - acceptTermsAndConditions: Whether you accept the Terms and Conditions for publishing extensions in the CloudFormation registry. You must accept the terms and conditions in order to register to publish public extensions to the CloudFormation registry. The default is false. - /// - connectionArn: If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account. For more information, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide. + /// - connectionArn: If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account. For more information, see Prerequisite: Registering your account to publish CloudFormation extensions in the CloudFormation Command Line Interface (CLI) User Guide. 
/// - logger: Logger use during operation @inlinable public func registerPublisher( @@ -2662,7 +2662,7 @@ public struct CloudFormation: AWSService { return try await self.registerPublisher(input, logger: logger) } - /// Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes: Validating the extension schema. Determining which handlers, if any, have been specified for the extension. Making the extension available for use in your account. For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide. You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary. Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request. Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. + /// Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes: Validating the extension schema. Determining which handlers, if any, have been specified for the extension. Making the extension available for use in your account. For more information about how to develop extensions and ready them for registration, see Creating resource types using the CloudFormation CLI in the CloudFormation Command Line Interface (CLI) User Guide. You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary. Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request. Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide. @Sendable @inlinable public func registerType(_ input: RegisterTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterTypeOutput { @@ -2675,13 +2675,13 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes: Validating the extension schema. Determining which handlers, if any, have been specified for the extension. Making the extension available for use in your account. For more information about how to develop extensions and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide. You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary. 
Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request. Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. + /// Registers an extension with the CloudFormation service. Registering an extension makes it available for use in CloudFormation templates in your Amazon Web Services account, and includes: Validating the extension schema. Determining which handlers, if any, have been specified for the extension. Making the extension available for use in your account. For more information about how to develop extensions and ready them for registration, see Creating resource types using the CloudFormation CLI in the CloudFormation Command Line Interface (CLI) User Guide. You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and per Region. Use DeregisterType to deregister specific extension versions if necessary. Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request. Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide. /// /// Parameters: /// - clientRequestToken: A unique identifier that acts as an idempotency key for this registration request. Specifying a client request token prevents CloudFormation from generating more than one version of an extension from the same registration request, even if the request is submitted multiple times. /// - executionRoleArn: The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension. For CloudFormation to assume the specified execution role, the role must contain a trust relationship with the CloudFormation service principal (resources.cloudformation.amazonaws.com). For more information about adding trust relationships, see Modifying a role trust policy in the Identity and Access Management User Guide. If your extension calls Amazon Web Services APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account. When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary session token, which it then passes to the resource type handler, thereby supplying your resource type with the appropriate credentials. /// - loggingConfig: Specifies logging configuration information for an extension. - /// - schemaHandlerPackage: A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. For information about generating a schema handler package for the extension you want to register, see submit in the CloudFormation CLI User Guide. The user registering the extension must be able to access the package in the S3 bucket. That's, the user needs to have GetObject permissions for the schema handler package. 
For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide. + /// - schemaHandlerPackage: A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. For information about generating a schema handler package for the extension you want to register, see submit in the CloudFormation Command Line Interface (CLI) User Guide. The user registering the extension must be able to access the package in the S3 bucket. That's, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide. /// - type: The kind of extension. /// - typeName: The name of the extension being registered. We suggest that extension names adhere to the following patterns: For resource types, company_or_organization::service::type. For modules, company_or_organization::service::type::MODULE. For hooks, MyCompany::Testing::MyTestHook. The following organization namespaces are reserved and can't be used in your extension names: Alexa AMZN Amazon AWS Custom Dev /// - logger: Logger use during operation @@ -2724,7 +2724,7 @@ public struct CloudFormation: AWSService { /// Parameters: /// - clientRequestToken: A unique identifier for this RollbackStack request. /// - retainExceptOnCreate: When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain. Default: false - /// - roleARN: The Amazon Resource Name (ARN) of an Identity and Access Management role that CloudFormation assumes to rollback the stack. + /// - roleARN: The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to rollback the stack. /// - stackName: The name that's associated with the stack. /// - logger: Logger use during operation @inlinable @@ -2761,7 +2761,7 @@ public struct CloudFormation: AWSService { /// /// Parameters: /// - stackName: The name or unique stack ID that you want to associate a policy with. - /// - stackPolicyBody: Structure containing the stack policy body. For more information, go to Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. + /// - stackPolicyBody: Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. /// - stackPolicyURL: Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an Amazon S3 bucket in the same Amazon Web Services Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. /// - logger: Logger use during operation @inlinable @@ -2779,7 +2779,7 @@ public struct CloudFormation: AWSService { return try await self.setStackPolicy(input, logger: logger) } - /// Specifies the configuration data for a registered CloudFormation extension, in the given account and Region. To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. 
It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide. + /// Specifies the configuration data for a registered CloudFormation extension, in the given account and Region. To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide. It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Specify values stored in other services using dynamic references in the CloudFormation User Guide. @Sendable @inlinable public func setTypeConfiguration(_ input: SetTypeConfigurationInput, logger: Logger = AWSClient.loggingDisabled) async throws -> SetTypeConfigurationOutput { @@ -2792,10 +2792,10 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Specifies the configuration data for a registered CloudFormation extension, in the given account and Region. To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide. + /// Specifies the configuration data for a registered CloudFormation extension, in the given account and Region. To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide. It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Specify values stored in other services using dynamic references in the CloudFormation User Guide. /// /// Parameters: - /// - configuration: The configuration data for the extension, in this account and Region. The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide. + /// - configuration: The configuration data for the extension, in this account and Region. The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining the account-level configuration of an extension in the CloudFormation Command Line Interface (CLI) User Guide. /// - configurationAlias: An alias by which to refer to this extension configuration data. Conditional: Specifying a configuration alias is required when setting a configuration for a resource type extension. /// - type: The type of extension. Conditional: You must specify ConfigurationArn, or Type and TypeName. 
/// - typeArn: The Amazon Resource Name (ARN) for the extension, in this account and Region. For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region. Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version. @@ -2960,7 +2960,7 @@ public struct CloudFormation: AWSService { return try await self.stopStackSetOperation(input, logger: logger) } - /// Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide. If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing. To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType. Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide. + /// Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension before publishing in the CloudFormation Command Line Interface (CLI) User Guide. If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing. To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType. Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide. @Sendable @inlinable public func testType(_ input: TestTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> TestTypeOutput { @@ -2973,11 +2973,11 @@ public struct CloudFormation: AWSService { logger: logger ) } - /// Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide. 
@@ -2960,7 +2960,7 @@ public struct CloudFormation: AWSService {
return try await self.stopStackSetOperation(input, logger: logger)
}
- /// Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide. If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing. To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType. Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.
+ /// Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension before publishing in the CloudFormation Command Line Interface (CLI) User Guide. If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing. To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType. Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide.
@Sendable
@inlinable
public func testType(_ input: TestTypeInput, logger: Logger = AWSClient.loggingDisabled) async throws -> TestTypeOutput {
@@ -2973,11 +2973,11 @@ public struct CloudFormation: AWSService {
logger: logger
)
}
- /// Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide. If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing. To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType. Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.
+ /// Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry. For resource types, this includes passing all contracts tests defined for the type. For modules, this includes determining if the module's model meets all necessary requirements. For more information, see Testing your public extension before publishing in the CloudFormation Command Line Interface (CLI) User Guide. If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing. To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType. Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide.
///
/// Parameters:
/// - arn: The Amazon Resource Name (ARN) of the extension. Conditional: You must specify Arn, or TypeName and Type.
- /// - logDeliveryBucket: The S3 bucket to which CloudFormation delivers the contract test execution logs. CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of PASSED or FAILED. The user calling TestType must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions: GetObject PutObject For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Amazon Web Services Identity and Access Management User Guide.
+ /// - logDeliveryBucket: The S3 bucket to which CloudFormation delivers the contract test execution logs. CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of PASSED or FAILED. The user calling TestType must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions: GetObject PutObject For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide.
/// - type: The type of the extension to test. Conditional: You must specify Arn, or TypeName and Type.
/// - typeName: The name of the extension to test. Conditional: You must specify Arn, or TypeName and Type.
/// - versionId: The version of the extension to test. You can specify the version id with either Arn, or with TypeName and Type. If you don't specify a version, CloudFormation uses the default version of the extension in this account and Region for testing.
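
// --- Editor's usage sketch (not part of the patch) ---
// Kicking off contract testing as described in the rewritten testType docs above. The
// bucket and type names are placeholders, and the `.resource` case name is an assumption
// about the generated ThirdPartyType enum; a configured service client is assumed.
import SotoCloudFormation

func startContractTests(_ cloudFormation: CloudFormation) async throws {
    let input = CloudFormation.TestTypeInput(
        logDeliveryBucket: "my-contract-test-logs",   // caller needs s3:GetObject and s3:PutObject here
        type: .resource,
        typeName: "MyCompany::MyService::MyResource"
    )
    let output = try await cloudFormation.testType(input)
    // The returned TypeVersionArn can be fed to DescribeType to poll until the
    // test status becomes PASSED or FAILED.
    print(output)
}
// --- end sketch ---
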
@@ -3045,7 +3045,7 @@ public struct CloudFormation: AWSService {
return try await self.updateGeneratedTemplate(input, logger: logger)
}
- /// Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack through the DescribeStacks action. To get a copy of the template for an existing stack, you can use the GetTemplate action. For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.
+ /// Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack through the DescribeStacks action. To get a copy of the template for an existing stack, you can use the GetTemplate action. For more information about updating a stack and monitoring the progress of the update, see Managing Amazon Web Services resources as a single unit with CloudFormation stacks in the CloudFormation User Guide.
@Sendable
@inlinable
public func updateStack(_ input: UpdateStackInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateStackOutput {
@@ -3058,17 +3058,17 @@ public struct CloudFormation: AWSService {
logger: logger
)
}
- /// Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack through the DescribeStacks action. To get a copy of the template for an existing stack, you can use the GetTemplate action. For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.
+ /// Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack through the DescribeStacks action. To get a copy of the template for an existing stack, you can use the GetTemplate action. For more information about updating a stack and monitoring the progress of the update, see Managing Amazon Web Services resources as a single unit with CloudFormation stacks in the CloudFormation User Guide.
///
/// Parameters:
- /// - capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability. You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Only one of the Capabilities and ResourceType parameters can be specified.
+ /// - capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability. You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Perform custom processing on CloudFormation templates with template macros. Only one of the Capabilities and ResourceType parameters can be specified.
/// - clientRequestToken: A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that CloudFormation successfully received them. All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1. In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
/// - disableRollback: Preserve the state of previously provisioned resources when an operation fails. Default: False
/// - notificationARNs: Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that CloudFormation associates with the stack. Specify an empty list to remove all notification topics.
/// - parameters: A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.
- /// - resourceTypes: The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified.
+ /// - resourceTypes: The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Control access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified.
/// - retainExceptOnCreate: When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain. Default: false
- /// - roleARN: The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.
+ /// - roleARN: The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.
/// - rollbackConfiguration: The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.
/// - stackName: The name or unique stack ID of the stack to update.
/// - stackPolicyBody: Structure containing a new stack policy body. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you don't specify a stack policy, the current policy that is associated with the stack is unchanged.
@@ -3076,8 +3076,8 @@ public struct CloudFormation: AWSService {
/// - stackPolicyDuringUpdateURL: Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not both. If you want to update protected resources, specify a temporary overriding stack policy during this update. If you don't specify a stack policy, the current policy that is associated with the stack will be used.
/// - stackPolicyURL: Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. You might update the stack policy, for example, in order to protect a new resource that you created during a stack update. If you don't specify a stack policy, the current policy that is associated with the stack is unchanged.
/// - tags: Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags. If you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify an empty value, CloudFormation removes all associated tags.
- /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the CloudFormation User Guide.) Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.
- /// - templateURL: Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.
+ /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.
+ /// - templateURL: Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.
/// - usePreviousTemplate: Reuse the existing template that is associated with the stack that you are updating. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true.
/// - logger: Logger use during operation
@inlinable
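
// --- Editor's usage sketch (not part of the patch) ---
// An UpdateStack call touching the parameters documented above. Stack name, template URL,
// and parameter keys are placeholders; the Capability case name is an assumption about the
// generated enum, and a configured service client is assumed.
import SotoCloudFormation

func updateExampleStack(_ cloudFormation: CloudFormation) async throws {
    let input = CloudFormation.UpdateStackInput(
        capabilities: [.capabilityNamedIam],    // template creates named IAM resources
        parameters: [.init(parameterKey: "InstanceType", parameterValue: "t3.micro")],
        stackName: "my-app-stack",
        templateURL: "https://my-bucket.s3.us-east-1.amazonaws.com/template.yaml"
    )
    let output = try await cloudFormation.updateStack(input)
    // Track progress with DescribeStacks, as the rewritten doc comment suggests.
    print(output)
}
// --- end sketch ---
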
@@ -3195,7 +3195,7 @@
/// - administrationRoleARN: The Amazon Resource Name (ARN) of the IAM role to use to update this stack set. Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the CloudFormation User Guide. If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.
/// - autoDeployment: [Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organization or organizational unit (OU). If you specify AutoDeployment, don't specify DeploymentTargets or Regions.
/// - callAs: [Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. By default, SELF is specified. Use SELF for stack sets with self-managed permissions. If you are signed in to the management account, specify SELF. If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN. Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.
- /// - capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
+ /// - capabilities: In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
/// - deploymentTargets: [Service-managed permissions] The Organizations accounts in which to update associated stack instances. To update all the stack instances associated with this stack set, do not specify DeploymentTargets or Regions. If the stack set update includes changes to the template (that is, if TemplateBody or TemplateURL is specified), or the Parameters, CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Amazon Web Services Regions. If the stack set update doesn't include changes to the template or parameters, CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.
/// - description: A brief description of updates that you are making.
/// - executionRoleName: The name of the IAM execution role to use to update the stack set. If you do not specify an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation. Specify an IAM role only if you are using customized execution roles to control which stack resources users and groups can include in their stack sets. If you specify a customized execution role, CloudFormation uses that role to update the stack. If you do not specify a customized execution role, CloudFormation performs the update using the role previously associated with the stack set, so long as you have permissions to perform operations on the stack set.
@@ -3207,8 +3207,8 @@
/// - regions: The Amazon Web Services Regions in which to update associated stack instances. If you specify Regions, you must also specify accounts in which to update stack set instances. To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties. If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Regions. If the stack set update does not include changes to the template or parameters, CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status.
/// - stackSetName: The name or unique ID of the stack set that you want to update.
/// - tags: The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates these tags to supported resources that are created in the stacks. You can specify a maximum number of 50 tags. If you specify tags for this parameter, those tags replace any list of tags that are currently associated with this stack set. This means: If you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify any tags using this parameter, you must specify all the tags that you want associated with this stack set, even tags you've specified before (for example, when creating the stack set or during a previous update of the stack set.). Any tags that you don't include in the updated list of tags are removed from the stack set, and therefore from the stacks and resources as well. If you specify an empty value, CloudFormation removes all currently associated tags. If you specify new tags as part of an UpdateStackSet action, CloudFormation checks to see if you have the required IAM permission to tag resources. If you omit tags that are currently associated with the stack set from the list of tags you specify, CloudFormation assumes that you want to remove those tags from the stack set, and checks to see if you have permission to untag resources. If you don't have the necessary permission(s), the entire UpdateStackSet action fails with an access denied error, and the stack set is not updated.
- /// - templateBody: The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.
- /// - templateURL: The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.
+ /// - templateBody: The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.
+ /// - templateURL: The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.
/// - usePreviousTemplate: Use the existing template that's associated with the stack set that you're updating. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true.
/// - logger: Logger use during operation
@inlinable
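
// --- Editor's usage sketch (not part of the patch) ---
// Updating a stack set with the template it already uses, per the UpdateStackSet parameter
// docs above. Names are placeholders and a configured service client is assumed.
import SotoCloudFormation

func updateExampleStackSet(_ cloudFormation: CloudFormation) async throws {
    let input = CloudFormation.UpdateStackSetInput(
        capabilities: [.capabilityIam],     // acknowledge IAM resources in the template
        description: "Refresh parameters across all stack instances",
        stackSetName: "my-org-baseline",
        usePreviousTemplate: true
    )
    let output = try await cloudFormation.updateStackSet(input)
    print(output)   // includes the operation ID for tracking the stack set operation
}
// --- end sketch ---
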
@@ -3258,7 +3258,7 @@ public struct CloudFormation: AWSService {
return try await self.updateStackSet(input, logger: logger)
}
- /// Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
+ /// Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protect a CloudFormation stack from being deleted in the CloudFormation User Guide. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
@Sendable
@inlinable
public func updateTerminationProtection(_ input: UpdateTerminationProtectionInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateTerminationProtectionOutput {
@@ -3271,7 +3271,7 @@ public struct CloudFormation: AWSService {
logger: logger
)
}
- /// Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
+ /// Updates termination protection for the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protect a CloudFormation stack from being deleted in the CloudFormation User Guide. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
///
/// Parameters:
/// - enableTerminationProtection: Whether to enable termination protection on the specified stack.
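
// --- Editor's usage sketch (not part of the patch) ---
// Toggling termination protection per the rewritten docs above. The stack name is a
// placeholder; for nested stack hierarchies this only applies to the root stack. The
// `stackName` label is not shown in this hunk but is the operation's other documented input.
import SotoCloudFormation

func protectStack(_ cloudFormation: CloudFormation) async throws {
    let input = CloudFormation.UpdateTerminationProtectionInput(
        enableTerminationProtection: true,
        stackName: "my-app-stack"
    )
    let output = try await cloudFormation.updateTerminationProtection(input)
    print(output)   // echoes the stack ID
}
// --- end sketch ---
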
@@ -3306,8 +3306,8 @@ public struct CloudFormation: AWSService {
/// Validates a specified template. CloudFormation first checks if the template is valid JSON. If it isn't, CloudFormation checks if the template is valid YAML. If both these checks fail, CloudFormation returns a template validation error.
///
/// Parameters:
- /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the CloudFormation User Guide. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.
- /// - templateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.
+ /// - templateBody: Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.
+ /// - templateURL: Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used.
/// - logger: Logger use during operation
@inlinable
public func validateTemplate(
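
// --- Editor's usage sketch (not part of the patch) ---
// Validating an inline template body, per the ValidateTemplate parameter docs above. If both
// TemplateBody and TemplateURL were passed, only TemplateBody would be used. A configured
// service client is assumed.
import SotoCloudFormation

func validateInlineTemplate(_ cloudFormation: CloudFormation) async throws {
    let templateBody = """
    {"Resources": {"Bucket": {"Type": "AWS::S3::Bucket"}}}
    """
    let input = CloudFormation.ValidateTemplateInput(templateBody: templateBody)
    let output = try await cloudFormation.validateTemplate(input)
    print(output)   // lists the template's declared parameters, capabilities, and description
}
// --- end sketch ---
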
diff --git a/Sources/Soto/Services/CloudFormation/CloudFormation_shapes.swift b/Sources/Soto/Services/CloudFormation/CloudFormation_shapes.swift
index add7338d90..fc2374cd10 100644
--- a/Sources/Soto/Services/CloudFormation/CloudFormation_shapes.swift
+++ b/Sources/Soto/Services/CloudFormation/CloudFormation_shapes.swift
@@ -972,10 +972,10 @@ extension CloudFormation {
public struct ContinueUpdateRollbackInput: AWSEncodableShape {
/// A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to continue the rollback to a stack with the same name. You might retry ContinueUpdateRollback requests to ensure that CloudFormation successfully received them.
public let clientRequestToken: String?
- /// A list of the logical IDs of the resources that CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason. Specify this property to skip rolling back resources that CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable. Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources. To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED. Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy.
+ /// A list of the logical IDs of the resources that CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason. Specify this property to skip rolling back resources that CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable. Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources. To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED. Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Continue rolling back from failed nested stack updates.
@OptionalCustomCoding>
public var resourcesToSkip: [String]?
- /// The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to roll back the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials.
+ /// The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to roll back the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials.
public let roleARN: String?
/// The name or the unique ID of the stack that you want to continue rolling back. Don't specify the name of a nested stack (a stack that was created by using the AWS::CloudFormation::Stack resource). Instead, use this operation on the parent stack (the stack that contains the AWS::CloudFormation::Stack resource).
public let stackName: String?
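
// --- Editor's usage sketch (not part of the patch) ---
// A ContinueUpdateRollback call built from the shape above. The skipped logical IDs are
// placeholders; note the NestedStackName.ResourceLogicalID format for resources inside
// nested stacks, as the rewritten doc describes. A configured service client is assumed.
import SotoCloudFormation

func resumeRollback(_ cloudFormation: CloudFormation) async throws {
    let input = CloudFormation.ContinueUpdateRollbackInput(
        resourcesToSkip: ["BrokenQueue", "ChildStack.BrokenTable"],
        stackName: "my-app-stack"
    )
    _ = try await cloudFormation.continueUpdateRollback(input)
}
// --- end sketch ---
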
@@ -1014,7 +1014,7 @@ extension CloudFormation {
}
public struct CreateChangeSetInput: AWSEncodableShape {
- /// In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. This capacity doesn't apply to creating change sets, and specifying it when creating change sets has no effect. If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability. For more information about macros, see Using CloudFormation macros to perform custom processing on templates. Only one of the Capabilities and ResourceType parameters can be specified.
+ /// In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. This capability doesn't apply to creating change sets, and specifying it when creating change sets has no effect. If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability. For more information about macros, see Perform custom processing on CloudFormation templates with template macros. Only one of the Capabilities and ResourceType parameters can be specified.
@OptionalCustomCoding>
public var capabilities: [Capability]?
/// The name of the change set. The name must be unique among all change sets that are associated with the specified stack. A change set name can contain only alphanumeric, case sensitive characters, and hyphens. It must start with an alphabetical character and can't exceed 128 characters.
@@ -1025,11 +1025,11 @@
public let clientToken: String?
/// A description to help you identify this change set.
public let description: String?
- /// Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Bringing existing resources into CloudFormation management in the CloudFormation User Guide.
+ /// Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Import Amazon Web Services resources into a CloudFormation stack with a resource import in the CloudFormation User Guide.
public let importExistingResources: Bool?
/// Creates a change set for the all nested stacks specified in the template. The default behavior of this action is set to False. To include nested sets in a change set, specify True.
public let includeNestedStacks: Bool?
- /// The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list.
+ /// The Amazon Resource Names (ARNs) of Amazon SNS topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list.
@OptionalCustomCoding>
public var notificationARNs: [String]?
/// Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values: DELETE - Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED. DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true for the DisableRollback parameter to the ExecuteChangeSet API operation. ROLLBACK - if the stack creation fails, roll back the stack. This is equivalent to specifying false for the DisableRollback parameter to the ExecuteChangeSet API operation. For nested stacks, when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted.
@@ -1040,10 +1040,10 @@
/// The resources to import into your stack.
@OptionalCustomCoding>
public var resourcesToImport: [ResourceToImport]?
- /// The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Controlling access with Identity and Access Management in the CloudFormation User Guide. Only one of the Capabilities and ResourceType parameters can be specified.
+ /// The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Control access with Identity and Access Management in the CloudFormation User Guide. Only one of the Capabilities and ResourceType parameters can be specified.
@OptionalCustomCoding>
public var resourceTypes: [String]?
- /// The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes when executing the change set. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.
+ /// The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes when executing the change set. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials.
public let roleARN: String?
/// The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.
public let rollbackConfiguration: RollbackConfiguration?
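
// --- Editor's usage sketch (not part of the patch) ---
// Creating a change set against the shape whose docs the hunks above touch. Names and the
// template URL are placeholders; the initializer labels are assumed to match the documented
// members, and a configured service client is assumed.
import SotoCloudFormation

func previewStackChanges(_ cloudFormation: CloudFormation) async throws {
    let input = CloudFormation.CreateChangeSetInput(
        capabilities: [.capabilityIam],     // acknowledge IAM resources in the template
        changeSetName: "my-app-changeset-001",
        description: "Preview of the next template revision",
        stackName: "my-app-stack",
        templateURL: "https://my-bucket.s3.us-east-1.amazonaws.com/template.yaml"
    )
    let output = try await cloudFormation.createChangeSet(input)
    print(output)   // returns the change set ID and the stack ID
}
// --- end sketch ---
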
@@ -1206,16 +1206,16 @@ extension CloudFormation {
}
public struct CreateStackInput: AWSEncodableShape {
- /// In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability. You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Using CloudFormation macros to perform custom processing on templates. Only one of the Capabilities and ResourceType parameters can be specified.
+ /// In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability. You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Perform custom processing on CloudFormation templates with template macros. Only one of the Capabilities and ResourceType parameters can be specified.
@OptionalCustomCoding>
public var capabilities: [Capability]?
/// A unique identifier for this CreateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create a stack with the same name. You might retry CreateStack requests to ensure that CloudFormation successfully received them. All events initiated by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1. In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.
public let clientRequestToken: String?
/// Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure, but not both. Default: false
public let disableRollback: Bool?
- /// Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
+ /// Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protect CloudFormation stacks from being deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks by default. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack.
public let enableTerminationProtection: Bool?
- /// The Amazon Simple Notification Service (Amazon SNS) topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).
+ /// The Amazon SNS topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).
@OptionalCustomCoding>
public var notificationARNs: [String]?
/// Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING, ROLLBACK, or DELETE. You can specify either OnFailure or DisableRollback, but not both. Default: ROLLBACK
@@ -1223,27 +1223,27 @@
/// A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type.
@OptionalCustomCoding>
public var parameters: [Parameter]?
- /// The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all Amazon Web Services resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular Amazon Web Services service), and AWS::service_name::resource_logical_ID (for a specific Amazon Web Services resource). If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified.
+ /// The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all Amazon Web Services resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular Amazon Web Services service), and AWS::service_name::resource_logical_ID (for a specific Amazon Web Services resource). If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Control access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified.
@OptionalCustomCoding>
public var resourceTypes: [String]?
/// When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain. Default: false
public let retainExceptOnCreate: Bool?
- /// The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials.
+ /// The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials.
public let roleARN: String?
/// The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.
public let rollbackConfiguration: RollbackConfiguration?
/// The name that's associated with the stack. The name must be unique in the Region in which you are creating the stack. A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetical character and can't be longer than 128 characters.
public let stackName: String?
- /// Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.
+ /// Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.
public let stackPolicyBody: String?
/// Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.
public let stackPolicyURL: String?
/// Key-value pairs to associate with this stack.
CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified. @OptionalCustomCoding> public var tags: [Tag]? - /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. + /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. public let templateBody: String? - /// Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to the Template anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. + /// Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. public let templateURL: String? /// The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false, the stack will be rolled back. public let timeoutInMinutes: Int? @@ -1412,7 +1412,7 @@ extension CloudFormation { public let autoDeployment: AutoDeployment? /// [Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. By default, SELF is specified. Use SELF for stack sets with self-managed permissions. To create a stack set with service-managed permissions while signed in to the management account, specify SELF. To create a stack set with service-managed permissions while signed in to a delegated administrator account, specify DELEGATED_ADMIN. Your Amazon Web Services account must be registered as a delegated admin in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide. Stack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated administrators. public let callAs: CallAs? - /// In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. 
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail. + /// In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in order for CloudFormation to create the stack set and related stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must create the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions don't currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail. @OptionalCustomCoding> public var capabilities: [Capability]? /// A unique identifier for this CreateStackSet request. 
Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create another stack set with the same name. You might retry CreateStackSet requests to ensure that CloudFormation successfully received them. If you don't specify an operation ID, the SDK generates one automatically. @@ -1435,9 +1435,9 @@ extension CloudFormation { /// The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates these tags to supported resources that are created in the stacks. A maximum number of 50 tags can be specified. If you specify tags as part of a CreateStackSet action, CloudFormation checks to see if you have the required IAM permission to tag resources. If you don't, the entire CreateStackSet action fails with an access denied error, and the stack set is not created. @OptionalCustomCoding> public var tags: [Tag]? - /// The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. + /// The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. public let templateBody: String? - /// The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. + /// The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both. public let templateURL: String? @inlinable @@ -1611,7 +1611,7 @@ extension CloudFormation { /// For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the resources you want to retain. During deletion, CloudFormation deletes the stack but doesn't delete the retained resources. Retaining resources is useful when you can't delete a resource, such as a non-empty S3 bucket, but you want to delete the stack. @OptionalCustomCoding> public var retainResources: [String]? - /// The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to delete the stack. CloudFormation uses the role's credentials to make calls on your behalf. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials. + /// The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to delete the stack. CloudFormation uses the role's credentials to make calls on your behalf. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials. public let roleARN: String? /// The name or the unique stack ID that's associated with the stack. 
public let stackName: String? @@ -1989,7 +1989,7 @@ extension CloudFormation { public let includeNestedStacks: Bool? /// If the output exceeds 1 MB, a string that identifies the next page of changes. If there is no additional page, this value is null. public let nextToken: String? - /// The ARNs of the Amazon Simple Notification Service (Amazon SNS) topics that will be associated with the stack if you execute the change set. + /// The ARNs of the Amazon SNS topics that will be associated with the stack if you execute the change set. @OptionalCustomCoding> public var notificationARNs: [String]? /// Determines what action will be taken if stack creation fails. When this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values: DELETE - Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED. DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true for the DisableRollback parameter to the ExecuteChangeSet API operation. ROLLBACK - if the stack creation fails, roll back the stack. This is equivalent to specifying false for the DisableRollback parameter to the ExecuteChangeSet API operation. @@ -2243,7 +2243,7 @@ extension CloudFormation { public var resourceTypes: [String]? /// The time that the resource scan was started. public let startTime: Date? - /// Status of the resource scan. INPROGRESS The resource scan is still in progress. COMPLETE The resource scan is complete. EXPIRED The resource scan has expired. FAILED The resource scan has failed. + /// Status of the resource scan. INPROGRESS The resource scan is still in progress. COMPLETE The resource scan is complete. EXPIRED The resource scan has expired. FAILED The resource scan has failed. public let status: ResourceScanStatus? /// The reason for the resource scan status, providing more information if a failure happened. public let statusReason: String? @@ -2459,7 +2459,7 @@ extension CloudFormation { public struct DescribeStackResourceDriftsOutput: AWSDecodableShape { /// If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve the next set of results, call DescribeStackResourceDrifts again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null. public let nextToken: String? - /// Drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects drift. For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that haven't yet been checked for drift aren't included. Resources that do not currently support drift detection aren't checked, and so not included. For a list of resources that support drift detection, see Resources that Support Drift Detection. + /// Drift information for the resources that have been checked for drift in the specified stack. This includes actual and expected configuration values for resources where CloudFormation detects drift. For a given stack, there will be one StackResourceDrift for each stack resource that has been checked for drift. Resources that haven't yet been checked for drift aren't included. 
Resources that do not currently support drift detection aren't checked, and so not included. For a list of resources that support drift detection, see Resource type support for imports and drift detection. @OptionalCustomCoding> public var stackResourceDrifts: [StackResourceDrift]? @@ -2713,9 +2713,9 @@ extension CloudFormation { public struct DescribeTypeOutput: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the extension. public let arn: String? - /// Whether CloudFormation automatically updates the extension in this account and Region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated. For more information, see Activating public extensions for use in your account in the CloudFormation User Guide. + /// Whether CloudFormation automatically updates the extension in this account and Region when a new minor version is published by the extension publisher. Major versions released by the publisher must be manually updated. For more information, see Automatically use new versions of extensions in the CloudFormation User Guide. public let autoUpdate: Bool? - /// A JSON string that represent the current configuration data for the extension in this account and Region. To set the configuration data for an extension, use SetTypeConfiguration. For more information, see Configuring extensions at the account level in the CloudFormation User Guide. + /// A JSON string that represents the current configuration data for the extension in this account and Region. To set the configuration data for an extension, use SetTypeConfiguration. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide. public let configurationSchema: String? /// The ID of the default version of the extension. The default version is used when the extension version isn't specified. This applies only to private extensions you have registered in your account. For public extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns null. For more information, see RegisterType. To set the default version of an extension, use SetTypeDefaultVersion. public let defaultVersionId: String? @@ -2739,7 +2739,7 @@ extension CloudFormation { public let loggingConfig: LoggingConfig? /// For public extensions that have been activated for this account and Region, the Amazon Resource Name (ARN) of the public extension. public let originalTypeArn: String? - /// For public extensions that have been activated for this account and Region, the type name of the public extension. If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide. + /// For public extensions that have been activated for this account and Region, the type name of the public extension. If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Use aliases to refer to extensions in the CloudFormation User Guide. public let originalTypeName: String? /// For resource type extensions, the provisioning behavior of the resource type. 
CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted. Valid values include: FULLY_MUTABLE: The resource type includes an update handler to process updates to the type during stack update operations. IMMUTABLE: The resource type doesn't include an update handler, so the type can't be updated and must instead be replaced during stack update operations. NON_PROVISIONABLE: The resource type doesn't include all the following handlers, and therefore can't actually be provisioned. create read delete public let provisioningType: ProvisioningType? @@ -2750,7 +2750,7 @@ extension CloudFormation { /// For extensions that are modules, the public third-party extensions that must be activated in your account in order for the module itself to be activated. @OptionalCustomCoding> public var requiredActivatedTypes: [RequiredActivatedType]? - /// The schema that defines the extension. For more information about extension schemas, see Resource Provider Schema in the CloudFormation CLI User Guide. + /// The schema that defines the extension. For more information about extension schemas, see Resource type schema in the CloudFormation Command Line Interface (CLI) User Guide. public let schema: String? /// The URL of the source code for the extension. public let sourceUrl: String? @@ -2760,7 +2760,7 @@ extension CloudFormation { public let type: RegistryType? /// The name of the extension. If the extension is a public third-party type you have activated with a type name alias, CloudFormation returns the type name alias. For more information, see ActivateType. public let typeName: String? - /// The contract test status of the registered extension version. To return the extension test status of a specific extension version, you must specify VersionId. This applies only to registered private extension versions. CloudFormation doesn't return this information for public extensions, whether they are activated in your account. PASSED: The extension has passed all its contract tests. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface User Guide. FAILED: The extension has failed one or more contract tests. IN_PROGRESS: Contract tests are currently being performed on the extension. NOT_TESTED: Contract tests haven't been performed on the extension. + /// The contract test status of the registered extension version. To return the extension test status of a specific extension version, you must specify VersionId. This applies only to registered private extension versions. CloudFormation doesn't return this information for public extensions, whether they are activated in your account. PASSED: The extension has passed all its contract tests. An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide. FAILED: The extension has failed one or more contract tests. IN_PROGRESS: Contract tests are currently being performed on the extension. NOT_TESTED: Contract tests haven't been performed on the extension. public let typeTestsStatus: TypeTestsStatus? /// The description of the test status. To return the extension test status of a specific extension version, you must specify VersionId. 
This applies only to registered private extension versions. CloudFormation doesn't return this information for public extensions, whether they are activated in your account. public let typeTestsStatusDescription: String? @@ -3004,9 +3004,9 @@ extension CloudFormation { /// A list of Parameter structures that specify input parameters. @OptionalCustomCoding> public var parameters: [Parameter]? - /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the CloudFormation User Guide.) Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used. + /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only TemplateBody is used. public let templateBody: String? - /// Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. + /// Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. public let templateURL: String? @inlinable @@ -3166,7 +3166,7 @@ extension CloudFormation { } public struct GetStackPolicyOutput: AWSDecodableShape { - /// Structure containing the stack policy body. (For more information, go to Prevent Updates to Stack Resources in the CloudFormation User Guide.) + /// Structure containing the stack policy body. (For more information, see Prevent updates to stack resources in the CloudFormation User Guide.) public let stackPolicyBody: String? @inlinable @@ -3211,7 +3211,7 @@ extension CloudFormation { /// The stage of the template that you can retrieve. For stacks, the Original and Processed templates are always available. For change sets, the Original template is always available. After CloudFormation finishes creating the change set, the Processed template becomes available. @OptionalCustomCoding> public var stagesAvailable: [TemplateStage]? - /// Structure containing the template body. (For more information, go to Template Anatomy in the CloudFormation User Guide.) CloudFormation returns the same template that was used when the stack was created. + /// Structure containing the template body. CloudFormation returns the same template that was used when the stack was created. public let templateBody: String? @inlinable @@ -3233,11 +3233,11 @@ extension CloudFormation { public let stackName: String? /// The name or unique ID of the stack set from which the stack was created. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. public let stackSetName: String? - /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information about templates, see Template anatomy in the CloudFormation User Guide. 
Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. + /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. public let templateBody: String? /// Specifies options for the GetTemplateSummary API action. public let templateSummaryConfig: TemplateSummaryConfig? - /// Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see Template anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. + /// Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL. public let templateURL: String? @inlinable @@ -3270,7 +3270,7 @@ extension CloudFormation { } public struct GetTemplateSummaryOutput: AWSDecodableShape { - /// The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error. For more information, see Acknowledging IAM Resources in CloudFormation Templates. + /// The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error. For more information, see Acknowledging IAM resources in CloudFormation templates. @OptionalCustomCoding> public var capabilities: [Capability]? /// The list of resources that generated the values in the Capabilities response element. @@ -4426,7 +4426,7 @@ extension CloudFormation { } public struct ModuleInfo: AWSDecodableShape { - /// A concatenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /. In the following example, the resource was created from a module, moduleA, that's nested inside a parent module, moduleB. moduleA/moduleB For more information, see Referencing resources in a module in the CloudFormation User Guide. + /// A concatenated list of the logical IDs of the module or modules containing the resource. Modules are listed starting with the inner-most nested module, and separated by /. In the following example, the resource was created from a module, moduleA, that's nested inside a parent module, moduleB. moduleA/moduleB For more information, see Reference module resources in CloudFormation templates in the CloudFormation User Guide. public let logicalIdHierarchy: String? /// A concatenated list of the module type or types containing the resource. 
Module types are listed starting with the inner-most nested module, and separated by /. In the following example, the resource was created from a module of type AWS::First::Example::MODULE, that's nested inside a parent module of type AWS::Second::Example::MODULE. AWS::First::Example::MODULE/AWS::Second::Example::MODULE public let typeHierarchy: String? @@ -4498,7 +4498,7 @@ extension CloudFormation { public let parameterKey: String? /// The input value associated with the parameter. public let parameterValue: String? - /// Read-only. The value that corresponds to a SSM parameter key. This field is returned only for SSM parameter types in the template. + /// Read-only. The value that corresponds to a Systems Manager parameter key. This field is returned only for Systems Manager parameter types in the template. For more information, see Use CloudFormation-supplied parameter types in the CloudFormation User Guide. public let resolvedValue: String? /// During a stack update, use the existing parameter value that the stack is using for a given parameter key. If you specify true, do not specify a parameter value. public let usePreviousValue: Bool? @@ -4718,7 +4718,7 @@ extension CloudFormation { public struct RegisterPublisherInput: AWSEncodableShape { /// Whether you accept the Terms and Conditions for publishing extensions in the CloudFormation registry. You must accept the terms and conditions in order to register to publish public extensions to the CloudFormation registry. The default is false. public let acceptTermsAndConditions: Bool? - /// If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account. For more information, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User Guide. + /// If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account. For more information, see Prerequisite: Registering your account to publish CloudFormation extensions in the CloudFormation Command Line Interface (CLI) User Guide. public let connectionArn: String? @inlinable @@ -4760,7 +4760,7 @@ extension CloudFormation { public let executionRoleArn: String? /// Specifies logging configuration information for an extension. public let loggingConfig: LoggingConfig? - /// A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. For information about generating a schema handler package for the extension you want to register, see submit in the CloudFormation CLI User Guide. The user registering the extension must be able to access the package in the S3 bucket. That's, the user needs to have GetObject permissions for the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide. + /// A URL to the S3 bucket containing the extension project package that contains the necessary files for the extension you want to register. For information about generating a schema handler package for the extension you want to register, see submit in the CloudFormation Command Line Interface (CLI) User Guide. The user registering the extension must be able to access the package in the S3 bucket. That's, the user needs to have GetObject permissions for the schema handler package. 
For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide. public let schemaHandlerPackage: String? /// The kind of extension. public let type: RegistryType? @@ -4817,7 +4817,7 @@ extension CloudFormation { } public struct RequiredActivatedType: AWSDecodableShape { - /// The type name of the public extension. If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide. + /// The type name of the public extension. If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Use aliases to refer to extensions in the CloudFormation User Guide. public let originalTypeName: String? /// The publisher ID of the extension publisher. public let publisherId: String? @@ -4863,7 +4863,7 @@ extension CloudFormation { public let physicalResourceId: String? /// The action that will be taken on the physical resource when the change set is executed. Delete The resource will be deleted. Retain The resource will be retained. Snapshot The resource will have a snapshot taken. ReplaceAndDelete The resource will be replaced and then deleted. ReplaceAndRetain The resource will be replaced and then retained. ReplaceAndSnapshot The resource will be replaced and then have a snapshot taken. public let policyAction: PolicyAction? - /// For the Modify action, indicates whether CloudFormation will replace the resource by creating a new one and deleting the old one. This value depends on the value of the RequiresRecreation property in the ResourceTargetDefinition structure. For example, if the RequiresRecreation field is Always and the Evaluation field is Static, Replacement is True. If the RequiresRecreation field is Always and the Evaluation field is Dynamic, Replacement is Conditionally. If you have multiple changes with different RequiresRecreation values, the Replacement value depends on the change with the most impact. A RequiresRecreation value of Always has the most impact, followed by Conditionally, and then Never. + /// For the Modify action, indicates whether CloudFormation will replace the resource by creating a new one and deleting the old one. This value depends on the value of the RequiresRecreation property in the ResourceTargetDefinition structure. For example, if the RequiresRecreation field is Always and the Evaluation field is Static, Replacement is True. If the RequiresRecreation field is Always and the Evaluation field is Dynamic, Replacement is Conditional. If you have multiple changes with different RequiresRecreation values, the Replacement value depends on the change with the most impact. A RequiresRecreation value of Always has the most impact, followed by Conditional, and then Never. public let replacement: Replacement? /// The type of CloudFormation resource, such as AWS::S3::Bucket. public let resourceType: String? @@ -4932,10 +4932,10 @@ extension CloudFormation { public struct ResourceDefinition: AWSEncodableShape { /// The logical resource id for this resource in the generated template. public let logicalResourceId: String? 
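// Illustrative usage sketch (not part of the generated service code): the resourceIdentifier
// property documented just below is keyed by a resource's primary identifier, for example
// "TableName": "MyDDBTable" for an AWS::DynamoDB::Table. The snippet shows one way a
// ResourceDefinition might be built and handed to the IaC generator; the initializer labels,
// the createGeneratedTemplate call shape, and all names ("MyTable", "MyDDBTable",
// "MyGeneratedTemplate") are assumptions for illustration only.
import SotoCloudFormation

func generateTemplateForTable(cloudFormation: CloudFormation) async throws -> String? {
    // Identify an existing DynamoDB table by its primary identifier.
    let table = CloudFormation.ResourceDefinition(
        logicalResourceId: "MyTable",
        resourceIdentifier: ["TableName": "MyDDBTable"],
        resourceType: "AWS::DynamoDB::Table"
    )
    // Ask the IaC generator to produce a template that contains the scanned resource.
    let output = try await cloudFormation.createGeneratedTemplate(
        .init(generatedTemplateName: "MyGeneratedTemplate", resources: [table])
    )
    // The returned identifier can later be passed to describeGeneratedTemplate to check progress.
    return output.generatedTemplateId
}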
- /// A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be "TableName": "MyDDBTable". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development. + /// A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for an AWS::DynamoDB::Table resource, the primary identifier is TableName so the key-value pair could be "TableName": "MyDDBTable". For more information, see primaryIdentifier in the CloudFormation Command Line Interface (CLI) User Guide. @OptionalCustomCoding> public var resourceIdentifier: [String: String]? - /// The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see IaC generator supported resource types in the CloudFormation User Guide + /// The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support for imports and drift detection in the CloudFormation User Guide public let resourceType: String? @inlinable @@ -4968,14 +4968,14 @@ extension CloudFormation { public struct ResourceDetail: AWSDecodableShape { /// The logical id for this resource in the final generated template. public let logicalResourceId: String? - /// A list of up to 256 key-value pairs that identifies the resource in the generated template. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be "TableName": "MyDDBTable". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development. + /// A list of up to 256 key-value pairs that identifies the resource in the generated template. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for an AWS::DynamoDB::Table resource, the primary identifier is TableName so the key-value pair could be "TableName": "MyDDBTable". For more information, see primaryIdentifier in the CloudFormation Command Line Interface (CLI) User Guide. @OptionalCustomCoding> public var resourceIdentifier: [String: String]? - /// Status of the processing of a resource in a generated template. InProgress The resource processing is still in progress. Complete The resource processing is complete. Pending The resource processing is pending. Failed The resource processing has failed. + /// Status of the processing of a resource in a generated template. InProgress The resource processing is still in progress. Complete The resource processing is complete. Pending The resource processing is pending. Failed The resource processing has failed. 
public let resourceStatus: GeneratedTemplateResourceStatus? /// The reason for the resource detail, providing more information if a failure happened. public let resourceStatusReason: String? - /// The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see IaC generator supported resource types In the CloudFormation User Guide + /// The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support for imports and drift detection In the CloudFormation User Guide public let resourceType: String? /// The warnings generated for this resource. @OptionalCustomCoding> @@ -5034,7 +5034,7 @@ extension CloudFormation { public let resourceScanId: String? /// The time that the resource scan was started. public let startTime: Date? - /// Status of the resource scan. INPROGRESS The resource scan is still in progress. COMPLETE The resource scan is complete. EXPIRED The resource scan has expired. FAILED The resource scan has failed. + /// Status of the resource scan. INPROGRESS The resource scan is still in progress. COMPLETE The resource scan is complete. EXPIRED The resource scan has expired. FAILED The resource scan has failed. public let status: ResourceScanStatus? /// The reason for the resource scan status, providing more information if a failure happened. public let statusReason: String? @@ -5072,7 +5072,7 @@ extension CloudFormation { public let name: String? /// The property path of the property. public let path: String? - /// If the Attribute value is Properties, indicates whether a change to this property causes the resource to be recreated. The value can be Never, Always, or Conditionally. To determine the conditions for a Conditionally recreation, see the update behavior for that property in the CloudFormation User Guide. + /// If the Attribute value is Properties, indicates whether a change to this property causes the resource to be recreated. The value can be Never, Always, or Conditionally. To determine the conditions for a Conditionally recreation, see the update behavior for that property in the Amazon Web Services resource and property types reference in the CloudFormation User Guide. public let requiresRecreation: RequiresRecreation? @inlinable @@ -5103,7 +5103,7 @@ extension CloudFormation { /// A key-value pair that identifies the target resource. The key is an identifier property (for example, BucketName for AWS::S3::Bucket resources) and the value is the actual property value (for example, MyS3Bucket). @OptionalCustomCoding> public var resourceIdentifier: [String: String]? - /// The type of resource to import into your stack, such as AWS::S3::Bucket. For a list of supported resource types, see Resources that support import operations in the CloudFormation User Guide. + /// The type of resource to import into your stack, such as AWS::S3::Bucket. For a list of supported resource types, see Resource type support for imports and drift detection in the CloudFormation User Guide. public let resourceType: String? @inlinable @@ -5163,7 +5163,7 @@ extension CloudFormation { public let clientRequestToken: String? /// When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain. Default: false public let retainExceptOnCreate: Bool? - /// The Amazon Resource Name (ARN) of an Identity and Access Management role that CloudFormation assumes to rollback the stack. 
+ /// The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to roll back the stack. public let roleARN: String? /// The name that's associated with the stack. public let stackName: String? @@ -5229,10 +5229,10 @@ extension CloudFormation { public struct ScannedResource: AWSDecodableShape { /// If true, the resource is managed by a CloudFormation stack. public let managedByStack: Bool? - /// A list of up to 256 key-value pairs that identifies for the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be "TableName": "MyDDBTable". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development. + /// A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for an AWS::DynamoDB::Table resource, the primary identifier is TableName so the key-value pair could be "TableName": "MyDDBTable". For more information, see primaryIdentifier in the CloudFormation Command Line Interface (CLI) User Guide. @OptionalCustomCoding> public var resourceIdentifier: [String: String]? - /// The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support In the CloudFormation User Guide + /// The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support for imports and drift detection in the CloudFormation User Guide public let resourceType: String? @inlinable @@ -5250,10 +5250,10 @@ extension CloudFormation { } public struct ScannedResourceIdentifier: AWSEncodableShape { - /// A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be "TableName": "MyDDBTable". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development. + /// A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for an AWS::DynamoDB::Table resource, the primary identifier is TableName so the key-value pair could be "TableName": "MyDDBTable". For more information, see primaryIdentifier in the CloudFormation Command Line Interface (CLI) User Guide. @OptionalCustomCoding> public var resourceIdentifier: [String: String]? - /// The type of the resource, such as AWS::DynamoDB::Table. 
For the list of supported resources, see Resource type support for imports and drift detection In the CloudFormation User Guide. public let resourceType: String? @inlinable @@ -5276,7 +5276,7 @@ extension CloudFormation { public struct SetStackPolicyInput: AWSEncodableShape { /// The name or unique stack ID that you want to associate a policy with. public let stackName: String? - /// Structure containing the stack policy body. For more information, go to Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. + /// Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. public let stackPolicyBody: String? /// Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an Amazon S3 bucket in the same Amazon Web Services Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both. public let stackPolicyURL: String? @@ -5303,7 +5303,7 @@ extension CloudFormation { } public struct SetTypeConfigurationInput: AWSEncodableShape { - /// The configuration data for the extension, in this account and Region. The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide. + /// The configuration data for the extension, in this account and Region. The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining the account-level configuration of an extension in the CloudFormation Command Line Interface (CLI) User Guide. public let configuration: String? /// An alias by which to refer to this extension configuration data. Conditional: Specifying a configuration alias is required when setting a configuration for a resource type extension. public let configurationAlias: String? @@ -5448,13 +5448,13 @@ extension CloudFormation { public let deletionTime: Date? /// A user-defined description associated with the stack. public let description: String? - /// The detailed status of the resource or stack. If CONFIGURATION_COMPLETE is present, the resource or resource configuration phase has completed and the stabilization of the resources is in progress. The stack sets CONFIGURATION_COMPLETE when all of the resources in the stack have reached that event. For more information, see CloudFormation stack deployment in the CloudFormation User Guide. + /// The detailed status of the resource or stack. If CONFIGURATION_COMPLETE is present, the resource or resource configuration phase has completed and the stabilization of the resources is in progress. The stack sets CONFIGURATION_COMPLETE when all of the resources in the stack have reached that event. For more information, see Understand CloudFormation stack creation events in the CloudFormation User Guide. public let detailedStatus: DetailedStatus? /// Boolean to enable or disable rollback on stack creation failures: true: disable rollback. false: enable rollback. public let disableRollback: Bool? 
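// Illustrative usage sketch (not part of the generated service code): SetStackPolicyInput,
// documented above, accepts either StackPolicyBody or StackPolicyURL but never both. The
// snippet applies an inline policy body that denies updates to a single logical resource;
// the stack name, logical resource ID, and initializer labels are assumptions for
// illustration only.
import SotoCloudFormation

func protectProductionDatabase(cloudFormation: CloudFormation) async throws {
    // Deny Update:* on one logical resource and allow updates to everything else.
    let policyBody = """
    {
      "Statement": [
        { "Effect": "Deny", "Action": "Update:*", "Principal": "*", "Resource": "LogicalResourceId/ProductionDatabase" },
        { "Effect": "Allow", "Action": "Update:*", "Principal": "*", "Resource": "*" }
      ]
    }
    """
    // Because StackPolicyBody is supplied inline, StackPolicyURL is left unset.
    try await cloudFormation.setStackPolicy(
        .init(stackName: "my-production-stack", stackPolicyBody: policyBody)
    )
}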
- /// Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources. + /// Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection. public let driftInformation: StackDriftInformation? - /// Whether termination protection is enabled for the stack. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide. + /// Whether termination protection is enabled for the stack. For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. For more information, see Protect a CloudFormation stack from being deleted in the CloudFormation User Guide. public let enableTerminationProtection: Bool? /// The time the stack was last updated. This field will only be returned if the stack has been updated at least once. public let lastUpdatedTime: Date? @@ -5467,15 +5467,15 @@ extension CloudFormation { /// A list of Parameter structures. @OptionalCustomCoding> public var parameters: [Parameter]? - /// For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack. For more information, see Working with Nested Stacks in the CloudFormation User Guide. + /// For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack. For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide. public let parentId: String? /// When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain. Default: false public let retainExceptOnCreate: Bool? - /// The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that's associated with the stack. During a stack operation, CloudFormation uses this role's credentials to make calls on your behalf. + /// The Amazon Resource Name (ARN) of an IAM role that's associated with the stack. During a stack operation, CloudFormation uses this role's credentials to make calls on your behalf. public let roleARN: String? /// The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards. public let rollbackConfiguration: RollbackConfiguration? - /// For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs. For more information, see Working with Nested Stacks in the CloudFormation User Guide. + /// For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs. 
For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide. public let rootId: String? /// Unique identifier of the stack. public let stackId: String? @@ -5588,7 +5588,7 @@ extension CloudFormation { public struct StackEvent: AWSDecodableShape { /// The token passed to the operation that generated this event. All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1. In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002. public let clientRequestToken: String? - /// An optional field containing information about the detailed status of the stack event. CONFIGURATION_COMPLETE - all of the resources in the stack have reached that event. For more information, see CloudFormation stack deployment in the CloudFormation User Guide. VALIDATION_FAILED - template validation failed because of invalid properties in the template. The ResourceStatusReason field shows what properties are defined incorrectly. + /// An optional field containing information about the detailed status of the stack event. CONFIGURATION_COMPLETE - all of the resources in the stack have reached that event. For more information, see Understand CloudFormation stack creation events in the CloudFormation User Guide. VALIDATION_FAILED - template validation failed because of invalid properties in the template. The ResourceStatusReason field shows what properties are defined incorrectly. public let detailedStatus: DetailedStatus? /// The unique ID of this event. public let eventId: String? @@ -5612,7 +5612,7 @@ extension CloudFormation { public let resourceStatus: ResourceStatus? /// Success/failure message associated with the resource. public let resourceStatusReason: String? - /// Type of resource. (For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.) + /// Type of resource. For more information, see Amazon Web Services resource and property types reference in the CloudFormation User Guide. public let resourceType: String? /// The unique ID name of the instance of the stack. public let stackId: String? @@ -5771,7 +5771,7 @@ extension CloudFormation { /// Status of the actual configuration of the resource compared to its expected configuration. These will be present only for resources whose StackInstanceResourceDriftStatus is MODIFIED. @OptionalCustomCoding> public var propertyDifferences: [PropertyDifference]? - /// Type of resource. For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide. + /// Type of resource. For more information, see Amazon Web Services resource and property types reference in the CloudFormation User Guide. public let resourceType: String? /// The ID of the stack instance. public let stackId: String? @@ -5861,7 +5861,7 @@ extension CloudFormation { public struct StackResource: AWSDecodableShape { /// User defined description associated with the resource. 
public let description: String? - /// Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources. + /// Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection. public let driftInformation: StackResourceDriftInformation? /// The logical name of the resource specified in the template. public let logicalResourceId: String? @@ -5873,7 +5873,7 @@ extension CloudFormation { public let resourceStatus: ResourceStatus? /// Success/failure message associated with the resource. public let resourceStatusReason: String? - /// Type of resource. For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide. + /// Type of resource. For more information, see Amazon Web Services resource and property types reference in the CloudFormation User Guide. public let resourceType: String? /// Unique identifier of the stack. public let stackId: String? @@ -5915,13 +5915,13 @@ extension CloudFormation { public struct StackResourceDetail: AWSDecodableShape { /// User defined description associated with the resource. public let description: String? - /// Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources. + /// Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection. public let driftInformation: StackResourceDriftInformation? /// Time the status was updated. public let lastUpdatedTimestamp: Date? /// The logical name of the resource specified in the template. public let logicalResourceId: String? - /// The content of the Metadata attribute declared for the resource. For more information, see Metadata Attribute in the CloudFormation User Guide. + /// The content of the Metadata attribute declared for the resource. For more information, see Metadata attribute in the CloudFormation User Guide. public let metadata: String? /// Contains information about the module from which the resource was created, if the resource was created from a module included in the stack template. public let moduleInfo: ModuleInfo? @@ -5931,7 +5931,7 @@ extension CloudFormation { public let resourceStatus: ResourceStatus? /// Success/failure message associated with the resource. public let resourceStatusReason: String? - /// Type of resource. For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide. + /// Type of resource. For more information, see Amazon Web Services resource and property types reference in the CloudFormation User Guide. public let resourceType: String? /// Unique identifier of the stack. public let stackId: String? 
@@ -6029,7 +6029,7 @@ extension CloudFormation { public struct StackResourceDriftInformation: AWSDecodableShape { /// When CloudFormation last checked if the resource had drifted from its expected configuration. public let lastCheckTimestamp: Date? - /// Status of the resource's actual configuration compared to its expected configuration DELETED: The resource differs from its expected configuration in that it has been deleted. MODIFIED: The resource differs from its expected configuration. NOT_CHECKED: CloudFormation has not checked if the resource differs from its expected configuration. Any resources that do not currently support drift detection have a status of NOT_CHECKED. For more information, see Resources that Support Drift Detection. IN_SYNC: The resource's actual configuration matches its expected configuration. + /// Status of the resource's actual configuration compared to its expected configuration DELETED: The resource differs from its expected configuration in that it has been deleted. MODIFIED: The resource differs from its expected configuration. NOT_CHECKED: CloudFormation has not checked if the resource differs from its expected configuration. Any resources that do not currently support drift detection have a status of NOT_CHECKED. For more information, see Resource type support for imports and drift detection. IN_SYNC: The resource's actual configuration matches its expected configuration. public let stackResourceDriftStatus: StackResourceDriftStatus? @inlinable @@ -6047,7 +6047,7 @@ extension CloudFormation { public struct StackResourceDriftInformationSummary: AWSDecodableShape { /// When CloudFormation last checked if the resource had drifted from its expected configuration. public let lastCheckTimestamp: Date? - /// Status of the resource's actual configuration compared to its expected configuration. DELETED: The resource differs from its expected configuration in that it has been deleted. MODIFIED: The resource differs from its expected configuration. NOT_CHECKED: CloudFormation hasn't checked if the resource differs from its expected configuration. Any resources that don't currently support drift detection have a status of NOT_CHECKED. For more information, see Resources that Support Drift Detection. If you performed an ContinueUpdateRollback operation on a stack, any resources included in ResourcesToSkip will also have a status of NOT_CHECKED. For more information about skipping resources during rollback operations, see Continue Rolling Back an Update in the CloudFormation User Guide. IN_SYNC: The resource's actual configuration matches its expected configuration. + /// Status of the resource's actual configuration compared to its expected configuration. DELETED: The resource differs from its expected configuration in that it has been deleted. MODIFIED: The resource differs from its expected configuration. NOT_CHECKED: CloudFormation hasn't checked if the resource differs from its expected configuration. Any resources that don't currently support drift detection have a status of NOT_CHECKED. For more information, see Resource type support for imports and drift detection. If you performed a ContinueUpdateRollback operation on a stack, any resources included in ResourcesToSkip will also have a status of NOT_CHECKED. For more information about skipping resources during rollback operations, see Continue rolling back an update in the CloudFormation User Guide. IN_SYNC: The resource's actual configuration matches its expected configuration.
public let stackResourceDriftStatus: StackResourceDriftStatus? @inlinable @@ -6063,7 +6063,7 @@ extension CloudFormation { } public struct StackResourceSummary: AWSDecodableShape { - /// Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources. + /// Information about whether the resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection. public let driftInformation: StackResourceDriftInformationSummary? /// Time the status was updated. public let lastUpdatedTimestamp: Date? @@ -6077,7 +6077,7 @@ extension CloudFormation { public let resourceStatus: ResourceStatus? /// Success/failure message associated with the resource. public let resourceStatusReason: String? - /// Type of resource. (For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.) + /// Type of resource. (For more information, see Amazon Web Services resource and property types reference in the CloudFormation User Guide.) public let resourceType: String? @inlinable @@ -6109,7 +6109,7 @@ extension CloudFormation { public let administrationRoleARN: String? /// [Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organization or organizational unit (OU). public let autoDeployment: AutoDeployment? - /// The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your Amazon Web Services account—for example, by creating new Identity and Access Management (IAM) users. For more information, see Acknowledging IAM Resources in CloudFormation Templates. + /// The capabilities that are allowed in the stack set. Some stack set templates might include resources that can affect permissions in your Amazon Web Services account—for example, by creating new Identity and Access Management (IAM) users. For more information, see Acknowledging IAM resources in CloudFormation templates. @OptionalCustomCoding> public var capabilities: [Capability]? /// A description of the stack set that you specify when the stack set is created or updated. @@ -6267,7 +6267,7 @@ extension CloudFormation { public let operationPreferences: StackSetOperationPreferences? /// For stack set operations of action type DELETE, specifies whether to remove the stack instances from the specified stack set, but doesn't delete the stacks. You can't re-associate a retained stack, or add an existing, saved stack to a new stack set. public let retainStacks: Bool? - /// Detailed information about the drift status of the stack set. This includes information about drift operations currently being performed on the stack set. This information will only be present for stack set operations whose Action type is DETECT_DRIFT. For more information, see Detecting Unmanaged Changes in Stack Sets in the CloudFormation User Guide. + /// Detailed information about the drift status of the stack set. This includes information about drift operations currently being performed on the stack set. 
This information will only be present for stack set operations whose Action type is DETECT_DRIFT. For more information, see Detect stack set drift in the CloudFormation User Guide. public let stackSetDriftDetectionDetails: StackSetDriftDetectionDetails? /// The ID of the stack set. public let stackSetId: String? @@ -6506,13 +6506,13 @@ extension CloudFormation { public let creationTime: Date? /// The time the stack was deleted. public let deletionTime: Date? - /// Summarizes information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources. + /// Summarizes information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection. public let driftInformation: StackDriftInformationSummary? /// The time the stack was last updated. This field will only be returned if the stack has been updated at least once. public let lastUpdatedTime: Date? - /// For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack. For more information, see Working with Nested Stacks in the CloudFormation User Guide. + /// For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack. For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide. public let parentId: String? - /// For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs. For more information, see Working with Nested Stacks in the CloudFormation User Guide. + /// For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs. For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide. public let rootId: String? /// Unique stack identifier. public let stackId: String? @@ -6622,7 +6622,7 @@ extension CloudFormation { } public struct Tag: AWSEncodableShape & AWSDecodableShape { - /// Required. A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services (Amazon Web Services) have the reserved prefix: aws:. + /// Required. A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services have the reserved prefix: aws:. public let key: String? /// Required. A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value. public let value: String? @@ -6771,7 +6771,7 @@ extension CloudFormation { public struct TestTypeInput: AWSEncodableShape { /// The Amazon Resource Name (ARN) of the extension. Conditional: You must specify Arn, or TypeName and Type. public let arn: String? - /// The S3 bucket to which CloudFormation delivers the contract test execution logs. 
CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of PASSED or FAILED. The user calling TestType must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions: GetObject PutObject For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Amazon Web Services Identity and Access Management User Guide. + /// The S3 bucket to which CloudFormation delivers the contract test execution logs. CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of PASSED or FAILED. The user calling TestType must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions: GetObject PutObject For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Identity and Access Management User Guide. public let logDeliveryBucket: String? /// The type of the extension to test. Conditional: You must specify Arn, or TypeName and Type. public let type: ThirdPartyType? @@ -6947,15 +6947,15 @@ extension CloudFormation { public let isActivated: Bool? /// When the specified extension version was registered. This applies only to: Private extensions you have registered in your account. For more information, see RegisterType. Public extensions you have activated in your account with auto-update specified. For more information, see ActivateType. For all other extension types, CloudFormation returns null. public let lastUpdated: Date? - /// For public extensions that have been activated for this account and Region, the latest version of the public extension that is available. For any extensions other than activated third-arty extensions, CloudFormation returns null. How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide. + /// For public extensions that have been activated for this account and Region, the latest version of the public extension that is available. For any extensions other than activated third-party extensions, CloudFormation returns null. How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Automatically use new versions of extensions in the CloudFormation User Guide. public let latestPublicVersion: String? - /// For public extensions that have been activated for this account and Region, the type name of the public extension. If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. For more information, see Specifying aliases to refer to extensions in the CloudFormation User Guide. + /// For public extensions that have been activated for this account and Region, the type name of the public extension. If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of the public extension. 
For more information, see Use aliases to refer to extensions in the CloudFormation User Guide. public let originalTypeName: String? - /// For public extensions that have been activated for this account and Region, the version of the public extension to be used for CloudFormation operations in this account and Region. How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide. + /// For public extensions that have been activated for this account and Region, the version of the public extension to be used for CloudFormation operations in this account and Region. How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Automatically use new versions of extensions in the CloudFormation User Guide. public let publicVersionNumber: String? /// The ID of the extension publisher, if the extension is published by a third party. Extensions published by Amazon don't return a publisher ID. public let publisherId: String? - /// The service used to verify the publisher identity. For more information, see Registering your account to publish CloudFormation extensions in the CFN-CLI User Guide for Extension Development. + /// The service used to verify the publisher identity. For more information, see Publishing extensions to make them available for public use in the CloudFormation Command Line Interface (CLI) User Guide. public let publisherIdentity: IdentityProvider? /// The publisher name, as defined in the public profile for that publisher in the service used to verify the publisher identity. public let publisherName: String? @@ -7007,7 +7007,7 @@ extension CloudFormation { public let description: String? /// Whether the specified extension version is set as the default version. This applies only to private extensions you have registered in your account, and extensions published by Amazon. For public third-party extensions, CloudFormation returns null. public let isDefaultVersion: Bool? - /// For public extensions that have been activated for this account and Region, the version of the public extension to be used for CloudFormation operations in this account and Region. For any extensions other than activated third-arty extensions, CloudFormation returns null. How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the CloudFormation User Guide. + /// For public extensions that have been activated for this account and Region, the version of the public extension to be used for CloudFormation operations in this account and Region. For any extensions other than activated third-party extensions, CloudFormation returns null. How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically updates the extension in this account and Region when a new version is released. For more information, see Automatically use new versions of extensions in the CloudFormation User Guide. public let publicVersionNumber: String? /// When the version was registered. 
public let timeCreated: Date? @@ -7107,7 +7107,7 @@ extension CloudFormation { } public struct UpdateStackInput: AWSEncodableShape { - /// In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability. You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Only one of the Capabilities and ResourceType parameters can be specified. + /// In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. 
If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation. If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability. You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs. Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified. For more information, see Perform custom processing on CloudFormation templates with template macros. Only one of the Capabilities and ResourceType parameters can be specified. @OptionalCustomCoding> public var capabilities: [Capability]? /// A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to update a stack with the same name. You might retry UpdateStack requests to ensure that CloudFormation successfully received them. All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1. In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation . For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002. @@ -7120,12 +7120,12 @@ extension CloudFormation { /// A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type. @OptionalCustomCoding> public var parameters: [Parameter]? - /// The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource that you're updating, the stack update fails.
By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified. + /// The template resource types that you have permissions to work with for this update stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. If the list of resource types doesn't include a resource that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Control access with Identity and Access Management. Only one of the Capabilities and ResourceType parameters can be specified. @OptionalCustomCoding> public var resourceTypes: [String]? /// When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain. Default: false public let retainExceptOnCreate: Bool? - /// The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials. + /// The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege. If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials. public let roleARN: String? /// The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards. public let rollbackConfiguration: RollbackConfiguration? @@ -7142,9 +7142,9 @@ extension CloudFormation { /// Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags. If you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify an empty value, CloudFormation removes all associated tags. @OptionalCustomCoding> public var tags: [Tag]? - /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. (For more information, go to Template Anatomy in the CloudFormation User Guide.) Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true. 
+ /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true. public let templateBody: String? - /// Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true. + /// Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true. public let templateURL: String? /// Reuse the existing template that is associated with the stack that you are updating. Conditional: You must specify only one of the following parameters: TemplateBody, TemplateURL, or set the UsePreviousTemplate to true. public let usePreviousTemplate: Bool? @@ -7320,7 +7320,7 @@ extension CloudFormation { public let autoDeployment: AutoDeployment? /// [Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account. By default, SELF is specified. Use SELF for stack sets with self-managed permissions. If you are signed in to the management account, specify SELF. If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN. Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide. public let callAs: CallAs? - /// In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM Resources in CloudFormation Templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. 
If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail. + /// In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances. CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities. The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. If you have IAM resources, you can specify either capability. If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error. If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary. AWS::IAM::AccessKey AWS::IAM::Group AWS::IAM::InstanceProfile AWS::IAM::Policy AWS::IAM::Role AWS::IAM::User AWS::IAM::UserToGroupAddition For more information, see Acknowledging IAM resources in CloudFormation templates. CAPABILITY_AUTO_EXPAND Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates. Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail. @OptionalCustomCoding> public var capabilities: [Capability]? /// [Service-managed permissions] The Organizations accounts in which to update associated stack instances. To update all the stack instances associated with this stack set, do not specify DeploymentTargets or Regions. If the stack set update includes changes to the template (that is, if TemplateBody or TemplateURL is specified), or the Parameters, CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and Amazon Web Services Regions.
If the stack set update doesn't include changes to the template or parameters, CloudFormation updates the stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack instance status. @@ -7348,9 +7348,9 @@ extension CloudFormation { /// The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates these tags to supported resources that are created in the stacks. You can specify a maximum number of 50 tags. If you specify tags for this parameter, those tags replace any list of tags that are currently associated with this stack set. This means: If you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify any tags using this parameter, you must specify all the tags that you want associated with this stack set, even tags you've specified before (for example, when creating the stack set or during a previous update of the stack set.). Any tags that you don't include in the updated list of tags are removed from the stack set, and therefore from the stacks and resources as well. If you specify an empty value, CloudFormation removes all currently associated tags. If you specify new tags as part of an UpdateStackSet action, CloudFormation checks to see if you have the required IAM permission to tag resources. If you omit tags that are currently associated with the stack set from the list of tags you specify, CloudFormation assumes that you want to remove those tags from the stack set, and checks to see if you have permission to untag resources. If you don't have the necessary permission(s), the entire UpdateStackSet action fails with an access denied error, and the stack set is not updated. @OptionalCustomCoding> public var tags: [Tag]? - /// The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true. + /// The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true. public let templateBody: String? - /// The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, see Template Anatomy in the CloudFormation User Guide. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true. + /// The location of the file that contains the template body. The URL must point to a template (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true. public let templateURL: String? /// Use the existing template that's associated with the stack set that you're updating. Conditional: You must specify only one of the following parameters: TemplateBody or TemplateURL—or set UsePreviousTemplate to true. public let usePreviousTemplate: Bool? 
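The UpdateStackInput and UpdateStackSetInput doc comments revised above are easier to read next to a call site. The following is a minimal sketch of an UpdateStack call through Soto, not an excerpt from this diff: it assumes the generated memberwise initializer, the camel-cased Capability value .capabilityIam, and a recent soto-core where AWSClient() and client.shutdown() exist; the stack name, role ARN, and template URL are placeholders.

import SotoCloudFormation

// Sketch: update a stack whose template creates IAM resources, so the
// CAPABILITY_IAM acknowledgement described in the capabilities doc comment
// above must be passed explicitly. All identifiers are illustrative.
let client = AWSClient()
let cloudFormation = CloudFormation(client: client, region: .useast1)

let input = CloudFormation.UpdateStackInput(
    capabilities: [.capabilityIam],
    roleARN: "arn:aws:iam::123456789012:role/example-cloudformation-role",
    stackName: "example-stack",
    templateURL: "https://example-bucket.s3.amazonaws.com/template.yaml"
)
let output = try await cloudFormation.updateStack(input)
print(output.stackId ?? "no stack ID returned")
try await client.shutdown()

Omitting the capabilities array for a template that declares IAM resources would surface the InsufficientCapabilities error that the doc comment describes.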
@@ -7481,9 +7481,9 @@ extension CloudFormation { } public struct ValidateTemplateInput: AWSEncodableShape { - /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template Anatomy in the CloudFormation User Guide. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. + /// Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. public let templateBody: String? - /// Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. + /// Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://. Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only TemplateBody is used. public let templateURL: String? @inlinable @@ -7505,7 +7505,7 @@ extension CloudFormation { } public struct ValidateTemplateOutput: AWSDecodableShape { - /// The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error. For more information, see Acknowledging IAM Resources in CloudFormation Templates. + /// The capabilities found within the template. If your template contains IAM resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities error. For more information, see Acknowledging IAM resources in CloudFormation templates. @OptionalCustomCoding> public var capabilities: [Capability]? /// The list of resources that generated the values in the Capabilities response element. @@ -7541,7 +7541,7 @@ extension CloudFormation { /// The properties of the resource that are impacted by this warning. @OptionalCustomCoding> public var properties: [WarningProperty]? - /// The type of this warning. For more information, see IaC generator and write-only properties in the CloudFormation User Guide. MUTUALLY_EXCLUSIVE_PROPERTIES - The resource requires mutually-exclusive write-only properties. The IaC generator selects one set of mutually exclusive properties and converts the included properties into parameters. The parameter names have a suffix OneOf and the parameter descriptions indicate that the corresponding property can be replaced with other exclusive properties. UNSUPPORTED_PROPERTIES - Unsupported properties are present in the resource. One example of unsupported properties would be a required write-only property that is an array, because a parameter cannot be an array. Another example is an optional write-only property. 
MUTUALLY_EXCLUSIVE_TYPES - One or more required write-only properties are found in the resource, and the type of that property can be any of several types. Currently the resource and property reference documentation does not indicate if a property uses a type of oneOf or anyOf. You need to look at the resource provider schema. + /// The type of this warning. For more information, see Resolve write-only properties in the CloudFormation User Guide. MUTUALLY_EXCLUSIVE_PROPERTIES - The resource requires mutually-exclusive write-only properties. The IaC generator selects one set of mutually exclusive properties and converts the included properties into parameters. The parameter names have a suffix OneOf and the parameter descriptions indicate that the corresponding property can be replaced with other exclusive properties. UNSUPPORTED_PROPERTIES - Unsupported properties are present in the resource. One example of unsupported properties would be a required write-only property that is an array, because a parameter cannot be an array. Another example is an optional write-only property. MUTUALLY_EXCLUSIVE_TYPES - One or more required write-only properties are found in the resource, and the type of that property can be any of several types. Currently the resource and property reference documentation does not indicate if a property uses a type of oneOf or anyOf. You need to look at the resource provider schema. public let type: WarningType? @inlinable diff --git a/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift b/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift index 2405a51981..41399577be 100644 --- a/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift +++ b/Sources/Soto/Services/CloudTrail/CloudTrail_api.swift @@ -638,7 +638,7 @@ public struct CloudTrail: AWSService { return try await self.getEventDataStore(input, logger: logger) } - /// Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following: If your event selector includes read-only events, write-only events, or all events. This applies to both management events and data events. If your event selector includes management events. If your event selector includes data events, the resources on which you are logging data events. For more information about logging management and data events, see the following topics in the CloudTrail User Guide: Logging management events Logging data events + /// Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following: If your event selector includes read-only events, write-only events, or all events. This applies to management events, data events, and network activity events. If your event selector includes management events. If your event selector includes network activity events, the event sources for which you are logging network activity events. If your event selector includes data events, the resources on which you are logging data events. 
For more information about logging management, data, and network activity events, see the following topics in the CloudTrail User Guide: Logging management events Logging data events Logging network activity events @Sendable @inlinable public func getEventSelectors(_ input: GetEventSelectorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetEventSelectorsResponse { @@ -651,7 +651,7 @@ public struct CloudTrail: AWSService { logger: logger ) } - /// Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following: If your event selector includes read-only events, write-only events, or all events. This applies to both management events and data events. If your event selector includes management events. If your event selector includes data events, the resources on which you are logging data events. For more information about logging management and data events, see the following topics in the CloudTrail User Guide: Logging management events Logging data events + /// Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following: If your event selector includes read-only events, write-only events, or all events. This applies to management events, data events, and network activity events. If your event selector includes management events. If your event selector includes network activity events, the event sources for which you are logging network activity events. If your event selector includes data events, the resources on which you are logging data events. For more information about logging management, data, and network activity events, see the following topics in the CloudTrail User Guide: Logging management events Logging data events Logging network activity events /// /// Parameters: /// - trailName: Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are not valid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail @@ -1227,7 +1227,7 @@ public struct CloudTrail: AWSService { return try await self.lookupEvents(input, logger: logger) } - /// Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. 
Example You create an event selector for a trail and specify that you want write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide. + /// Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. You can use AdvancedEventSelectors to log management events, data events for all resource types, and network activity events. You can use EventSelectors to log management events and data events for the following resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object You can't use EventSelectors to log network activity events. If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example You create an event selector for a trail and specify that you want to log write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. 
For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide. @Sendable @inlinable public func putEventSelectors(_ input: PutEventSelectorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutEventSelectorsResponse { @@ -1240,11 +1240,11 @@ public struct CloudTrail: AWSService { logger: logger ) } - /// Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example You create an event selector for a trail and specify that you want write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide. + /// Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. You can use AdvancedEventSelectors to log management events, data events for all resource types, and network activity events. You can use EventSelectors to log management events and data events for the following resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object You can't use EventSelectors to log network activity events. If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. 
By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events. When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event. Example You create an event selector for a trail and specify that you want to log write-only events. The EC2 GetConsoleOutput and RunInstances API operations occur in your account. CloudTrail evaluates whether the events match your event selectors. The RunInstances is a write-only event and it matches your event selector. The trail logs the event. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event. The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown. You can configure up to five event selectors for each trail. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide. /// /// Parameters: - /// - advancedEventSelectors: Specifies the settings for advanced event selectors. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide. - /// - eventSelectors: Specifies the settings for your event selectors. You can configure up to five event selectors for a trail. You can use either EventSelectors or AdvancedEventSelectors in a PutEventSelectors request, but not both. If you apply EventSelectors to a trail, any existing AdvancedEventSelectors are overwritten. + /// - advancedEventSelectors: Specifies the settings for advanced event selectors. You can use advanced event selectors to log management events, data events for all resource types, and network activity events. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events and Logging network activity events in the CloudTrail User Guide. + /// - eventSelectors: Specifies the settings for your event selectors. You can use event selectors to log management events and data events for the following resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object You can't use event selectors to log network activity events. You can configure up to five event selectors for a trail. You can use either EventSelectors or AdvancedEventSelectors in a PutEventSelectors request, but not both. 
If you apply EventSelectors to a trail, any existing AdvancedEventSelectors are overwritten. /// - trailName: Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are not valid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the following format. arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail /// - logger: Logger use during operation @inlinable @@ -1422,7 +1422,7 @@ public struct CloudTrail: AWSService { return try await self.restoreEventDataStore(input, logger: logger) } - /// Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION and the eventCategory must be Management, Data, or ConfigurationItem. + /// Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem. @Sendable @inlinable public func startEventDataStoreIngestion(_ input: StartEventDataStoreIngestionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartEventDataStoreIngestionResponse { @@ -1435,7 +1435,7 @@ public struct CloudTrail: AWSService { logger: logger ) } - /// Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION and the eventCategory must be Management, Data, or ConfigurationItem. + /// Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem. /// /// Parameters: /// - eventDataStore: The ARN (or ID suffix of the ARN) of the event data store for which you want to start ingestion. @@ -1559,7 +1559,7 @@ public struct CloudTrail: AWSService { return try await self.startQuery(input, logger: logger) } - /// Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To stop ingestion, the event data store Status must be ENABLED and the eventCategory must be Management, Data, or ConfigurationItem. + /// Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To stop ingestion, the event data store Status must be ENABLED and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem. @Sendable @inlinable public func stopEventDataStoreIngestion(_ input: StopEventDataStoreIngestionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StopEventDataStoreIngestionResponse { @@ -1572,7 +1572,7 @@ public struct CloudTrail: AWSService { logger: logger ) } - /// Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. 
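To illustrate the PutEventSelectors behavior described above, here is a hedged sketch of switching a trail to advanced event selectors that capture the newly supported network activity events. The trail name, selector name, and event source are placeholders; it assumes an existing AWSClient named awsClient, an async context, and the generated member-wise initializers of the AdvancedEventSelector and AdvancedFieldSelector shapes referenced in this diff.

import SotoCloudTrail

// Sketch: log EC2 network activity events, narrowed to denied VPC endpoint calls.
let cloudTrail = CloudTrail(client: awsClient, region: .useast1)

let networkActivitySelector = CloudTrail.AdvancedEventSelector(
    fieldSelectors: [
        .init(equals: ["NetworkActivity"], field: "eventCategory"),
        .init(equals: ["ec2.amazonaws.com"], field: "eventSource"),   // required for network activity events
        .init(equals: ["VpceAccessDenied"], field: "errorCode")       // optional: only denied VPC endpoint calls
    ],
    name: "EC2 network activity"
)

// Applying AdvancedEventSelectors overwrites any existing basic EventSelectors on the trail.
_ = try await cloudTrail.putEventSelectors(
    advancedEventSelectors: [networkActivitySelector],
    trailName: "my-network-activity-trail"
)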
To stop ingestion, the event data store Status must be ENABLED and the eventCategory must be Management, Data, or ConfigurationItem. + /// Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To stop ingestion, the event data store Status must be ENABLED and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem. /// /// Parameters: /// - eventDataStore: The ARN (or ID suffix of the ARN) of the event data store for which you want to stop ingestion. @@ -1681,7 +1681,7 @@ public struct CloudTrail: AWSService { return try await self.updateChannel(input, logger: logger) } - /// Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 7 and 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, TerminationProtection is enabled. For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management or data events in your event data store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store. + /// Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 7 and 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, TerminationProtection is enabled. For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management, data, or network activity events in your event data store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store. @Sendable @inlinable public func updateEventDataStore(_ input: UpdateEventDataStoreRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateEventDataStoreResponse { @@ -1694,7 +1694,7 @@ public struct CloudTrail: AWSService { logger: logger ) } - /// Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 7 and 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, TerminationProtection is enabled. For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management or data events in your event data store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors. 
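Since the ingestion operations above now also accept event data stores with the NetworkActivity category, a short sketch of pausing and resuming ingestion may help. The store ID is a placeholder; it assumes an existing AWSClient named awsClient and an async context.

import SotoCloudTrail

// Sketch: pause and later resume ingestion on an event data store. The store's
// eventCategory may be Management, Data, NetworkActivity, or ConfigurationItem.
let cloudTrail = CloudTrail(client: awsClient, region: .useast1)
let storeID = "EXAMPLE-ee54-4813-92d5-999aeEXAMPLE"   // placeholder ARN suffix

_ = try await cloudTrail.stopEventDataStoreIngestion(eventDataStore: storeID)
// Once the store's Status is STOPPED_INGESTION, ingestion can be restarted:
_ = try await cloudTrail.startEventDataStoreIngestion(eventDataStore: storeID)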
For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store. + /// Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 7 and 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, TerminationProtection is enabled. For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management, data, or network activity events in your event data store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store. /// /// Parameters: /// - advancedEventSelectors: The advanced event selectors used to select events for the event data store. You can configure up to five advanced event selectors for each event data store. diff --git a/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift b/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift index a3ae93fcb9..5446f425ca 100644 --- a/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift +++ b/Sources/Soto/Services/CloudTrail/CloudTrail_shapes.swift @@ -194,7 +194,7 @@ extension CloudTrail { public let endsWith: [String]? /// An operator that includes events that match the exact value of the event record field specified as the value of Field. This is the only valid operator that you can use with the readOnly, eventCategory, and resources.type fields. public let equals: [String]? - /// A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported. For CloudTrail management events, supported fields include readOnly, eventCategory, and eventSource. For CloudTrail data events, supported fields include readOnly, eventCategory, eventName, resources.type, and resources.ARN. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory. readOnly - Optional. Can be set to Equals a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events. eventSource - For filtering management events only. This can be set to NotEquals kms.amazonaws.com or NotEquals rdsdata.amazonaws.com. eventName - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as PutBucket or GetSnapshotBlock. You can have multiple values for this field, separated by commas. eventCategory - This is required and must be set to Equals. For CloudTrail management events, the value must be Management. For CloudTrail data events, the value must be Data. 
The following are used only for event data stores: For CloudTrail Insights events, the value must be Insight. For Config configuration items, the value must be ConfigurationItem. For Audit Manager evidence, the value must be Evidence. For non-Amazon Web Services events, the value must be ActivityAuditLog. resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator, and the value can be one of the following: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object AWS::AppConfig::Configuration AWS::B2BI::Transformer AWS::Bedrock::AgentAlias AWS::Bedrock::KnowledgeBase AWS::Cassandra::Table AWS::CloudFront::KeyValueStore AWS::CloudTrail::Channel AWS::CodeWhisperer::Customization AWS::CodeWhisperer::Profile AWS::Cognito::IdentityPool AWS::DynamoDB::Stream AWS::EC2::Snapshot AWS::EMRWAL::Workspace AWS::FinSpace::Environment AWS::Glue::Table AWS::GreengrassV2::ComponentVersion AWS::GreengrassV2::Deployment AWS::GuardDuty::Detector AWS::IoT::Certificate AWS::IoT::Thing AWS::IoTSiteWise::Asset AWS::IoTSiteWise::TimeSeries AWS::IoTTwinMaker::Entity AWS::IoTTwinMaker::Workspace AWS::KendraRanking::ExecutionPlan AWS::KinesisVideo::Stream AWS::ManagedBlockchain::Network AWS::ManagedBlockchain::Node AWS::MedicalImaging::Datastore AWS::NeptuneGraph::Graph AWS::PCAConnectorAD::Connector AWS::QApps:QApp AWS::QBusiness::Application AWS::QBusiness::DataSource AWS::QBusiness::Index AWS::QBusiness::WebExperience AWS::RDS::DBCluster AWS::S3::AccessPoint AWS::S3ObjectLambda::AccessPoint AWS::S3Outposts::Object AWS::SageMaker::Endpoint AWS::SageMaker::ExperimentTrialComponent AWS::SageMaker::FeatureGroup AWS::ServiceDiscovery::Namespace AWS::ServiceDiscovery::Service AWS::SCN::Instance AWS::SNS::PlatformEndpoint AWS::SNS::Topic AWS::SQS::Queue AWS::SSM::ManagedNode AWS::SSMMessages::ControlChannel AWS::SWF::Domain AWS::ThinClient::Device AWS::ThinClient::Environment AWS::Timestream::Database AWS::Timestream::Table AWS::VerifiedPermissions::PolicyStore AWS::XRay::Trace You can have only one resources.type field per selector. To log data events on more than one resource type, add another selector. resources.ARN - You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. You can't use the resources.ARN field to filter resource types that do not have ARNs. The resources.ARN field can be set one of the following. If resources.type equals AWS::S3::Object, the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value. The trailing slash is intentional; do not exclude it. Replace the text between less than and greater than symbols (<>) with resource-specific information. 
arn::s3:::/ arn::s3:::// When resources.type equals AWS::DynamoDB::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::dynamodb:::table/ When resources.type equals AWS::Lambda::Function, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::lambda:::function: When resources.type equals AWS::AppConfig::Configuration, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::appconfig:::application//environment//configuration/ When resources.type equals AWS::B2BI::Transformer, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::b2bi:::transformer/ When resources.type equals AWS::Bedrock::AgentAlias, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::bedrock:::agent-alias// When resources.type equals AWS::Bedrock::KnowledgeBase, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::bedrock:::knowledge-base/ When resources.type equals AWS::Cassandra::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cassandra:::/keyspace//table/ When resources.type equals AWS::CloudFront::KeyValueStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cloudfront:::key-value-store/ When resources.type equals AWS::CloudTrail::Channel, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cloudtrail:::channel/ When resources.type equals AWS::CodeWhisperer::Customization, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::codewhisperer:::customization/ When resources.type equals AWS::CodeWhisperer::Profile, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::codewhisperer:::profile/ When resources.type equals AWS::Cognito::IdentityPool, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::cognito-identity:::identitypool/ When resources.type equals AWS::DynamoDB::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::dynamodb:::table//stream/ When resources.type equals AWS::EC2::Snapshot, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::ec2:::snapshot/ When resources.type equals AWS::EMRWAL::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::emrwal:::workspace/ When resources.type equals AWS::FinSpace::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::finspace:::environment/ When resources.type equals AWS::Glue::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::glue:::table// When resources.type equals AWS::GreengrassV2::ComponentVersion, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::greengrass:::components/ When resources.type equals AWS::GreengrassV2::Deployment, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::greengrass:::deployments/ When resources.type equals AWS::GuardDuty::Detector, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::guardduty:::detector/ When resources.type equals AWS::IoT::Certificate, and the operator is 
set to Equals or NotEquals, the ARN must be in the following format: arn::iot:::cert/ When resources.type equals AWS::IoT::Thing, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iot:::thing/ When resources.type equals AWS::IoTSiteWise::Asset, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iotsitewise:::asset/ When resources.type equals AWS::IoTSiteWise::TimeSeries, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iotsitewise:::timeseries/ When resources.type equals AWS::IoTTwinMaker::Entity, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iottwinmaker:::workspace//entity/ When resources.type equals AWS::IoTTwinMaker::Workspace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::iottwinmaker:::workspace/ When resources.type equals AWS::KendraRanking::ExecutionPlan, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::kendra-ranking:::rescore-execution-plan/ When resources.type equals AWS::KinesisVideo::Stream, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::kinesisvideo:::stream// When resources.type equals AWS::ManagedBlockchain::Network, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::managedblockchain:::networks/ When resources.type equals AWS::ManagedBlockchain::Node, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::managedblockchain:::nodes/ When resources.type equals AWS::MedicalImaging::Datastore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::medical-imaging:::datastore/ When resources.type equals AWS::NeptuneGraph::Graph, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::neptune-graph:::graph/ When resources.type equals AWS::PCAConnectorAD::Connector, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::pca-connector-ad:::connector/ When resources.type equals AWS::QApps:QApp, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qapps:::application//qapp/ When resources.type equals AWS::QBusiness::Application, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application/ When resources.type equals AWS::QBusiness::DataSource, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//index//data-source/ When resources.type equals AWS::QBusiness::Index, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//index/ When resources.type equals AWS::QBusiness::WebExperience, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::qbusiness:::application//web-experience/ When resources.type equals AWS::RDS::DBCluster, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::rds:::cluster/ When resources.type equals AWS::S3::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats. 
To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don’t include the object path, and use the StartsWith or NotStartsWith operators. arn::s3:::accesspoint/ arn::s3:::accesspoint//object/ When resources.type equals AWS::S3ObjectLambda::AccessPoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::s3-object-lambda:::accesspoint/ When resources.type equals AWS::S3Outposts::Object, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::s3-outposts::: When resources.type equals AWS::SageMaker::Endpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::endpoint/ When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::experiment-trial-component/ When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sagemaker:::feature-group/ When resources.type equals AWS::SCN::Instance, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::scn:::instance/ When resources.type equals AWS::ServiceDiscovery::Namespace, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::servicediscovery:::namespace/ When resources.type equals AWS::ServiceDiscovery::Service, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::servicediscovery:::service/ When resources.type equals AWS::SNS::PlatformEndpoint, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sns:::endpoint/// When resources.type equals AWS::SNS::Topic, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sns::: When resources.type equals AWS::SQS::Queue, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::sqs::: When resources.type equals AWS::SSM::ManagedNode, and the operator is set to Equals or NotEquals, the ARN must be in one of the following formats: arn::ssm:::managed-instance/ arn::ec2:::instance/ When resources.type equals AWS::SSMMessages::ControlChannel, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::ssmmessages:::control-channel/ When resources.type equals AWS::SWF::Domain, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::swf:::domain/ When resources.type equals AWS::ThinClient::Device, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::thinclient:::device/ When resources.type equals AWS::ThinClient::Environment, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::thinclient:::environment/ When resources.type equals AWS::Timestream::Database, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::timestream:::database/ When resources.type equals AWS::Timestream::Table, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::timestream:::database//table/ When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is set to Equals or NotEquals, the ARN must be in the following format: arn::verifiedpermissions:::policy-store/ + /// A field 
in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported. For CloudTrail management events, supported fields include eventCategory (required), eventSource, and readOnly. For CloudTrail data events, supported fields include eventCategory (required), resources.type (required), eventName, readOnly, and resources.ARN. For CloudTrail network activity events, supported fields include eventCategory (required), eventSource (required), eventName, errorCode, and vpcEndpointId. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory. readOnly - This is an optional field that is only used for management events and data events. This field can be set to Equals with a value of true or false. If you do not add this field, CloudTrail logs both read and write events. A value of true logs only read events. A value of false logs only write events. eventSource - This field is only used for management events and network activity events. For management events, this is an optional field that can be set to NotEquals kms.amazonaws.com to exclude KMS management events, or NotEquals rdsdata.amazonaws.com to exclude RDS management events. For network activity events, this is a required field that only uses the Equals operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source. The following are valid values for network activity events: cloudtrail.amazonaws.com ec2.amazonaws.com kms.amazonaws.com secretsmanager.amazonaws.com eventName - This is an optional field that is only used for data events and network activity events. You can use any operator with eventName. You can use it to filter in or filter out specific events. You can have multiple values for this field, separated by commas. eventCategory - This field is required and must be set to Equals. For CloudTrail management events, the value must be Management. For CloudTrail data events, the value must be Data. For CloudTrail network activity events, the value must be NetworkActivity. The following are used only for event data stores: For CloudTrail Insights events, the value must be Insight. For Config configuration items, the value must be ConfigurationItem. For Audit Manager evidence, the value must be Evidence. For non-Amazon Web Services events, the value must be ActivityAuditLog. errorCode - This field is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid errorCode is VpceAccessDenied. errorCode can only use the Equals operator. resources.type - This field is required for CloudTrail data events. resources.type can only use the Equals operator. 
The value can be one of the following: AWS::AppConfig::Configuration AWS::B2BI::Transformer AWS::Bedrock::AgentAlias AWS::Bedrock::FlowAlias AWS::Bedrock::Guardrail AWS::Bedrock::KnowledgeBase AWS::Cassandra::Table AWS::CloudFront::KeyValueStore AWS::CloudTrail::Channel AWS::CloudWatch::Metric AWS::CodeWhisperer::Customization AWS::CodeWhisperer::Profile AWS::Cognito::IdentityPool AWS::DynamoDB::Stream AWS::DynamoDB::Table AWS::EC2::Snapshot AWS::EMRWAL::Workspace AWS::FinSpace::Environment AWS::Glue::Table AWS::GreengrassV2::ComponentVersion AWS::GreengrassV2::Deployment AWS::GuardDuty::Detector AWS::IoT::Certificate AWS::IoT::Thing AWS::IoTSiteWise::Asset AWS::IoTSiteWise::TimeSeries AWS::IoTTwinMaker::Entity AWS::IoTTwinMaker::Workspace AWS::KendraRanking::ExecutionPlan AWS::Kinesis::Stream AWS::Kinesis::StreamConsumer AWS::KinesisVideo::Stream AWS::Lambda::Function AWS::MachineLearning::MlModel AWS::ManagedBlockchain::Network AWS::ManagedBlockchain::Node AWS::MedicalImaging::Datastore AWS::NeptuneGraph::Graph AWS::One::UKey AWS::One::User AWS::PaymentCryptography::Alias AWS::PaymentCryptography::Key AWS::PCAConnectorAD::Connector AWS::PCAConnectorSCEP::Connector AWS::QApps:QApp AWS::QBusiness::Application AWS::QBusiness::DataSource AWS::QBusiness::Index AWS::QBusiness::WebExperience AWS::RDS::DBCluster AWS::RUM::AppMonitor AWS::S3::AccessPoint AWS::S3::Object AWS::S3Express::Object AWS::S3ObjectLambda::AccessPoint AWS::S3Outposts::Object AWS::SageMaker::Endpoint AWS::SageMaker::ExperimentTrialComponent AWS::SageMaker::FeatureGroup AWS::ServiceDiscovery::Namespace AWS::ServiceDiscovery::Service AWS::SCN::Instance AWS::SNS::PlatformEndpoint AWS::SNS::Topic AWS::SQS::Queue AWS::SSM::ManagedNode AWS::SSMMessages::ControlChannel AWS::StepFunctions::StateMachine AWS::SWF::Domain AWS::ThinClient::Device AWS::ThinClient::Environment AWS::Timestream::Database AWS::Timestream::Table AWS::VerifiedPermissions::PolicyStore AWS::XRay::Trace You can have only one resources.type field per selector. To log events on more than one resource type, add another selector. resources.ARN - The resources.ARN is an optional field for data events. You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value. For information about filtering data events on the resources.ARN field, see Filtering data events by resources.ARN in the CloudTrail User Guide. You can't use the resources.ARN field to filter resource types that do not have ARNs. vpcEndpointId - This field is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with vpcEndpointId. public let field: String /// An operator that excludes events that match the last few characters of the event record field specified as the value of Field. public let notEndsWith: [String]? @@ -686,7 +686,7 @@ extension CloudTrail { public struct DataResource: AWSEncodableShape & AWSDecodableShape { /// The resource type in which you want to log data events. You can specify the following basic event selector resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object Additional resource types are available through advanced event selectors. 
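Following the field documentation above, here is a hedged sketch of a data-event advanced selector scoped to a single S3 bucket: resources.type uses Equals, and resources.ARN uses StartsWith with only the bucket ARN so every object under the bucket matches. The bucket and trail names are placeholders, and the member-wise initializers and an existing awsClient in an async context are assumed.

import SotoCloudTrail

// Sketch: log data events only for objects in one S3 bucket via StartsWith on resources.ARN.
let cloudTrail = CloudTrail(client: awsClient, region: .useast1)

let s3DataEvents = CloudTrail.AdvancedEventSelector(
    fieldSelectors: [
        .init(equals: ["Data"], field: "eventCategory"),
        .init(equals: ["AWS::S3::Object"], field: "resources.type"),
        .init(field: "resources.ARN", startsWith: ["arn:aws:s3:::amzn-s3-demo-bucket1/"])
    ],
    name: "S3 data events for one bucket"
)

_ = try await cloudTrail.putEventSelectors(
    advancedEventSelectors: [s3DataEvents],
    trailName: "my-data-events-trail"
)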
For more information about these additional resource types, see AdvancedFieldSelector. public let type: String? - /// An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type. To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3. This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account. To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs data events for all objects in this S3 bucket. To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images. The trail logs data events for objects in this S3 bucket that match the prefix. To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda. This also enables logging of Invoke activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account. To log data events for a specific Lambda function, specify the function ARN. Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2. To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb. + /// An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type. To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3. This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account. To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::amzn-s3-demo-bucket1/. The trail logs data events for all objects in this S3 bucket. To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::amzn-s3-demo-bucket1/example-images. The trail logs data events for objects in this S3 bucket that match the prefix. To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda. This also enables logging of Invoke activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account. To log data events for a specific Lambda function, specify the function ARN. Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2. To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb. public let values: [String]? 
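For the basic selector path that the DataResource documentation above describes, here is a sketch of one EventSelector that logs write-only management events plus data events for an S3 prefix and a single Lambda function. The ARNs are the examples from the documentation, the trail name is a placeholder, and the ReadWriteType case name and member-wise initializers follow Soto's usual generated conventions; an existing awsClient and async context are assumed.

import SotoCloudTrail

// Sketch: a basic event selector (DataResource-based). Basic selectors cover only
// AWS::DynamoDB::Table, AWS::Lambda::Function, and AWS::S3::Object; anything else,
// including network activity events, requires AdvancedEventSelectors instead.
let cloudTrail = CloudTrail(client: awsClient, region: .useast1)

let selector = CloudTrail.EventSelector(
    dataResources: [
        .init(type: "AWS::S3::Object", values: ["arn:aws:s3:::amzn-s3-demo-bucket1/example-images"]),
        .init(type: "AWS::Lambda::Function", values: ["arn:aws:lambda:us-west-2:111111111111:function:helloworld"])
    ],
    includeManagementEvents: true,
    readWriteType: .writeOnly
)

_ = try await cloudTrail.putEventSelectors(eventSelectors: [selector], trailName: "my-trail")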
@inlinable @@ -1150,7 +1150,7 @@ extension CloudTrail { } public struct EventSelector: AWSEncodableShape & AWSDecodableShape { - /// CloudTrail supports data event logging for Amazon S3 objects, Lambda functions, and Amazon DynamoDB tables with basic event selectors. You can specify up to 250 resources for an individual event selector, but the total number of data resources cannot exceed 250 across all event selectors in a trail. This limit does not apply if you configure resource logging for all data events. For more information, see Data Events and Limits in CloudTrail in the CloudTrail User Guide. + /// CloudTrail supports data event logging for Amazon S3 objects in standard S3 buckets, Lambda functions, and Amazon DynamoDB tables with basic event selectors. You can specify up to 250 resources for an individual event selector, but the total number of data resources cannot exceed 250 across all event selectors in a trail. This limit does not apply if you configure resource logging for all data events. For more information, see Data Events and Limits in CloudTrail in the CloudTrail User Guide. To log data events for all other resource types including objects stored in directory buckets, you must use AdvancedEventSelectors. You must also use AdvancedEventSelectors if you want to filter on the eventName field. public let dataResources: [DataResource]? /// An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out Key Management Service or Amazon RDS Data API events by containing kms.amazonaws.com or rdsdata.amazonaws.com. By default, ExcludeManagementEventSources is empty, and KMS and Amazon RDS Data API events are logged to your trail. You can exclude management event sources only in Regions that support the event source. public let excludeManagementEventSources: [String]? @@ -2451,9 +2451,9 @@ extension CloudTrail { } public struct PutEventSelectorsRequest: AWSEncodableShape { - /// Specifies the settings for advanced event selectors. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide. + /// Specifies the settings for advanced event selectors. You can use advanced event selectors to log management events, data events for all resource types, and network activity events. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events and Logging network activity events in the CloudTrail User Guide. public let advancedEventSelectors: [AdvancedEventSelector]? - /// Specifies the settings for your event selectors. You can configure up to five event selectors for a trail. You can use either EventSelectors or AdvancedEventSelectors in a PutEventSelectors request, but not both. 
If you apply EventSelectors to a trail, any existing AdvancedEventSelectors are overwritten. + /// Specifies the settings for your event selectors. You can use event selectors to log management events and data events for the following resource types: AWS::DynamoDB::Table AWS::Lambda::Function AWS::S3::Object You can't use event selectors to log network activity events. You can configure up to five event selectors for a trail. You can use either EventSelectors or AdvancedEventSelectors in a PutEventSelectors request, but not both. If you apply EventSelectors to a trail, any existing AdvancedEventSelectors are overwritten. public let eventSelectors: [EventSelector]? /// Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements: Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) Start with a letter or number, and end with a letter or number Be between 3 and 128 characters Have no adjacent periods, underscores or dashes. Names like my-_namespace and my--namespace are not valid. Not be in IP address format (for example, 192.168.5.4) If you specify a trail ARN, it must be in the following format. arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail public let trailName: String diff --git a/Sources/Soto/Services/CodeArtifact/CodeArtifact_api.swift b/Sources/Soto/Services/CodeArtifact/CodeArtifact_api.swift index 25e29e31b6..673f3bd2ca 100644 --- a/Sources/Soto/Services/CodeArtifact/CodeArtifact_api.swift +++ b/Sources/Soto/Services/CodeArtifact/CodeArtifact_api.swift @@ -1078,6 +1078,7 @@ public struct CodeArtifact: AWSService { /// Parameters: /// - domain: The name of the domain that contains the repository. /// - domainOwner: The 12-digit account number of the Amazon Web Services account that owns the domain that contains the repository. It does not include dashes or spaces. + /// - endpointType: A string that specifies the type of endpoint. /// - format: Returns which endpoint of a repository to return. A repository has one endpoint for each package format. /// - repository: The name of the repository. /// - logger: Logger use during operation @@ -1085,6 +1086,7 @@ public struct CodeArtifact: AWSService { public func getRepositoryEndpoint( domain: String, domainOwner: String? = nil, + endpointType: EndpointType? 
= nil, format: PackageFormat, repository: String, logger: Logger = AWSClient.loggingDisabled @@ -1092,6 +1094,7 @@ public struct CodeArtifact: AWSService { let input = GetRepositoryEndpointRequest( domain: domain, domainOwner: domainOwner, + endpointType: endpointType, format: format, repository: repository ) diff --git a/Sources/Soto/Services/CodeArtifact/CodeArtifact_shapes.swift b/Sources/Soto/Services/CodeArtifact/CodeArtifact_shapes.swift index eca4dbfbe7..ac1de333f4 100644 --- a/Sources/Soto/Services/CodeArtifact/CodeArtifact_shapes.swift +++ b/Sources/Soto/Services/CodeArtifact/CodeArtifact_shapes.swift @@ -44,6 +44,12 @@ extension CodeArtifact { public var description: String { return self.rawValue } } + public enum EndpointType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dualstack = "dualstack" + case ipv4 = "ipv4" + public var description: String { return self.rawValue } + } + public enum ExternalConnectionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "Available" public var description: String { return self.rawValue } @@ -1913,15 +1919,18 @@ extension CodeArtifact { public let domain: String /// The 12-digit account number of the Amazon Web Services account that owns the domain that contains the repository. It does not include dashes or spaces. public let domainOwner: String? + /// A string that specifies the type of endpoint. + public let endpointType: EndpointType? /// Returns which endpoint of a repository to return. A repository has one endpoint for each package format. public let format: PackageFormat /// The name of the repository. public let repository: String @inlinable - public init(domain: String, domainOwner: String? = nil, format: PackageFormat, repository: String) { + public init(domain: String, domainOwner: String? = nil, endpointType: EndpointType? = nil, format: PackageFormat, repository: String) { self.domain = domain self.domainOwner = domainOwner + self.endpointType = endpointType self.format = format self.repository = repository } @@ -1931,6 +1940,7 @@ extension CodeArtifact { _ = encoder.container(keyedBy: CodingKeys.self) request.encodeQuery(self.domain, key: "domain") request.encodeQuery(self.domainOwner, key: "domain-owner") + request.encodeQuery(self.endpointType, key: "endpointType") request.encodeQuery(self.format, key: "format") request.encodeQuery(self.repository, key: "repository") } @@ -3743,7 +3753,7 @@ extension CodeArtifact { public struct RepositoryExternalConnectionInfo: AWSDecodableShape { /// The name of the external connection associated with a repository. public let externalConnectionName: String? - /// The package format associated with a repository's external connection. The valid package formats are: npm: A Node Package Manager (npm) package. pypi: A Python Package Index (PyPI) package. maven: A Maven package that contains compiled code in a distributable format, such as a JAR file. nuget: A NuGet package. + /// The package format associated with a repository's external connection. The valid package formats are: npm: A Node Package Manager (npm) package. pypi: A Python Package Index (PyPI) package. maven: A Maven package that contains compiled code in a distributable format, such as a JAR file. nuget: A NuGet package. generic: A generic package. ruby: A Ruby package. swift: A Swift package. cargo: A Cargo package. public let packageFormat: PackageFormat? /// The status of the external connection of a repository. 
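To show the new endpointType query parameter in use, here is a hedged sketch of requesting the dualstack endpoint of an npm repository. The domain and repository names are placeholders; it assumes an existing AWSClient named awsClient, an async context, and that the result shape exposes a repositoryEndpoint string as in the current generated code.

import SotoCodeArtifact

// Sketch: ask CodeArtifact for the dual-stack endpoint of an npm repository.
let codeArtifact = CodeArtifact(client: awsClient, region: .useast1)

let endpoint = try await codeArtifact.getRepositoryEndpoint(
    domain: "my-domain",
    endpointType: .dualstack,   // new in this change; defaults to the service's standard endpoint when omitted
    format: .npm,
    repository: "my-repo"
)
print(endpoint.repositoryEndpoint ?? "no endpoint returned")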
There is one valid value, Available. public let status: ExternalConnectionStatus? diff --git a/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift b/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift index 83a8f64cf0..b390d0089c 100644 --- a/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift +++ b/Sources/Soto/Services/CodeBuild/CodeBuild_api.swift @@ -318,6 +318,7 @@ public struct CodeBuild: AWSService { /// - imageId: The Amazon Machine Image (AMI) of the compute fleet. /// - name: The name of the compute fleet. /// - overflowBehavior: The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see Example policy statement to allow CodeBuild access to Amazon Web Services services required to create a VPC network interface. + /// - proxyConfiguration: The proxy configuration of the compute fleet. /// - scalingConfiguration: The scaling configuration of the compute fleet. /// - tags: A list of tag key and value pairs associated with this compute fleet. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. /// - vpcConfig: @@ -331,6 +332,7 @@ public struct CodeBuild: AWSService { imageId: String? = nil, name: String, overflowBehavior: FleetOverflowBehavior? = nil, + proxyConfiguration: ProxyConfiguration? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil, @@ -344,6 +346,7 @@ public struct CodeBuild: AWSService { imageId: imageId, name: name, overflowBehavior: overflowBehavior, + proxyConfiguration: proxyConfiguration, scalingConfiguration: scalingConfiguration, tags: tags, vpcConfig: vpcConfig @@ -1861,6 +1864,7 @@ public struct CodeBuild: AWSService { /// - fleetServiceRole: The service role associated with the compute fleet. For more information, see Allow a user to add a permission policy for a fleet service role in the CodeBuild User Guide. /// - imageId: The Amazon Machine Image (AMI) of the compute fleet. /// - overflowBehavior: The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see Example policy statement to allow CodeBuild access to Amazon Web Services services required to create a VPC network interface. + /// - proxyConfiguration: The proxy configuration of the compute fleet. /// - scalingConfiguration: The scaling configuration of the compute fleet. /// - tags: A list of tag key and value pairs associated with this compute fleet. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. /// - vpcConfig: @@ -1874,6 +1878,7 @@ public struct CodeBuild: AWSService { fleetServiceRole: String? = nil, imageId: String? = nil, overflowBehavior: FleetOverflowBehavior? = nil, + proxyConfiguration: ProxyConfiguration? = nil, scalingConfiguration: ScalingConfigurationInput? 
= nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil, @@ -1887,6 +1892,7 @@ public struct CodeBuild: AWSService { fleetServiceRole: fleetServiceRole, imageId: imageId, overflowBehavior: overflowBehavior, + proxyConfiguration: proxyConfiguration, scalingConfiguration: scalingConfiguration, tags: tags, vpcConfig: vpcConfig diff --git a/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift b/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift index b1fac915f5..8173f75447 100644 --- a/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift +++ b/Sources/Soto/Services/CodeBuild/CodeBuild_shapes.swift @@ -165,6 +165,24 @@ extension CodeBuild { public var description: String { return self.rawValue } } + public enum FleetProxyRuleBehavior: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allowAll = "ALLOW_ALL" + case denyAll = "DENY_ALL" + public var description: String { return self.rawValue } + } + + public enum FleetProxyRuleEffectType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allow = "ALLOW" + case deny = "DENY" + public var description: String { return self.rawValue } + } + + public enum FleetProxyRuleType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case domain = "DOMAIN" + case ip = "IP" + public var description: String { return self.rawValue } + } + public enum FleetScalingMetricType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case fleetUtilizationRate = "FLEET_UTILIZATION_RATE" public var description: String { return self.rawValue } @@ -371,6 +389,7 @@ extension CodeBuild { case filePath = "FILE_PATH" case headRef = "HEAD_REF" case releaseName = "RELEASE_NAME" + case repositoryName = "REPOSITORY_NAME" case tagName = "TAG_NAME" case workflowName = "WORKFLOW_NAME" public var description: String { return self.rawValue } @@ -379,6 +398,7 @@ extension CodeBuild { public enum WebhookScopeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case githubGlobal = "GITHUB_GLOBAL" case githubOrganization = "GITHUB_ORGANIZATION" + case gitlabGroup = "GITLAB_GROUP" public var description: String { return self.rawValue } } @@ -1285,6 +1305,8 @@ extension CodeBuild { public let name: String /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see Example policy statement to allow CodeBuild access to Amazon Web Services services required to create a VPC network interface. public let overflowBehavior: FleetOverflowBehavior? + /// The proxy configuration of the compute fleet. + public let proxyConfiguration: ProxyConfiguration? /// The scaling configuration of the compute fleet. public let scalingConfiguration: ScalingConfigurationInput? /// A list of tag key and value pairs associated with this compute fleet. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. @@ -1292,7 +1314,7 @@ extension CodeBuild { public let vpcConfig: VpcConfig? @inlinable - public init(baseCapacity: Int, computeType: ComputeType, environmentType: EnvironmentType, fleetServiceRole: String? = nil, imageId: String? 
= nil, name: String, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil) { + public init(baseCapacity: Int, computeType: ComputeType, environmentType: EnvironmentType, fleetServiceRole: String? = nil, imageId: String? = nil, name: String, overflowBehavior: FleetOverflowBehavior? = nil, proxyConfiguration: ProxyConfiguration? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil) { self.baseCapacity = baseCapacity self.computeType = computeType self.environmentType = environmentType @@ -1300,6 +1322,7 @@ extension CodeBuild { self.imageId = imageId self.name = name self.overflowBehavior = overflowBehavior + self.proxyConfiguration = proxyConfiguration self.scalingConfiguration = scalingConfiguration self.tags = tags self.vpcConfig = vpcConfig @@ -1312,6 +1335,7 @@ extension CodeBuild { try self.validate(self.name, name: "name", parent: name, max: 128) try self.validate(self.name, name: "name", parent: name, min: 2) try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9\\-_]{1,127}$") + try self.proxyConfiguration?.validate(name: "\(name).proxyConfiguration") try self.scalingConfiguration?.validate(name: "\(name).scalingConfiguration") try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") @@ -1328,6 +1352,7 @@ extension CodeBuild { case imageId = "imageId" case name = "name" case overflowBehavior = "overflowBehavior" + case proxyConfiguration = "proxyConfiguration" case scalingConfiguration = "scalingConfiguration" case tags = "tags" case vpcConfig = "vpcConfig" @@ -2055,6 +2080,8 @@ extension CodeBuild { public let name: String? /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see Example policy statement to allow CodeBuild access to Amazon Web Services services required to create a VPC network interface. public let overflowBehavior: FleetOverflowBehavior? + /// The proxy configuration of the compute fleet. + public let proxyConfiguration: ProxyConfiguration? /// The scaling configuration of the compute fleet. public let scalingConfiguration: ScalingConfigurationOutput? /// The status of the compute fleet. @@ -2064,7 +2091,7 @@ extension CodeBuild { public let vpcConfig: VpcConfig? @inlinable - public init(arn: String? = nil, baseCapacity: Int? = nil, computeType: ComputeType? = nil, created: Date? = nil, environmentType: EnvironmentType? = nil, fleetServiceRole: String? = nil, id: String? = nil, imageId: String? = nil, lastModified: Date? = nil, name: String? = nil, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationOutput? = nil, status: FleetStatus? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil) { + public init(arn: String? = nil, baseCapacity: Int? = nil, computeType: ComputeType? = nil, created: Date? = nil, environmentType: EnvironmentType? = nil, fleetServiceRole: String? = nil, id: String? = nil, imageId: String? = nil, lastModified: Date? = nil, name: String? = nil, overflowBehavior: FleetOverflowBehavior? = nil, proxyConfiguration: ProxyConfiguration? 
= nil, scalingConfiguration: ScalingConfigurationOutput? = nil, status: FleetStatus? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil) { self.arn = arn self.baseCapacity = baseCapacity self.computeType = computeType @@ -2076,6 +2103,7 @@ extension CodeBuild { self.lastModified = lastModified self.name = name self.overflowBehavior = overflowBehavior + self.proxyConfiguration = proxyConfiguration self.scalingConfiguration = scalingConfiguration self.status = status self.tags = tags @@ -2094,6 +2122,7 @@ extension CodeBuild { case lastModified = "lastModified" case name = "name" case overflowBehavior = "overflowBehavior" + case proxyConfiguration = "proxyConfiguration" case scalingConfiguration = "scalingConfiguration" case status = "status" case tags = "tags" @@ -2101,6 +2130,33 @@ extension CodeBuild { } } + public struct FleetProxyRule: AWSEncodableShape & AWSDecodableShape { + /// The behavior of the proxy rule. + public let effect: FleetProxyRuleEffectType + /// The destination of the proxy rule. + public let entities: [String] + /// The type of proxy rule. + public let type: FleetProxyRuleType + + @inlinable + public init(effect: FleetProxyRuleEffectType, entities: [String], type: FleetProxyRuleType) { + self.effect = effect + self.entities = entities + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.entities, name: "entities", parent: name, max: 100) + try self.validate(self.entities, name: "entities", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case effect = "effect" + case entities = "entities" + case type = "type" + } + } + public struct FleetStatus: AWSDecodableShape { /// Additional information about a compute fleet. Valid values include: CREATE_FAILED: The compute fleet has failed to create. UPDATE_FAILED: The compute fleet has failed to update. public let context: FleetContextCode? @@ -3360,6 +3416,31 @@ extension CodeBuild { } } + public struct ProxyConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The default behavior of outgoing traffic. + public let defaultBehavior: FleetProxyRuleBehavior? + /// An array of FleetProxyRule objects that represent the specified destination domains or IPs to allow or deny network access control to. + public let orderedProxyRules: [FleetProxyRule]? + + @inlinable + public init(defaultBehavior: FleetProxyRuleBehavior? = nil, orderedProxyRules: [FleetProxyRule]? = nil) { + self.defaultBehavior = defaultBehavior + self.orderedProxyRules = orderedProxyRules + } + + public func validate(name: String) throws { + try self.orderedProxyRules?.forEach { + try $0.validate(name: "\(name).orderedProxyRules[]") + } + try self.validate(self.orderedProxyRules, name: "orderedProxyRules", parent: name, max: 100) + } + + private enum CodingKeys: String, CodingKey { + case defaultBehavior = "defaultBehavior" + case orderedProxyRules = "orderedProxyRules" + } + } + public struct PutResourcePolicyInput: AWSEncodableShape { /// A JSON-formatted resource policy. For more information, see Sharing a Project and Sharing a Report Group in the CodeBuild User Guide. public let policy: String @@ -3808,11 +3889,11 @@ extension CodeBuild { } public struct ScopeConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The domain of the GitHub Enterprise organization. Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE + /// The domain of the GitHub Enterprise organization or the GitLab Self Managed group. 
Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE or GITLAB_SELF_MANAGED. public let domain: String? - /// The name of either the enterprise or organization that will send webhook events to CodeBuild, depending on if the webhook is a global or organization webhook respectively. + /// The name of either the group, enterprise, or organization that will send webhook events to CodeBuild, depending on the type of webhook. public let name: String - /// The type of scope for a GitHub webhook. + /// The type of scope for a GitHub or GitLab webhook. public let scope: WebhookScopeType @inlinable @@ -4423,6 +4504,8 @@ extension CodeBuild { public let imageId: String? /// The compute fleet overflow behavior. For overflow behavior QUEUE, your overflow builds need to wait on the existing fleet instance to become available. For overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see Example policy statement to allow CodeBuild access to Amazon Web Services services required to create a VPC network interface. public let overflowBehavior: FleetOverflowBehavior? + /// The proxy configuration of the compute fleet. + public let proxyConfiguration: ProxyConfiguration? /// The scaling configuration of the compute fleet. public let scalingConfiguration: ScalingConfigurationInput? /// A list of tag key and value pairs associated with this compute fleet. These tags are available for use by Amazon Web Services services that support CodeBuild build project tags. @@ -4430,7 +4513,7 @@ extension CodeBuild { public let vpcConfig: VpcConfig? @inlinable - public init(arn: String, baseCapacity: Int? = nil, computeType: ComputeType? = nil, environmentType: EnvironmentType? = nil, fleetServiceRole: String? = nil, imageId: String? = nil, overflowBehavior: FleetOverflowBehavior? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? = nil) { + public init(arn: String, baseCapacity: Int? = nil, computeType: ComputeType? = nil, environmentType: EnvironmentType? = nil, fleetServiceRole: String? = nil, imageId: String? = nil, overflowBehavior: FleetOverflowBehavior? = nil, proxyConfiguration: ProxyConfiguration? = nil, scalingConfiguration: ScalingConfigurationInput? = nil, tags: [Tag]? = nil, vpcConfig: VpcConfig? 
= nil) { self.arn = arn self.baseCapacity = baseCapacity self.computeType = computeType @@ -4438,6 +4521,7 @@ extension CodeBuild { self.fleetServiceRole = fleetServiceRole self.imageId = imageId self.overflowBehavior = overflowBehavior + self.proxyConfiguration = proxyConfiguration self.scalingConfiguration = scalingConfiguration self.tags = tags self.vpcConfig = vpcConfig @@ -4448,6 +4532,7 @@ extension CodeBuild { try self.validate(self.baseCapacity, name: "baseCapacity", parent: name, min: 1) try self.validate(self.fleetServiceRole, name: "fleetServiceRole", parent: name, min: 1) try self.validate(self.imageId, name: "imageId", parent: name, min: 1) + try self.proxyConfiguration?.validate(name: "\(name).proxyConfiguration") try self.scalingConfiguration?.validate(name: "\(name).scalingConfiguration") try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") @@ -4464,6 +4549,7 @@ extension CodeBuild { case fleetServiceRole = "fleetServiceRole" case imageId = "imageId" case overflowBehavior = "overflowBehavior" + case proxyConfiguration = "proxyConfiguration" case scalingConfiguration = "scalingConfiguration" case tags = "tags" case vpcConfig = "vpcConfig" diff --git a/Sources/Soto/Services/CodeConnections/CodeConnections_api.swift b/Sources/Soto/Services/CodeConnections/CodeConnections_api.swift index 50740f6087..e201752557 100644 --- a/Sources/Soto/Services/CodeConnections/CodeConnections_api.swift +++ b/Sources/Soto/Services/CodeConnections/CodeConnections_api.swift @@ -219,6 +219,7 @@ public struct CodeConnections: AWSService { /// - branch: The branch in the repository from which changes will be synced. /// - configFile: The file name of the configuration file that manages syncing between the connection and the repository. This configuration file is stored in the repository. /// - publishDeploymentStatus: Whether to enable or disable publishing of deployment status to source providers. + /// - pullRequestComment: A toggle that specifies whether to enable or disable pull request comments for the sync configuration to be created. /// - repositoryLinkId: The ID of the repository link created for the connection. A repository link allows Git sync to monitor and sync changes to files in a specified Git repository. /// - resourceName: The name of the Amazon Web Services resource (for example, a CloudFormation stack in the case of CFN_STACK_SYNC) that will be synchronized from the linked repository. /// - roleArn: The ARN of the IAM role that grants permission for Amazon Web Services to use Git sync to update a given Amazon Web Services resource on your behalf. @@ -230,6 +231,7 @@ public struct CodeConnections: AWSService { branch: String, configFile: String, publishDeploymentStatus: PublishDeploymentStatus? = nil, + pullRequestComment: PullRequestComment? = nil, repositoryLinkId: String, resourceName: String, roleArn: String, @@ -241,6 +243,7 @@ public struct CodeConnections: AWSService { branch: branch, configFile: configFile, publishDeploymentStatus: publishDeploymentStatus, + pullRequestComment: pullRequestComment, repositoryLinkId: repositoryLinkId, resourceName: resourceName, roleArn: roleArn, @@ -979,6 +982,7 @@ public struct CodeConnections: AWSService { /// - branch: The branch for the sync configuration to be updated. /// - configFile: The configuration file for the sync configuration to be updated. /// - publishDeploymentStatus: Whether to enable or disable publishing of deployment status to source providers. 
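The CodeBuild hunks above add a proxyConfiguration option to CreateFleet and UpdateFleet, backed by the new ProxyConfiguration and FleetProxyRule shapes. Below is a minimal sketch of how a caller might use the generated createFleet convenience method with a proxy rule; the client setup, fleet name, and allowed domain are placeholders, and the enum case spellings (.buildGeneral1Small, .linuxContainer, .queue) assume Soto's usual camel-casing of the service model values.

import SotoCodeBuild

// Sketch only: create a reserved-capacity fleet whose outbound traffic is restricted
// by the new proxy configuration. Fleet name and domain are hypothetical placeholders.
func createProxiedFleet() async throws {
    let client = AWSClient()
    let codeBuild = CodeBuild(client: client, region: .useast1)

    // Deny all egress by default, then allow a single domain.
    let proxyConfiguration = CodeBuild.ProxyConfiguration(
        defaultBehavior: .denyAll,
        orderedProxyRules: [
            CodeBuild.FleetProxyRule(effect: .allow, entities: ["github.com"], type: .domain)
        ]
    )

    _ = try await codeBuild.createFleet(
        baseCapacity: 1,
        computeType: .buildGeneral1Small,
        environmentType: .linuxContainer,
        name: "example-proxied-fleet",
        overflowBehavior: .queue,
        proxyConfiguration: proxyConfiguration
    )
    try await client.shutdown()
}

The same proxyConfiguration value can also be passed to updateFleet, which gains the identical parameter in this change, to adjust the rules on an existing fleet.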
+ /// - pullRequestComment: A toggle that specifies whether to enable or disable pull request comments for the sync configuration to be updated. /// - repositoryLinkId: The ID of the repository link for the sync configuration to be updated. /// - resourceName: The name of the Amazon Web Services resource for the sync configuration to be updated. /// - roleArn: The ARN of the IAM role for the sync configuration to be updated. @@ -990,6 +994,7 @@ public struct CodeConnections: AWSService { branch: String? = nil, configFile: String? = nil, publishDeploymentStatus: PublishDeploymentStatus? = nil, + pullRequestComment: PullRequestComment? = nil, repositoryLinkId: String? = nil, resourceName: String, roleArn: String? = nil, @@ -1001,6 +1006,7 @@ public struct CodeConnections: AWSService { branch: branch, configFile: configFile, publishDeploymentStatus: publishDeploymentStatus, + pullRequestComment: pullRequestComment, repositoryLinkId: repositoryLinkId, resourceName: resourceName, roleArn: roleArn, diff --git a/Sources/Soto/Services/CodeConnections/CodeConnections_shapes.swift b/Sources/Soto/Services/CodeConnections/CodeConnections_shapes.swift index 6dc08ff20f..37a680c86b 100644 --- a/Sources/Soto/Services/CodeConnections/CodeConnections_shapes.swift +++ b/Sources/Soto/Services/CodeConnections/CodeConnections_shapes.swift @@ -59,6 +59,12 @@ extension CodeConnections { public var description: String { return self.rawValue } } + public enum PullRequestComment: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum RepositorySyncStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case failed = "FAILED" case inProgress = "IN_PROGRESS" @@ -90,7 +96,7 @@ extension CodeConnections { // MARK: Shapes public struct Connection: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between Amazon Web Services. The ARN is never reused if the connection is deleted. + /// The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between Amazon Web Services services. The ARN is never reused if the connection is deleted. public let connectionArn: String? /// The name of the connection. Connection names must be unique in an Amazon Web Services account. public let connectionName: String? @@ -310,6 +316,8 @@ extension CodeConnections { public let configFile: String /// Whether to enable or disable publishing of deployment status to source providers. public let publishDeploymentStatus: PublishDeploymentStatus? + /// A toggle that specifies whether to enable or disable pull request comments for the sync configuration to be created. + public let pullRequestComment: PullRequestComment? /// The ID of the repository link created for the connection. A repository link allows Git sync to monitor and sync changes to files in a specified Git repository. public let repositoryLinkId: String /// The name of the Amazon Web Services resource (for example, a CloudFormation stack in the case of CFN_STACK_SYNC) that will be synchronized from the linked repository. @@ -322,10 +330,11 @@ extension CodeConnections { public let triggerResourceUpdateOn: TriggerResourceUpdateOn? @inlinable - public init(branch: String, configFile: String, publishDeploymentStatus: PublishDeploymentStatus?
= nil, repositoryLinkId: String, resourceName: String, roleArn: String, syncType: SyncConfigurationType, triggerResourceUpdateOn: TriggerResourceUpdateOn? = nil) { + public init(branch: String, configFile: String, publishDeploymentStatus: PublishDeploymentStatus? = nil, pullRequestComment: PullRequestComment? = nil, repositoryLinkId: String, resourceName: String, roleArn: String, syncType: SyncConfigurationType, triggerResourceUpdateOn: TriggerResourceUpdateOn? = nil) { self.branch = branch self.configFile = configFile self.publishDeploymentStatus = publishDeploymentStatus + self.pullRequestComment = pullRequestComment self.repositoryLinkId = repositoryLinkId self.resourceName = resourceName self.roleArn = roleArn @@ -350,6 +359,7 @@ extension CodeConnections { case branch = "Branch" case configFile = "ConfigFile" case publishDeploymentStatus = "PublishDeploymentStatus" + case pullRequestComment = "PullRequestComment" case repositoryLinkId = "RepositoryLinkId" case resourceName = "ResourceName" case roleArn = "RoleArn" @@ -1353,6 +1363,8 @@ extension CodeConnections { public let providerType: ProviderType /// Whether to enable or disable publishing of deployment status to source providers. public let publishDeploymentStatus: PublishDeploymentStatus? + /// A toggle that specifies whether to enable or disable pull request comments for the sync configuration to be created. + public let pullRequestComment: PullRequestComment? /// The ID of the repository link associated with a specific sync configuration. public let repositoryLinkId: String /// The name of the repository associated with a specific sync configuration. @@ -1367,12 +1379,13 @@ extension CodeConnections { public let triggerResourceUpdateOn: TriggerResourceUpdateOn? @inlinable - public init(branch: String, configFile: String? = nil, ownerId: String, providerType: ProviderType, publishDeploymentStatus: PublishDeploymentStatus? = nil, repositoryLinkId: String, repositoryName: String, resourceName: String, roleArn: String, syncType: SyncConfigurationType, triggerResourceUpdateOn: TriggerResourceUpdateOn? = nil) { + public init(branch: String, configFile: String? = nil, ownerId: String, providerType: ProviderType, publishDeploymentStatus: PublishDeploymentStatus? = nil, pullRequestComment: PullRequestComment? = nil, repositoryLinkId: String, repositoryName: String, resourceName: String, roleArn: String, syncType: SyncConfigurationType, triggerResourceUpdateOn: TriggerResourceUpdateOn? = nil) { self.branch = branch self.configFile = configFile self.ownerId = ownerId self.providerType = providerType self.publishDeploymentStatus = publishDeploymentStatus + self.pullRequestComment = pullRequestComment self.repositoryLinkId = repositoryLinkId self.repositoryName = repositoryName self.resourceName = resourceName @@ -1387,6 +1400,7 @@ extension CodeConnections { case branch = "Branch" case configFile = "ConfigFile" case ownerId = "OwnerId" case providerType = "ProviderType" case publishDeploymentStatus = "PublishDeploymentStatus" + case pullRequestComment = "PullRequestComment" case repositoryLinkId = "RepositoryLinkId" case repositoryName = "RepositoryName" case resourceName = "ResourceName" @@ -1633,6 +1647,8 @@ extension CodeConnections { public let configFile: String? /// Whether to enable or disable publishing of deployment status to source providers. public let publishDeploymentStatus: PublishDeploymentStatus? + /// A toggle that specifies whether to enable or disable pull request comments for the sync configuration to be updated. + public let pullRequestComment: PullRequestComment?
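The CodeConnections hunks add a PullRequestComment toggle to CreateSyncConfiguration, UpdateSyncConfiguration, and the SyncConfiguration output shape. A minimal sketch of enabling it through the generated createSyncConfiguration convenience method shown earlier in this diff; every identifier and ARN below is a placeholder, and .cfnStackSync assumes Soto's usual camel-casing of CFN_STACK_SYNC.

import SotoCodeConnections

// Sketch only: create a CFN_STACK_SYNC sync configuration with the new pull request
// comment toggle enabled. Every identifier and ARN below is a placeholder.
func createSyncConfigurationWithPullRequestComments() async throws {
    let client = AWSClient()
    let codeConnections = CodeConnections(client: client, region: .useast1)

    _ = try await codeConnections.createSyncConfiguration(
        branch: "main",
        configFile: "deployment/template-config.json",
        pullRequestComment: .enabled, // new toggle; omit to keep the service default
        repositoryLinkId: "example-repository-link-id",
        resourceName: "ExampleStack",
        roleArn: "arn:aws:iam::111122223333:role/ExampleGitSyncRole",
        syncType: .cfnStackSync
    )
    try await client.shutdown()
}

The same pullRequestComment value can be passed to updateSyncConfiguration, which gains the identical parameter in this change.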
/// The ID of the repository link for the sync configuration to be updated. public let repositoryLinkId: String? /// The name of the Amazon Web Services resource for the sync configuration to be updated. @@ -1645,10 +1661,11 @@ extension CodeConnections { public let triggerResourceUpdateOn: TriggerResourceUpdateOn? @inlinable - public init(branch: String? = nil, configFile: String? = nil, publishDeploymentStatus: PublishDeploymentStatus? = nil, repositoryLinkId: String? = nil, resourceName: String, roleArn: String? = nil, syncType: SyncConfigurationType, triggerResourceUpdateOn: TriggerResourceUpdateOn? = nil) { + public init(branch: String? = nil, configFile: String? = nil, publishDeploymentStatus: PublishDeploymentStatus? = nil, pullRequestComment: PullRequestComment? = nil, repositoryLinkId: String? = nil, resourceName: String, roleArn: String? = nil, syncType: SyncConfigurationType, triggerResourceUpdateOn: TriggerResourceUpdateOn? = nil) { self.branch = branch self.configFile = configFile self.publishDeploymentStatus = publishDeploymentStatus + self.pullRequestComment = pullRequestComment self.repositoryLinkId = repositoryLinkId self.resourceName = resourceName self.roleArn = roleArn @@ -1673,6 +1690,7 @@ extension CodeConnections { case branch = "Branch" case configFile = "ConfigFile" case publishDeploymentStatus = "PublishDeploymentStatus" + case pullRequestComment = "PullRequestComment" case repositoryLinkId = "RepositoryLinkId" case resourceName = "ResourceName" case roleArn = "RoleArn" diff --git a/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift b/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift index ed46d16ea0..a8374f08b9 100644 --- a/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift +++ b/Sources/Soto/Services/CodePipeline/CodePipeline_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS CodePipeline service. /// -/// CodePipeline Overview This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide. You can use the CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. 
StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. Using the API to integrate with CodePipeline For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network. You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success. +/// CodePipeline Overview This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide. 
You can use the CodePipeline API to work with pipelines, stages, actions, and transitions. Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions. You can work with pipelines by calling: CreatePipeline, which creates a uniquely named pipeline. DeletePipeline, which deletes the specified pipeline. GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN). GetPipelineExecution, which returns information about a specific execution of a pipeline. GetPipelineState, which returns information about the current state of the stages and actions of a pipeline. ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details. ListPipelines, which gets a summary of all of the pipelines associated with your account. ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline. StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline. StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline. UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline. Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference. Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are: Source Build Test Deploy Approval Invoke Compute Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete. You can work with transitions by calling: DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline. EnableStageTransition, which enables transition of artifacts between stages in a pipeline. Using the API to integrate with CodePipeline For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items: Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source. 
You can work with jobs by calling: AcknowledgeJob, which confirms whether a job worker has received the specified job. GetJobDetails, which returns the details of a job. PollForJobs, which determines whether there are any jobs to act on. PutJobFailureResult, which provides details of a job failure. PutJobSuccessResult, which provides details of a job success. Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network. You can work with third party jobs by calling: AcknowledgeThirdPartyJob, which confirms whether a job worker has received the specified job. GetThirdPartyJobDetails, which requests the details of a job for a partner action. PollForThirdPartyJobs, which determines whether there are any jobs to act on. PutThirdPartyJobFailureResult, which provides details of a job failure. PutThirdPartyJobSuccessResult, which provides details of a job success. public struct CodePipeline: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift b/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift index de44861248..5e7942cc0e 100644 --- a/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift +++ b/Sources/Soto/Services/CodePipeline/CodePipeline_shapes.swift @@ -29,6 +29,7 @@ extension CodePipeline { public enum ActionCategory: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case approval = "Approval" case build = "Build" + case compute = "Compute" case deploy = "Deploy" case invoke = "Invoke" case source = "Source" @@ -172,7 +173,15 @@ extension CodePipeline { public enum Result: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case fail = "FAIL" + case retry = "RETRY" case rollback = "ROLLBACK" + case skip = "SKIP" + public var description: String { return self.rawValue } + } + + public enum RetryTrigger: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case automatedStageRetry = "AutomatedStageRetry" + case manualStageRetry = "ManualStageRetry" public var description: String { return self.rawValue } } @@ -213,6 +222,7 @@ extension CodePipeline { case cancelled = "Cancelled" case failed = "Failed" case inProgress = "InProgress" + case skipped = "Skipped" case stopped = "Stopped" case stopping = "Stopping" case succeeded = "Succeeded" @@ -444,6 +454,8 @@ extension CodePipeline { public struct ActionDeclaration: AWSEncodableShape & AWSDecodableShape { /// Specifies the action type and the provider of the action. public let actionTypeId: ActionTypeId + /// The shell commands to run with your compute action in CodePipeline. All commands are supported except multi-line formats. While CodeBuild logs and permissions are used, you do not need to create any resources in CodeBuild. Using compute time for this action will incur separate charges in CodeBuild. + public let commands: [String]? /// The action's configuration. These are key-value pairs that specify input values for an action. For more information, see Action Structure Requirements in CodePipeline. For the list of configuration properties for the CloudFormation action type in CodePipeline, see Configuration Properties Reference in the CloudFormation User Guide. For template snippets with examples, see Using Parameter Override Functions with CodePipeline Pipelines in the CloudFormation User Guide. 
The values can be represented in either JSON or YAML format. For example, the JSON configuration item format is as follows: JSON: "Configuration" : { Key : Value }, public let configuration: [String: String]? /// The name or ID of the artifact consumed by the action, such as a test or build artifact. @@ -454,6 +466,8 @@ extension CodePipeline { public let namespace: String? /// The name or ID of the result of the action declaration, such as a test or build artifact. public let outputArtifacts: [OutputArtifact]? + /// The list of variables that are to be exported from the compute action. This is specifically CodeBuild environment variables as used for that action. + public let outputVariables: [String]? /// The action declaration's Amazon Web Services Region, such as us-east-1. public let region: String? /// The ARN of the IAM service role that performs the declared action. This is assumed through the roleArn for the pipeline. @@ -464,13 +478,15 @@ extension CodePipeline { public let timeoutInMinutes: Int? @inlinable - public init(actionTypeId: ActionTypeId, configuration: [String: String]? = nil, inputArtifacts: [InputArtifact]? = nil, name: String, namespace: String? = nil, outputArtifacts: [OutputArtifact]? = nil, region: String? = nil, roleArn: String? = nil, runOrder: Int? = nil, timeoutInMinutes: Int? = nil) { + public init(actionTypeId: ActionTypeId, commands: [String]? = nil, configuration: [String: String]? = nil, inputArtifacts: [InputArtifact]? = nil, name: String, namespace: String? = nil, outputArtifacts: [OutputArtifact]? = nil, outputVariables: [String]? = nil, region: String? = nil, roleArn: String? = nil, runOrder: Int? = nil, timeoutInMinutes: Int? = nil) { self.actionTypeId = actionTypeId + self.commands = commands self.configuration = configuration self.inputArtifacts = inputArtifacts self.name = name self.namespace = namespace self.outputArtifacts = outputArtifacts + self.outputVariables = outputVariables self.region = region self.roleArn = roleArn self.runOrder = runOrder @@ -479,6 +495,12 @@ extension CodePipeline { public func validate(name: String) throws { try self.actionTypeId.validate(name: "\(name).actionTypeId") + try self.commands?.forEach { + try validate($0, name: "commands[]", parent: name, max: 1000) + try validate($0, name: "commands[]", parent: name, min: 1) + } + try self.validate(self.commands, name: "commands", parent: name, max: 50) + try self.validate(self.commands, name: "commands", parent: name, min: 1) try self.configuration?.forEach { try validate($0.key, name: "configuration.key", parent: name, max: 50) try validate($0.key, name: "configuration.key", parent: name, min: 1) @@ -497,6 +519,12 @@ extension CodePipeline { try self.outputArtifacts?.forEach { try $0.validate(name: "\(name).outputArtifacts[]") } + try self.outputVariables?.forEach { + try validate($0, name: "outputVariables[]", parent: name, max: 128) + try validate($0, name: "outputVariables[]", parent: name, min: 1) + } + try self.validate(self.outputVariables, name: "outputVariables", parent: name, max: 15) + try self.validate(self.outputVariables, name: "outputVariables", parent: name, min: 1) try self.validate(self.region, name: "region", parent: name, max: 30) try self.validate(self.region, name: "region", parent: name, min: 4) try self.validate(self.roleArn, name: "roleArn", parent: name, max: 1024) @@ -509,11 +537,13 @@ extension CodePipeline { private enum CodingKeys: String, CodingKey { case actionTypeId = "actionTypeId" + case commands = "commands" case configuration = 
"configuration" case inputArtifacts = "inputArtifacts" case name = "name" case namespace = "namespace" case outputArtifacts = "outputArtifacts" + case outputVariables = "outputVariables" case region = "region" case roleArn = "roleArn" case runOrder = "runOrder" @@ -1867,11 +1897,14 @@ extension CodePipeline { public let conditions: [Condition]? /// The specified result for when the failure conditions are met, such as rolling back the stage. public let result: Result? + /// The retry configuration specifies automatic retry for a failed stage, along with the configured retry mode. + public let retryConfiguration: RetryConfiguration? @inlinable - public init(conditions: [Condition]? = nil, result: Result? = nil) { + public init(conditions: [Condition]? = nil, result: Result? = nil, retryConfiguration: RetryConfiguration? = nil) { self.conditions = conditions self.result = result + self.retryConfiguration = retryConfiguration } public func validate(name: String) throws { @@ -1885,6 +1918,7 @@ extension CodePipeline { private enum CodingKeys: String, CodingKey { case conditions = "conditions" case result = "result" + case retryConfiguration = "retryConfiguration" } } @@ -2974,21 +3008,31 @@ extension CodePipeline { } public struct OutputArtifact: AWSEncodableShape & AWSDecodableShape { + /// The files that you want to associate with the output artifact that will be exported from the compute action. + public let files: [String]? /// The name of the output of an artifact, such as "My App". The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions. Output artifact names must be unique within a pipeline. public let name: String @inlinable - public init(name: String) { + public init(files: [String]? = nil, name: String) { + self.files = files self.name = name } public func validate(name: String) throws { + try self.files?.forEach { + try validate($0, name: "files[]", parent: name, max: 128) + try validate($0, name: "files[]", parent: name, min: 1) + } + try self.validate(self.files, name: "files", parent: name, max: 10) + try self.validate(self.files, name: "files", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, max: 100) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_\\-]+$") } private enum CodingKeys: String, CodingKey { + case files = "files" case name = "name" } } @@ -3833,6 +3877,20 @@ extension CodePipeline { } } + public struct RetryConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The method that you want to configure for automatic stage retry on stage failure. You can specify to retry only failed action in the stage or all actions in the stage. + public let retryMode: StageRetryMode? + + @inlinable + public init(retryMode: StageRetryMode? = nil) { + self.retryMode = retryMode + } + + private enum CodingKeys: String, CodingKey { + case retryMode = "retryMode" + } + } + public struct RetryStageExecutionInput: AWSEncodableShape { /// The ID of the pipeline execution in the failed stage to be retried. 
Use the GetPipelineState action to retrieve the current pipelineExecutionId of the failed stage public let pipelineExecutionId: String @@ -3883,6 +3941,28 @@ extension CodePipeline { } } + public struct RetryStageMetadata: AWSDecodableShape { + /// The number of attempts for a specific stage with automatic retry on stage failure. One attempt is allowed for automatic stage retry on failure. + public let autoStageRetryAttempt: Int? + /// The latest trigger for a specific stage where manual or automatic retries have been made upon stage failure. + public let latestRetryTrigger: RetryTrigger? + /// The number of attempts for a specific stage where manual retries have been made upon stage failure. + public let manualStageRetryAttempt: Int? + + @inlinable + public init(autoStageRetryAttempt: Int? = nil, latestRetryTrigger: RetryTrigger? = nil, manualStageRetryAttempt: Int? = nil) { + self.autoStageRetryAttempt = autoStageRetryAttempt + self.latestRetryTrigger = latestRetryTrigger + self.manualStageRetryAttempt = manualStageRetryAttempt + } + + private enum CodingKeys: String, CodingKey { + case autoStageRetryAttempt = "autoStageRetryAttempt" + case latestRetryTrigger = "latestRetryTrigger" + case manualStageRetryAttempt = "manualStageRetryAttempt" + } + } + public struct RollbackStageInput: AWSEncodableShape { /// The name of the pipeline for which the stage will be rolled back. public let pipelineName: String @@ -4152,7 +4232,7 @@ extension CodePipeline { public struct RuleExecutionInput: AWSDecodableShape { /// Configuration data for a rule execution, such as the resolved values for that run. public let configuration: [String: String]? - /// Details of input artifacts of the rule that correspond to the rule execution. + /// Details of input artifacts of the rule that correspond to the rule execution. public let inputArtifacts: [ArtifactDetail]? /// The Amazon Web Services Region for the rule, such as us-east-1. public let region: String? @@ -4589,11 +4669,13 @@ extension CodePipeline { public let onFailureConditionState: StageConditionState? /// The state of the success conditions for a stage. public let onSuccessConditionState: StageConditionState? + /// The details of a specific automatic retry on stage failure, including the attempt number and trigger. + public let retryStageMetadata: RetryStageMetadata? /// The name of the stage. public let stageName: String? @inlinable - public init(actionStates: [ActionState]? = nil, beforeEntryConditionState: StageConditionState? = nil, inboundExecution: StageExecution? = nil, inboundExecutions: [StageExecution]? = nil, inboundTransitionState: TransitionState? = nil, latestExecution: StageExecution? = nil, onFailureConditionState: StageConditionState? = nil, onSuccessConditionState: StageConditionState? = nil, stageName: String? = nil) { + public init(actionStates: [ActionState]? = nil, beforeEntryConditionState: StageConditionState? = nil, inboundExecution: StageExecution? = nil, inboundExecutions: [StageExecution]? = nil, inboundTransitionState: TransitionState? = nil, latestExecution: StageExecution? = nil, onFailureConditionState: StageConditionState? = nil, onSuccessConditionState: StageConditionState? = nil, retryStageMetadata: RetryStageMetadata? = nil, stageName: String?
= nil) { self.actionStates = actionStates self.beforeEntryConditionState = beforeEntryConditionState self.inboundExecution = inboundExecution @@ -4602,6 +4684,7 @@ extension CodePipeline { self.latestExecution = latestExecution self.onFailureConditionState = onFailureConditionState self.onSuccessConditionState = onSuccessConditionState + self.retryStageMetadata = retryStageMetadata self.stageName = stageName } @@ -4614,6 +4697,7 @@ extension CodePipeline { case latestExecution = "latestExecution" case onFailureConditionState = "onFailureConditionState" case onSuccessConditionState = "onSuccessConditionState" + case retryStageMetadata = "retryStageMetadata" case stageName = "stageName" } } diff --git a/Sources/Soto/Services/Connect/Connect_api.swift b/Sources/Soto/Services/Connect/Connect_api.swift index 8a6b918c46..dd9fa99292 100644 --- a/Sources/Soto/Services/Connect/Connect_api.swift +++ b/Sources/Soto/Services/Connect/Connect_api.swift @@ -123,7 +123,7 @@ public struct Connect: AWSService { return try await self.activateEvaluationForm(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. Associates the specified dataset for a Amazon Connect instance with the target account. You can associate only one dataset in a single call. + /// Associates the specified dataset for a Amazon Connect instance with the target account. You can associate only one dataset in a single call. @Sendable @inlinable public func associateAnalyticsDataSet(_ input: AssociateAnalyticsDataSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateAnalyticsDataSetResponse { @@ -136,7 +136,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. Associates the specified dataset for a Amazon Connect instance with the target account. You can associate only one dataset in a single call. + /// Associates the specified dataset for a Amazon Connect instance with the target account. You can associate only one dataset in a single call. /// /// Parameters: /// - dataSetId: The identifier of the dataset to associate with the target account. @@ -604,7 +604,7 @@ public struct Connect: AWSService { return try await self.associateUserProficiencies(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. Associates a list of analytics datasets for a given Amazon Connect instance to a target account. You can associate multiple datasets in a single call. + /// Associates a list of analytics datasets for a given Amazon Connect instance to a target account. You can associate multiple datasets in a single call. @Sendable @inlinable public func batchAssociateAnalyticsDataSet(_ input: BatchAssociateAnalyticsDataSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchAssociateAnalyticsDataSetResponse { @@ -617,7 +617,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. Associates a list of analytics datasets for a given Amazon Connect instance to a target account. You can associate multiple datasets in a single call. + /// Associates a list of analytics datasets for a given Amazon Connect instance to a target account. You can associate multiple datasets in a single call. /// /// Parameters: /// - dataSetIds: An array of dataset identifiers to associate. 
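Returning to the CodePipeline hunks above: ActionDeclaration gains commands and outputVariables, OutputArtifact gains files, and FailureConditions gains a retryConfiguration alongside the new RETRY result. A hedged sketch of how these pieces could combine in a stage declaration; the Compute action-type coordinates (owner AWS, provider "Commands", version "1"), the StageDeclaration.onFailure member, and the .failedActions retry mode are assumptions drawn from the existing CodePipeline API rather than from this diff, and all names are placeholders.

import SotoCodePipeline

// Sketch only: a Compute action that runs shell commands, exports a CodeBuild
// environment variable and a file, inside a stage that automatically retries
// failed actions. Provider/version and every name here are placeholder assumptions.
func makeCommandsStage() -> CodePipeline.StageDeclaration {
    let commandsAction = CodePipeline.ActionDeclaration(
        actionTypeId: CodePipeline.ActionTypeId(
            category: .compute, // new ActionCategory case in this change
            owner: .aws,
            provider: "Commands",
            version: "1"
        ),
        commands: [
            "ls -la",
            "echo \"hello from the Commands action\" > output.txt"
        ],
        inputArtifacts: [CodePipeline.InputArtifact(name: "SourceOutput")],
        name: "RunCommands",
        outputArtifacts: [
            // files is new in this change: export selected files from the compute action.
            CodePipeline.OutputArtifact(files: ["output.txt"], name: "CommandsOutput")
        ],
        outputVariables: ["CODEBUILD_BUILD_ID"]
    )

    return CodePipeline.StageDeclaration(
        actions: [commandsAction],
        name: "Commands",
        // New stage-level automatic retry on failure (FailureConditions.retryConfiguration).
        onFailure: CodePipeline.FailureConditions(
            result: .retry,
            retryConfiguration: CodePipeline.RetryConfiguration(retryMode: .failedActions)
        )
    )
}

GetPipelineState then surfaces the new RetryStageMetadata (attempt counts and the latest retry trigger) on each StageState, as shown in the hunks above.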
@@ -639,7 +639,7 @@ public struct Connect: AWSService { return try await self.batchAssociateAnalyticsDataSet(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. Removes a list of analytics datasets associated with a given Amazon Connect instance. You can disassociate multiple datasets in a single call. + /// Removes a list of analytics datasets associated with a given Amazon Connect instance. You can disassociate multiple datasets in a single call. @Sendable @inlinable public func batchDisassociateAnalyticsDataSet(_ input: BatchDisassociateAnalyticsDataSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchDisassociateAnalyticsDataSetResponse { @@ -652,7 +652,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. Removes a list of analytics datasets associated with a given Amazon Connect instance. You can disassociate multiple datasets in a single call. + /// Removes a list of analytics datasets associated with a given Amazon Connect instance. You can disassociate multiple datasets in a single call. /// /// Parameters: /// - dataSetIds: An array of associated dataset identifiers to remove. @@ -2784,7 +2784,7 @@ public struct Connect: AWSService { return try await self.describeAuthenticationProfile(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. Describes the specified contact. Contact information remains available in Amazon Connect for 24 months, and then it is deleted. Only data from November 12, 2021, and later is returned by this API. + /// This API is in preview release for Amazon Connect and is subject to change. Describes the specified contact. Contact information remains available in Amazon Connect for 24 months from the InitiationTimestamp, and then it is deleted. Only contact information that is available in Amazon Connect is returned by this API @Sendable @inlinable public func describeContact(_ input: DescribeContactRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeContactResponse { @@ -2797,7 +2797,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. Describes the specified contact. Contact information remains available in Amazon Connect for 24 months, and then it is deleted. Only data from November 12, 2021, and later is returned by this API. + /// This API is in preview release for Amazon Connect and is subject to change. Describes the specified contact. Contact information remains available in Amazon Connect for 24 months from the InitiationTimestamp, and then it is deleted. Only contact information that is available in Amazon Connect is returned by this API /// /// Parameters: /// - contactId: The identifier of the contact. @@ -3514,7 +3514,7 @@ public struct Connect: AWSService { return try await self.describeVocabulary(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. Removes the dataset ID associated with a given Amazon Connect instance. + /// Removes the dataset ID associated with a given Amazon Connect instance. 
@Sendable @inlinable public func disassociateAnalyticsDataSet(_ input: DisassociateAnalyticsDataSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -3527,7 +3527,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. Removes the dataset ID associated with a given Amazon Connect instance. + /// Removes the dataset ID associated with a given Amazon Connect instance. /// /// Parameters: /// - dataSetId: The identifier of the dataset to remove. @@ -4282,7 +4282,7 @@ public struct Connect: AWSService { /// - groupings: The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION /// - interval: The interval period and timezone to apply to returned metrics. IntervalPeriod: An aggregated grouping applied to request metrics. Valid IntervalPeriod values are: FIFTEEN_MIN | THIRTY_MIN | HOUR | DAY | WEEK | TOTAL. For example, if IntervalPeriod is selected THIRTY_MIN, StartTime and EndTime differs by 1 day, then Amazon Connect returns 48 results in the response. Each result is aggregated by the THIRTY_MIN period. By default Amazon Connect aggregates results based on the TOTAL interval period. The following list describes restrictions on StartTime and EndTime based on which IntervalPeriod is requested. FIFTEEN_MIN: The difference between StartTime and EndTime must be less than 3 days. THIRTY_MIN: The difference between StartTime and EndTime must be less than 3 days. HOUR: The difference between StartTime and EndTime must be less than 3 days. DAY: The difference between StartTime and EndTime must be less than 35 days. WEEK: The difference between StartTime and EndTime must be less than 35 days. TOTAL: The difference between StartTime and EndTime must be less than 35 days. TimeZone: The timezone applied to requested metrics. /// - maxResults: The maximum number of results to return per page. - /// - metrics: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. 
AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts + /// - metrics: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. 
UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. 
For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts /// - nextToken: The token for the next set of results. Use the value returned in the previous /// - resourceArn: The Amazon Resource Name (ARN) of the resource. This includes the instanceId an Amazon Connect instance. /// - startTime: The timestamp, in UNIX Epoch time format, at which to start the reporting interval for the retrieval of historical metrics data. The time must be before the end time timestamp. The start and end time depends on the IntervalPeriod selected. By default the time range between start and end time is 35 days. Historical metrics are available for 3 months. @@ -4489,7 +4489,7 @@ public struct Connect: AWSService { return try await self.listAgentStatuses(input, logger: logger) } - /// This API is in preview release for Amazon Connect and is subject to change. Lists the association status of requested dataset ID for a given Amazon Connect instance. + /// Lists the association status of requested dataset ID for a given Amazon Connect instance. @Sendable @inlinable public func listAnalyticsDataAssociations(_ input: ListAnalyticsDataAssociationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAnalyticsDataAssociationsResponse { @@ -4502,7 +4502,7 @@ public struct Connect: AWSService { logger: logger ) } - /// This API is in preview release for Amazon Connect and is subject to change. Lists the association status of requested dataset ID for a given Amazon Connect instance. 
+ /// Lists the association status of requested dataset ID for a given Amazon Connect instance. /// /// Parameters: /// - dataSetId: The identifier of the dataset to get the association status. @@ -7307,6 +7307,68 @@ public struct Connect: AWSService { return try await self.startContactStreaming(input, logger: logger) } + /// Initiates a new outbound SMS contact to a customer. The response of this API provides the ContactId of the outbound SMS contact created. SourceEndpoint only supports Endpoints with CONNECT_PHONENUMBER_ARN as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER as Type. ContactFlowId initiates the flow to manage the new SMS contact created. This API can be used to initiate outbound SMS contacts for an agent, or it can deflect an ongoing contact to an outbound SMS contact by using the StartOutboundChatContact Flow Action. For more information about using SMS in Amazon Connect, see the following topics in the Amazon Connect Administrator Guide: Set up SMS messaging Request an SMS-enabled phone number through AWS End User Messaging SMS + @Sendable + @inlinable + public func startOutboundChatContact(_ input: StartOutboundChatContactRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartOutboundChatContactResponse { + try await self.client.execute( + operation: "StartOutboundChatContact", + path: "/contact/outbound-chat", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Initiates a new outbound SMS contact to a customer. The response of this API provides the ContactId of the outbound SMS contact created. SourceEndpoint only supports Endpoints with CONNECT_PHONENUMBER_ARN as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER as Type. ContactFlowId initiates the flow to manage the new SMS contact created. This API can be used to initiate outbound SMS contacts for an agent, or it can deflect an ongoing contact to an outbound SMS contact by using the StartOutboundChatContact Flow Action. For more information about using SMS in Amazon Connect, see the following topics in the Amazon Connect Administrator Guide: Set up SMS messaging Request an SMS-enabled phone number through AWS End User Messaging SMS + /// + /// Parameters: + /// - attributes: A custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in flows just like any other contact attributes. + /// - chatDurationInMinutes: The total duration of the newly started chat session. If not specified, the chat session duration defaults to 25 hours. The minimum configurable time is 60 minutes. The maximum configurable time is 10,080 minutes (7 days). + /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. The token is valid for 7 days after creation. If a contact is already started, the contact ID is returned. + /// - contactFlowId: The identifier of the flow for the call. To see the ContactFlowId in the Amazon Connect console user interface, on the navigation menu go to Routing, Contact Flows. Choose the flow. On the flow page, under the name of the flow, choose Show additional flow information.
The ContactFlowId is the last part of the ARN, shown here in bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/123ec456-a007-89c0-1234-xxxxxxxxxxxx + /// - destinationEndpoint: + /// - initialSystemMessage: + /// - instanceId: The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + /// - participantDetails: + /// - relatedContactId: The unique identifier for an Amazon Connect contact. This identifier is related to the contact starting. + /// - segmentAttributes: A set of system defined key-value pairs stored on individual contact segments using an attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in flows. Attribute keys can include only alphanumeric, -, and _. This field can be used to show channel subtype, such as connect:Guide and connect:SMS. + /// - sourceEndpoint: + /// - supportedMessagingContentTypes: The supported chat message content types. Supported types are: text/plain text/markdown application/json, application/vnd.amazonaws.connect.message.interactive application/vnd.amazonaws.connect.message.interactive.response Content types must always contain text/plain. You can then put any other supported type in the list. For example, all the following lists are valid because they contain text/plain: [text/plain, text/markdown, application/json] [text/markdown, text/plain] [text/plain, application/json, application/vnd.amazonaws.connect.message.interactive.response] + /// - logger: Logger use during operation + @inlinable + public func startOutboundChatContact( + attributes: [String: String]? = nil, + chatDurationInMinutes: Int? = nil, + clientToken: String? = StartOutboundChatContactRequest.idempotencyToken(), + contactFlowId: String, + destinationEndpoint: Endpoint, + initialSystemMessage: ChatMessage? = nil, + instanceId: String, + participantDetails: ParticipantDetails? = nil, + relatedContactId: String? = nil, + segmentAttributes: [String: SegmentAttributeValue], + sourceEndpoint: Endpoint, + supportedMessagingContentTypes: [String]? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> StartOutboundChatContactResponse { + let input = StartOutboundChatContactRequest( + attributes: attributes, + chatDurationInMinutes: chatDurationInMinutes, + clientToken: clientToken, + contactFlowId: contactFlowId, + destinationEndpoint: destinationEndpoint, + initialSystemMessage: initialSystemMessage, + instanceId: instanceId, + participantDetails: participantDetails, + relatedContactId: relatedContactId, + segmentAttributes: segmentAttributes, + sourceEndpoint: sourceEndpoint, + supportedMessagingContentTypes: supportedMessagingContentTypes + ) + return try await self.startOutboundChatContact(input, logger: logger) + } + /// Places an outbound call to a contact, and then initiates the flow. It performs the actions in the flow that's specified (in ContactFlowId). Agents do not initiate the outbound API, which means that they do not dial the contact. If the flow places an outbound call to a contact, and then puts the contact in queue, the call is then routed to the agent, like any other inbound case. There is a 60-second dialing timeout for this operation. If the call is not connected after 60 seconds, it fails. UK numbers with a 447 prefix are not allowed by default. Before you can dial these UK mobile numbers, you must submit a service quota increase request. 
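For reference, a minimal usage sketch of the startOutboundChatContact convenience method added in the hunk above. The AWSClient() defaults, the Endpoint and SegmentAttributeValue initializers, and the .telephoneNumber / .connectPhonenumberArn cases are assumed from Soto's generated Connect shapes rather than shown in this diff, and every identifier below is a placeholder:

import SotoConnect

// Illustrative sketch only: the helper name and all identifiers are placeholders, not part of the SDK.
func startOutboundSMSExample() async throws {
    let client = AWSClient()
    let connect = Connect(client: client, region: .uswest2)

    let response = try await connect.startOutboundChatContact(
        contactFlowId: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",                        // flow that manages the new SMS contact
        destinationEndpoint: .init(address: "+12065550100", type: .telephoneNumber),  // customer phone number
        instanceId: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        segmentAttributes: ["connect:Subtype": .init(valueString: "connect:SMS")],    // marks the channel subtype as SMS
        sourceEndpoint: .init(address: "arn:aws:connect:...", type: .connectPhonenumberArn), // ARN of an SMS-enabled Connect phone number (placeholder)
        supportedMessagingContentTypes: ["text/plain"]
    )
    print("Outbound SMS contact started:", response) // the response carries the ContactId of the new contact

    try await client.shutdown()
}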
For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide. Campaign calls are not allowed by default. Before you can make a call with TrafficType = CAMPAIGN, you must submit a service quota increase request to the quota Amazon Connect campaigns. @Sendable @inlinable @@ -9831,7 +9893,7 @@ extension Connect { /// - groupings: The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FLOWS_RESOURCE_ID | FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD | Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION /// - interval: The interval period and timezone to apply to returned metrics. IntervalPeriod: An aggregated grouping applied to request metrics. Valid IntervalPeriod values are: FIFTEEN_MIN | THIRTY_MIN | HOUR | DAY | WEEK | TOTAL. For example, if IntervalPeriod is selected THIRTY_MIN, StartTime and EndTime differs by 1 day, then Amazon Connect returns 48 results in the response. Each result is aggregated by the THIRTY_MIN period. By default Amazon Connect aggregates results based on the TOTAL interval period. The following list describes restrictions on StartTime and EndTime based on which IntervalPeriod is requested. FIFTEEN_MIN: The difference between StartTime and EndTime must be less than 3 days. THIRTY_MIN: The difference between StartTime and EndTime must be less than 3 days. HOUR: The difference between StartTime and EndTime must be less than 3 days. DAY: The difference between StartTime and EndTime must be less than 35 days. WEEK: The difference between StartTime and EndTime must be less than 35 days. TOTAL: The difference between StartTime and EndTime must be less than 35 days. TimeZone: The timezone applied to requested metrics. /// - maxResults: The maximum number of results to return per page. - /// - metrics: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. 
AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts + /// - metrics: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. 
UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. 
For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts /// - resourceArn: The Amazon Resource Name (ARN) of the resource. This includes the instanceId an Amazon Connect instance. /// - startTime: The timestamp, in UNIX Epoch time format, at which to start the reporting interval for the retrieval of historical metrics data. The time must be before the end time timestamp. The start and end time depends on the IntervalPeriod selected. By default the time range between start and end time is 35 days. Historical metrics are available for 3 months. /// - logger: Logger used for logging diff --git a/Sources/Soto/Services/Connect/Connect_shapes.swift b/Sources/Soto/Services/Connect/Connect_shapes.swift index 53a73d6c04..a5b7795cfc 100644 --- a/Sources/Soto/Services/Connect/Connect_shapes.swift +++ b/Sources/Soto/Services/Connect/Connect_shapes.swift @@ -200,6 +200,7 @@ extension Connect { } public enum EndpointType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case connectPhonenumberArn = "CONNECT_PHONENUMBER_ARN" case contactFlow = "CONTACT_FLOW" case telephoneNumber = "TELEPHONE_NUMBER" case voip = "VOIP" @@ -9981,7 +9982,7 @@ extension Connect { public let interval: IntervalDetails? /// The maximum number of results to return per page. public let maxResults: Int? - /// The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. 
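As a rough illustration of the GetMetricDataV2 parameters documented above (filters, groupings, interval, metrics, resourceArn, start/end time), the following sketch shows how a request might be assembled with Soto. It is not part of this diff: the generated shape and case names (Connect.GetMetricDataV2Request, Connect.FilterV2, Connect.IntervalDetails, Connect.MetricV2, Connect.ThresholdV2, IntervalPeriod.thirtyMin) are assumed to mirror the Connect API model, and the ARNs, filter values, and metric choices are placeholders.

import Foundation
import SotoConnect

// Sketch only — shape and initializer names are assumed from the generated Connect model.
func fetchQueueMetrics(connect: Connect, instanceArn: String, queueArn: String) async throws {
    let endTime = Date()
    // 24 hours at THIRTY_MIN granularity should return 48 aggregated results,
    // per the IntervalPeriod description above.
    let startTime = endTime.addingTimeInterval(-24 * 60 * 60)

    let request = Connect.GetMetricDataV2Request(
        endTime: endTime,
        // QUEUE is used here as an example filter key (placeholder queue ARN).
        filters: [Connect.FilterV2(filterKey: "QUEUE", filterValues: [queueArn])],
        groupings: ["QUEUE", "CHANNEL"],
        interval: Connect.IntervalDetails(intervalPeriod: .thirtyMin, timeZone: "UTC"),
        maxResults: 100,
        metrics: [
            Connect.MetricV2(name: "ABANDONMENT_RATE"),
            Connect.MetricV2(name: "AVG_HANDLE_TIME"),
            // SERVICE_LEVEL takes a threshold; LT ("Less than") with a 60-second value.
            Connect.MetricV2(
                name: "SERVICE_LEVEL",
                threshold: [Connect.ThresholdV2(comparison: "LT", thresholdValue: 60)]
            )
        ],
        resourceArn: instanceArn,
        startTime: startTime
    )

    let response = try await connect.getMetricDataV2(request)
    for result in response.metricResults ?? [] {
        print(result)
    }
}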
ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). 
UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. 
CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. 
PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for "Less than"). UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts + /// The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. 
AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. 
For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts public let metrics: [MetricV2] /// The token for the next set of results. Use the value returned in the previous /// response in the next request to retrieve the next set of results. @@ -10010,7 +10011,7 @@ extension Connect { } try self.validate(self.filters, name: "filters", parent: name, max: 5) try self.validate(self.filters, name: "filters", parent: name, min: 1) - try self.validate(self.groupings, name: "groupings", parent: name, max: 3) + try self.validate(self.groupings, name: "groupings", parent: name, max: 4) try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.metrics.forEach { @@ -13950,7 +13951,7 @@ extension Connect { public struct MetricFilterV2: AWSEncodableShape & AWSDecodableShape { /// The key to use for filtering data. Valid metric filter keys: INITIATION_METHOD, DISCONNECT_REASON. These are the same values as the InitiationMethod and DisconnectReason in the contact record. For more information, see ContactTraceRecord in the Amazon Connect Administrator Guide. public let metricFilterKey: String? - /// The values to use for filtering data. 
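To make the shapes above concrete, the following is a minimal sketch of a GetMetricDataV2 call that combines a SERVICE_LEVEL threshold using the newly documented LTE comparison with a metric-level INITIATION_METHOD filter. The metric names, filter keys, and threshold range come from the doc comments above; the memberwise initializers, the getMetricDataV2 client method, and the placeholder ARN and queue ID are assumptions based on the generated Soto API rather than code shown in this diff.

import Foundation
import SotoConnect

// Sketch under the assumptions stated above; not part of this patch.
func fetchQueueMetrics(connect: Connect, instanceArn: String, queueId: String) async throws -> Connect.GetMetricDataV2Response {
    // SERVICE_LEVEL threshold in whole seconds; LTE is now accepted alongside LT.
    let serviceLevel = Connect.MetricV2(
        name: "SERVICE_LEVEL",
        threshold: [Connect.ThresholdV2(comparison: "LTE", thresholdValue: 30)]
    )
    // CONTACTS_HANDLED restricted to inbound contacts via a metric-level filter.
    let inboundHandled = Connect.MetricV2(
        metricFilters: [Connect.MetricFilterV2(metricFilterKey: "INITIATION_METHOD", metricFilterValues: ["INBOUND"], negate: false)],
        name: "CONTACTS_HANDLED"
    )
    let request = Connect.GetMetricDataV2Request(
        endTime: Date(),
        filters: [Connect.FilterV2(filterKey: "QUEUE", filterValues: [queueId])],
        groupings: ["QUEUE", "CHANNEL"], // up to four groupings after the validation change above
        metrics: [serviceLevel, inboundHandled],
        resourceArn: instanceArn,
        startTime: Date().addingTimeInterval(-3600)
    )
    return try await connect.getMetricDataV2(request)
}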
Valid metric filter values for INITIATION_METHOD: INBOUND | OUTBOUND | TRANSFER | QUEUE_TRANSFER | CALLBACK | API Valid metric filter values for DISCONNECT_REASON: CUSTOMER_DISCONNECT | AGENT_DISCONNECT | THIRD_PARTY_DISCONNECT | TELECOM_PROBLEM | BARGED | CONTACT_FLOW_DISCONNECT | OTHER | EXPIRED | API + /// The values to use for filtering data. Valid metric filter values for INITIATION_METHOD: INBOUND | OUTBOUND | TRANSFER | QUEUE_TRANSFER | CALLBACK | API | WEBRTC_API | MONITOR | DISCONNECT | EXTERNAL_OUTBOUND Valid metric filter values for DISCONNECT_REASON: CUSTOMER_DISCONNECT | AGENT_DISCONNECT | THIRD_PARTY_DISCONNECT | TELECOM_PROBLEM | BARGED | CONTACT_FLOW_DISCONNECT | OTHER | EXPIRED | API public let metricFilterValues: [String]? /// The flag to use to filter on requested metric filter values or to not filter on requested metric filter values. By default the negate is false, which indicates to filter on the requested metric filter. public let negate: Bool? @@ -18013,6 +18014,103 @@ extension Connect { } } + public struct StartOutboundChatContactRequest: AWSEncodableShape { + /// A custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in flows just like any other contact attributes. + public let attributes: [String: String]? + /// The total duration of the newly started chat session. If not specified, the chat session duration defaults to 25 hours. The minimum configurable time is 60 minutes. The maximum configurable time is 10,080 minutes (7 days). + public let chatDurationInMinutes: Int? + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. The token is valid for 7 days after creation. If a contact is already started, the contact ID is returned. + public let clientToken: String? + /// The identifier of the flow for the call. To see the ContactFlowId in the Amazon Connect console user interface, on the navigation menu go to Routing, Contact Flows. Choose the flow. On the flow page, under the name of the flow, choose Show additional flow information. The ContactFlowId is the last part of the ARN, shown here in bold: arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/123ec456-a007-89c0-1234-xxxxxxxxxxxx + public let contactFlowId: String + public let destinationEndpoint: Endpoint + public let initialSystemMessage: ChatMessage? + /// The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. + public let instanceId: String + public let participantDetails: ParticipantDetails? + /// The unique identifier for an Amazon Connect contact. This identifier is related to the contact starting. + public let relatedContactId: String? + /// A set of system defined key-value pairs stored on individual contact segments using an attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in flows. Attribute keys can include only alphanumeric, -, and _. This field can be used to show channel subtype, such as connect:Guide and connect:SMS. + public let segmentAttributes: [String: SegmentAttributeValue] + public let sourceEndpoint: Endpoint + /// The supported chat message content types.
Supported types are: text/plain text/markdown application/json, application/vnd.amazonaws.connect.message.interactive application/vnd.amazonaws.connect.message.interactive.response Content types must always contain text/plain. You can then put any other supported type in the list. For example, all the following lists are valid because they contain text/plain: [text/plain, text/markdown, application/json] [text/markdown, text/plain] [text/plain, application/json, application/vnd.amazonaws.connect.message.interactive.response] + public let supportedMessagingContentTypes: [String]? + + @inlinable + public init(attributes: [String: String]? = nil, chatDurationInMinutes: Int? = nil, clientToken: String? = StartOutboundChatContactRequest.idempotencyToken(), contactFlowId: String, destinationEndpoint: Endpoint, initialSystemMessage: ChatMessage? = nil, instanceId: String, participantDetails: ParticipantDetails? = nil, relatedContactId: String? = nil, segmentAttributes: [String: SegmentAttributeValue], sourceEndpoint: Endpoint, supportedMessagingContentTypes: [String]? = nil) { + self.attributes = attributes + self.chatDurationInMinutes = chatDurationInMinutes + self.clientToken = clientToken + self.contactFlowId = contactFlowId + self.destinationEndpoint = destinationEndpoint + self.initialSystemMessage = initialSystemMessage + self.instanceId = instanceId + self.participantDetails = participantDetails + self.relatedContactId = relatedContactId + self.segmentAttributes = segmentAttributes + self.sourceEndpoint = sourceEndpoint + self.supportedMessagingContentTypes = supportedMessagingContentTypes + } + + public func validate(name: String) throws { + try self.attributes?.forEach { + try validate($0.key, name: "attributes.key", parent: name, max: 32767) + try validate($0.key, name: "attributes.key", parent: name, min: 1) + try validate($0.value, name: "attributes[\"\($0.key)\"]", parent: name, max: 32767) + } + try self.validate(self.chatDurationInMinutes, name: "chatDurationInMinutes", parent: name, max: 10080) + try self.validate(self.chatDurationInMinutes, name: "chatDurationInMinutes", parent: name, min: 60) + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 500) + try self.validate(self.contactFlowId, name: "contactFlowId", parent: name, max: 500) + try self.destinationEndpoint.validate(name: "\(name).destinationEndpoint") + try self.initialSystemMessage?.validate(name: "\(name).initialSystemMessage") + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 100) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 1) + try self.participantDetails?.validate(name: "\(name).participantDetails") + try self.validate(self.relatedContactId, name: "relatedContactId", parent: name, max: 256) + try self.validate(self.relatedContactId, name: "relatedContactId", parent: name, min: 1) + try self.segmentAttributes.forEach { + try validate($0.key, name: "segmentAttributes.key", parent: name, max: 128) + try validate($0.key, name: "segmentAttributes.key", parent: name, min: 1) + try $0.value.validate(name: "\(name).segmentAttributes[\"\($0.key)\"]") + } + try self.sourceEndpoint.validate(name: "\(name).sourceEndpoint") + try self.supportedMessagingContentTypes?.forEach { + try validate($0, name: "supportedMessagingContentTypes[]", parent: name, max: 100) + try validate($0, name: "supportedMessagingContentTypes[]", parent: name, min: 1) + } + } + + private enum CodingKeys: String, CodingKey { + case attributes = "Attributes" + case 
chatDurationInMinutes = "ChatDurationInMinutes" + case clientToken = "ClientToken" + case contactFlowId = "ContactFlowId" + case destinationEndpoint = "DestinationEndpoint" + case initialSystemMessage = "InitialSystemMessage" + case instanceId = "InstanceId" + case participantDetails = "ParticipantDetails" + case relatedContactId = "RelatedContactId" + case segmentAttributes = "SegmentAttributes" + case sourceEndpoint = "SourceEndpoint" + case supportedMessagingContentTypes = "SupportedMessagingContentTypes" + } + } + + public struct StartOutboundChatContactResponse: AWSDecodableShape { + /// The identifier of this contact within the Amazon Connect instance. + public let contactId: String? + + @inlinable + public init(contactId: String? = nil) { + self.contactId = contactId + } + + private enum CodingKeys: String, CodingKey { + case contactId = "ContactId" + } + } + public struct StartOutboundVoiceContactRequest: AWSEncodableShape { /// Configuration of the answering machine detection for this outbound call. public let answerMachineDetectionConfig: AnswerMachineDetectionConfig? @@ -19010,7 +19108,7 @@ extension Connect { } public struct ThresholdV2: AWSEncodableShape & AWSDecodableShape { - /// The type of comparison. Only "less than" (LT) and "greater than" (GT) comparisons are supported. + /// The type of comparison. Currently, "less than" (LT), "less than equal" (LTE), and "greater than" (GT) comparisons are supported. public let comparison: String? /// The threshold value to compare. public let thresholdValue: Double? diff --git a/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift b/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift index c429bbd4c2..b56d5edad5 100644 --- a/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift +++ b/Sources/Soto/Services/CostExplorer/CostExplorer_api.swift @@ -664,7 +664,7 @@ public struct CostExplorer: AWSService { /// Retrieves all available filter values for a specified filter over a period of time. You can search the dimension values for an arbitrary string. /// /// Parameters: - /// - context: The context for the call to GetDimensionValues. This can be RESERVATIONS or COST_AND_USAGE. The default value is COST_AND_USAGE. If the context is set to RESERVATIONS, the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE, the resulting dimension values can be used in the GetCostAndUsage operation. If you set the context to COST_AND_USAGE, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following: - Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Services. - AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting reseller for Amazon Web Services in India. - Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on Amazon Web Services by third-party software providers. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. 
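A usage sketch for the StartOutboundChatContactRequest shape added above follows. Endpoint construction depends on the messaging channel, so the endpoints are taken as parameters; the startOutboundChatContact client method, the connect:Subtype segment attribute key, and the SegmentAttributeValue, ChatMessage, and ParticipantDetails initializers are assumptions based on existing Connect shapes, not code shown in this diff.

import SotoConnect

// Sketch under the assumptions stated above; not part of this patch.
func startOutboundChat(
    connect: Connect,
    instanceId: String,
    contactFlowId: String,
    source: Connect.Endpoint,
    destination: Connect.Endpoint
) async throws -> String? {
    let request = Connect.StartOutboundChatContactRequest(
        chatDurationInMinutes: 60, // 60...10080 minutes per the validation above
        contactFlowId: contactFlowId,
        destinationEndpoint: destination,
        initialSystemMessage: Connect.ChatMessage(content: "Hello from support", contentType: "text/plain"),
        instanceId: instanceId,
        participantDetails: Connect.ParticipantDetails(displayName: "Support"),
        segmentAttributes: ["connect:Subtype": Connect.SegmentAttributeValue(valueString: "connect:SMS")], // assumed key/value
        sourceEndpoint: source,
        supportedMessagingContentTypes: ["text/plain"] // must always contain text/plain
    )
    let response = try await connect.startOutboundChatContact(request)
    return response.contactId
}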
INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized (for example, C4, C5, C6g, and C7g), Memory Optimization (for example, R4, R5n, R5b, and R6g). INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services invoice. LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. OPERATING_SYSTEM - The operating system. Examples are Windows or Linux. OPERATION - The action performed. Examples include RunInstance and CreateBucket. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. PURCHASE_TYPE - The reservation type of the purchase that this usage is related to. Examples include On-Demand Instances and Standard Reserved Instances. RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute). SERVICE - The Amazon Web Services service such as Amazon DynamoDB. TENANCY - The tenancy of a resource. Examples are shared or dedicated. USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs. USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute. REGION - The Amazon Web Services Region. RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees, usage costs, tax refunds, and credits. RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service. If you set the context to RESERVATIONS, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. REGION - The Amazon Web Services Region. SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone. TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI). TENANCY - The tenancy of a resource. Examples are shared or dedicated. If you set the context to SAVINGS_PLANS, you can use the following dimensions for searching: SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All Upfront) REGION - The Amazon Web Services Region. INSTANCE_TYPE_FAMILY - The family of instances (For example, m5) LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. 
SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. + /// - context: The context for the call to GetDimensionValues. This can be RESERVATIONS or COST_AND_USAGE. The default value is COST_AND_USAGE. If the context is set to RESERVATIONS, the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE, the resulting dimension values can be used in the GetCostAndUsage operation. If you set the context to COST_AND_USAGE, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following: - Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Servicesservices. - AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting reseller for Amazon Web Servicesservices in India. - Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on Amazon Web Services by third-party software providers. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized (for example, C4, C5, C6g, and C7g), Memory Optimization (for example, R4, R5n, R5b, and R6g). INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services invoice. LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. OPERATING_SYSTEM - The operating system. Examples are Windows or Linux. OPERATION - The action performed. Examples include RunInstance and CreateBucket. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. PURCHASE_TYPE - The reservation type of the purchase that this usage is related to. Examples include On-Demand Instances and Standard Reserved Instances. RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute). SERVICE - The Amazon Web Services service such as Amazon DynamoDB. TENANCY - The tenancy of a resource. Examples are shared or dedicated. USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs. USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute. REGION - The Amazon Web Services Region. RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees, usage costs, tax refunds, and credits. RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service. 
If you set the context to RESERVATIONS, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. REGION - The Amazon Web Services Region. SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone. TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI). TENANCY - The tenancy of a resource. Examples are shared or dedicated. If you set the context to SAVINGS_PLANS, you can use the following dimensions for searching: SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All Upfront) REGION - The Amazon Web Services Region. INSTANCE_TYPE_FAMILY - The family of instances (For example, m5) LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. /// - dimension: The name of the dimension. Each Dimension is available for a different Context. For more information, see Context. LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in CostCategoryRule. /// - filter: /// - maxResults: This field is only used when SortBy is provided in the request. The maximum number of objects that are returned for this request. If MaxResults isn't specified with SortBy, the request returns 1000 results as the default value for this parameter. For GetDimensionValues, MaxResults has an upper limit of 1000. diff --git a/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift b/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift index 4b0e39f8a3..bdb8159294 100644 --- a/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift +++ b/Sources/Soto/Services/CostExplorer/CostExplorer_shapes.swift @@ -335,7 +335,7 @@ extension CostExplorer { public let anomalyScore: AnomalyScore /// The first day the anomaly is detected. public let anomalyStartDate: String? - /// The dimension for the anomaly (for example, an Amazon Web Service in a service monitor). + /// The dimension for the anomaly (for example, an Amazon Web Servicesservice in a service monitor). public let dimensionValue: String? /// The feedback value. public let feedback: AnomalyFeedbackType? @@ -1435,6 +1435,24 @@ extension CostExplorer { } } + public struct DynamoDBCapacityDetails: AWSDecodableShape { + /// The capacity unit of the recommended reservation. + public let capacityUnits: String? + /// The Amazon Web Services Region of the recommended reservation. + public let region: String? + + @inlinable + public init(capacityUnits: String? = nil, region: String? 
= nil) { + self.capacityUnits = capacityUnits + self.region = region + } + + private enum CodingKeys: String, CodingKey { + case capacityUnits = "CapacityUnits" + case region = "Region" + } + } + public struct EBSResourceUtilization: AWSDecodableShape { /// The maximum size of read operations per second public let ebsReadBytesPerSecond: String? @@ -2264,7 +2282,7 @@ extension CostExplorer { } public struct GetDimensionValuesRequest: AWSEncodableShape { - /// The context for the call to GetDimensionValues. This can be RESERVATIONS or COST_AND_USAGE. The default value is COST_AND_USAGE. If the context is set to RESERVATIONS, the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE, the resulting dimension values can be used in the GetCostAndUsage operation. If you set the context to COST_AND_USAGE, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following: - Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Services. - AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting reseller for Amazon Web Services in India. - Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on Amazon Web Services by third-party software providers. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized (for example, C4, C5, C6g, and C7g), Memory Optimization (for example, R4, R5n, R5b, and R6g). INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services invoice. LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. OPERATING_SYSTEM - The operating system. Examples are Windows or Linux. OPERATION - The action performed. Examples include RunInstance and CreateBucket. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. PURCHASE_TYPE - The reservation type of the purchase that this usage is related to. Examples include On-Demand Instances and Standard Reserved Instances. RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute). SERVICE - The Amazon Web Services service such as Amazon DynamoDB. TENANCY - The tenancy of a resource. Examples are shared or dedicated. USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs. USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute. 
REGION - The Amazon Web Services Region. RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees, usage costs, tax refunds, and credits. RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service. If you set the context to RESERVATIONS, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. REGION - The Amazon Web Services Region. SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone. TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI). TENANCY - The tenancy of a resource. Examples are shared or dedicated. If you set the context to SAVINGS_PLANS, you can use the following dimensions for searching: SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All Upfront) REGION - The Amazon Web Services Region. INSTANCE_TYPE_FAMILY - The family of instances (For example, m5) LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. + /// The context for the call to GetDimensionValues. This can be RESERVATIONS or COST_AND_USAGE. The default value is COST_AND_USAGE. If the context is set to RESERVATIONS, the resulting dimension values can be used in the GetReservationUtilization operation. If the context is set to COST_AND_USAGE, the resulting dimension values can be used in the GetCostAndUsage operation. If you set the context to COST_AND_USAGE, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following: - Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Servicesservices. - AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting reseller for Amazon Web Servicesservices in India. - Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on Amazon Web Services by third-party software providers. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized (for example, C4, C5, C6g, and C7g), Memory Optimization (for example, R4, R5n, R5b, and R6g). 
INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services invoice. LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. OPERATING_SYSTEM - The operating system. Examples are Windows or Linux. OPERATION - The action performed. Examples include RunInstance and CreateBucket. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. PURCHASE_TYPE - The reservation type of the purchase that this usage is related to. Examples include On-Demand Instances and Standard Reserved Instances. RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute). SERVICE - The Amazon Web Services service such as Amazon DynamoDB. TENANCY - The tenancy of a resource. Examples are shared or dedicated. USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues operation includes a unit attribute. Examples include GB and Hrs. USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this operation includes a unit attribute. REGION - The Amazon Web Services Region. RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees, usage costs, tax refunds, and credits. RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service. If you set the context to RESERVATIONS, you can use the following dimensions for searching: AZ - The Availability Zone. An example is us-east-1a. CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux. DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ and MultiAZ. INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge. LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux. REGION - The Amazon Web Services Region. SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone. TAG (Coverage only) - The tags that are associated with a Reserved Instance (RI). TENANCY - The tenancy of a resource. Examples are shared or dedicated. If you set the context to SAVINGS_PLANS, you can use the following dimensions for searching: SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All Upfront) REGION - The Amazon Web Services Region. INSTANCE_TYPE_FAMILY - The family of instances (For example, m5) LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value field contains the Amazon Web Services ID of the member account. SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans. public let context: Context? /// The name of the dimension. Each Dimension is available for a different Context. For more information, see Context. 
LINK_ACCOUNT_NAME and SERVICE_CODE can only be used in CostCategoryRule. public let dimension: Dimension @@ -3922,13 +3940,15 @@ extension CostExplorer { public let accountId: String? /// The average number of normalized units that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. public let averageNormalizedUnitsUsedPerHour: String? + /// The average number of provisioned capacity units that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. + public let averageNumberOfCapacityUnitsUsedPerHour: String? /// The average number of instances that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. public let averageNumberOfInstancesUsedPerHour: String? - /// The average utilization of your instances. Amazon Web Services uses this to calculate your recommended reservation purchases. + /// The average utilization of your recommendations. Amazon Web Services uses this to calculate your recommended reservation purchases. public let averageUtilization: String? - /// The currency code that Amazon Web Services used to calculate the costs for this instance. + /// The currency code that Amazon Web Services used to calculate the costs for this recommendation. public let currencyCode: String? - /// How long Amazon Web Services estimates that it takes for this instance to start saving you money, in months. + /// How long Amazon Web Services estimates that it takes for this recommendation to start saving you money, in months. public let estimatedBreakEvenInMonths: String? /// How much Amazon Web Services estimates that you spend on On-Demand Instances in a month. public let estimatedMonthlyOnDemandCost: String? @@ -3942,25 +3962,34 @@ extension CostExplorer { public let instanceDetails: InstanceDetails? /// The maximum number of normalized units that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. public let maximumNormalizedUnitsUsedPerHour: String? + /// The maximum number of provisioned capacity units that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. + public let maximumNumberOfCapacityUnitsUsedPerHour: String? /// The maximum number of instances that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. public let maximumNumberOfInstancesUsedPerHour: String? /// The minimum number of normalized units that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. public let minimumNormalizedUnitsUsedPerHour: String? + /// The minimum number of provisioned capacity units that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. + public let minimumNumberOfCapacityUnitsUsedPerHour: String? /// The minimum number of instances that you used in an hour during the historical period. Amazon Web Services uses this to calculate your recommended reservation purchases. public let minimumNumberOfInstancesUsedPerHour: String? /// The number of normalized units that Amazon Web Services recommends that you purchase. public let recommendedNormalizedUnitsToPurchase: String? 
+ /// The number of reserved capacity units that Amazon Web Services recommends that you purchase. + public let recommendedNumberOfCapacityUnitsToPurchase: String? /// The number of instances that Amazon Web Services recommends that you purchase. public let recommendedNumberOfInstancesToPurchase: String? - /// How much purchasing this instance costs you on a monthly basis. + /// How much purchasing this recommendation costs you on a monthly basis. public let recurringStandardMonthlyCost: String? - /// How much purchasing this instance costs you upfront. + /// Details about the reservations that Amazon Web Services recommends that you purchase. + public let reservedCapacityDetails: ReservedCapacityDetails? + /// How much purchasing this recommendation costs you upfront. public let upfrontCost: String? @inlinable - public init(accountId: String? = nil, averageNormalizedUnitsUsedPerHour: String? = nil, averageNumberOfInstancesUsedPerHour: String? = nil, averageUtilization: String? = nil, currencyCode: String? = nil, estimatedBreakEvenInMonths: String? = nil, estimatedMonthlyOnDemandCost: String? = nil, estimatedMonthlySavingsAmount: String? = nil, estimatedMonthlySavingsPercentage: String? = nil, estimatedReservationCostForLookbackPeriod: String? = nil, instanceDetails: InstanceDetails? = nil, maximumNormalizedUnitsUsedPerHour: String? = nil, maximumNumberOfInstancesUsedPerHour: String? = nil, minimumNormalizedUnitsUsedPerHour: String? = nil, minimumNumberOfInstancesUsedPerHour: String? = nil, recommendedNormalizedUnitsToPurchase: String? = nil, recommendedNumberOfInstancesToPurchase: String? = nil, recurringStandardMonthlyCost: String? = nil, upfrontCost: String? = nil) { + public init(accountId: String? = nil, averageNormalizedUnitsUsedPerHour: String? = nil, averageNumberOfCapacityUnitsUsedPerHour: String? = nil, averageNumberOfInstancesUsedPerHour: String? = nil, averageUtilization: String? = nil, currencyCode: String? = nil, estimatedBreakEvenInMonths: String? = nil, estimatedMonthlyOnDemandCost: String? = nil, estimatedMonthlySavingsAmount: String? = nil, estimatedMonthlySavingsPercentage: String? = nil, estimatedReservationCostForLookbackPeriod: String? = nil, instanceDetails: InstanceDetails? = nil, maximumNormalizedUnitsUsedPerHour: String? = nil, maximumNumberOfCapacityUnitsUsedPerHour: String? = nil, maximumNumberOfInstancesUsedPerHour: String? = nil, minimumNormalizedUnitsUsedPerHour: String? = nil, minimumNumberOfCapacityUnitsUsedPerHour: String? = nil, minimumNumberOfInstancesUsedPerHour: String? = nil, recommendedNormalizedUnitsToPurchase: String? = nil, recommendedNumberOfCapacityUnitsToPurchase: String? = nil, recommendedNumberOfInstancesToPurchase: String? = nil, recurringStandardMonthlyCost: String? = nil, reservedCapacityDetails: ReservedCapacityDetails? = nil, upfrontCost: String? 
= nil) { self.accountId = accountId self.averageNormalizedUnitsUsedPerHour = averageNormalizedUnitsUsedPerHour + self.averageNumberOfCapacityUnitsUsedPerHour = averageNumberOfCapacityUnitsUsedPerHour self.averageNumberOfInstancesUsedPerHour = averageNumberOfInstancesUsedPerHour self.averageUtilization = averageUtilization self.currencyCode = currencyCode @@ -3971,18 +4000,23 @@ extension CostExplorer { self.estimatedReservationCostForLookbackPeriod = estimatedReservationCostForLookbackPeriod self.instanceDetails = instanceDetails self.maximumNormalizedUnitsUsedPerHour = maximumNormalizedUnitsUsedPerHour + self.maximumNumberOfCapacityUnitsUsedPerHour = maximumNumberOfCapacityUnitsUsedPerHour self.maximumNumberOfInstancesUsedPerHour = maximumNumberOfInstancesUsedPerHour self.minimumNormalizedUnitsUsedPerHour = minimumNormalizedUnitsUsedPerHour + self.minimumNumberOfCapacityUnitsUsedPerHour = minimumNumberOfCapacityUnitsUsedPerHour self.minimumNumberOfInstancesUsedPerHour = minimumNumberOfInstancesUsedPerHour self.recommendedNormalizedUnitsToPurchase = recommendedNormalizedUnitsToPurchase + self.recommendedNumberOfCapacityUnitsToPurchase = recommendedNumberOfCapacityUnitsToPurchase self.recommendedNumberOfInstancesToPurchase = recommendedNumberOfInstancesToPurchase self.recurringStandardMonthlyCost = recurringStandardMonthlyCost + self.reservedCapacityDetails = reservedCapacityDetails self.upfrontCost = upfrontCost } private enum CodingKeys: String, CodingKey { case accountId = "AccountId" case averageNormalizedUnitsUsedPerHour = "AverageNormalizedUnitsUsedPerHour" + case averageNumberOfCapacityUnitsUsedPerHour = "AverageNumberOfCapacityUnitsUsedPerHour" case averageNumberOfInstancesUsedPerHour = "AverageNumberOfInstancesUsedPerHour" case averageUtilization = "AverageUtilization" case currencyCode = "CurrencyCode" @@ -3993,12 +4027,16 @@ extension CostExplorer { case estimatedReservationCostForLookbackPeriod = "EstimatedReservationCostForLookbackPeriod" case instanceDetails = "InstanceDetails" case maximumNormalizedUnitsUsedPerHour = "MaximumNormalizedUnitsUsedPerHour" + case maximumNumberOfCapacityUnitsUsedPerHour = "MaximumNumberOfCapacityUnitsUsedPerHour" case maximumNumberOfInstancesUsedPerHour = "MaximumNumberOfInstancesUsedPerHour" case minimumNormalizedUnitsUsedPerHour = "MinimumNormalizedUnitsUsedPerHour" + case minimumNumberOfCapacityUnitsUsedPerHour = "MinimumNumberOfCapacityUnitsUsedPerHour" case minimumNumberOfInstancesUsedPerHour = "MinimumNumberOfInstancesUsedPerHour" case recommendedNormalizedUnitsToPurchase = "RecommendedNormalizedUnitsToPurchase" + case recommendedNumberOfCapacityUnitsToPurchase = "RecommendedNumberOfCapacityUnitsToPurchase" case recommendedNumberOfInstancesToPurchase = "RecommendedNumberOfInstancesToPurchase" case recurringStandardMonthlyCost = "RecurringStandardMonthlyCost" + case reservedCapacityDetails = "ReservedCapacityDetails" case upfrontCost = "UpfrontCost" } } @@ -4073,6 +4111,20 @@ extension CostExplorer { } } + public struct ReservedCapacityDetails: AWSDecodableShape { + /// The DynamoDB reservations that Amazon Web Services recommends that you purchase. + public let dynamoDBCapacityDetails: DynamoDBCapacityDetails? + + @inlinable + public init(dynamoDBCapacityDetails: DynamoDBCapacityDetails? 
= nil) { + self.dynamoDBCapacityDetails = dynamoDBCapacityDetails + } + + private enum CodingKeys: String, CodingKey { + case dynamoDBCapacityDetails = "DynamoDBCapacityDetails" + } + } + public struct ResourceDetails: AWSDecodableShape { /// Details for the Amazon EC2 resource. public let ec2ResourceDetails: EC2ResourceDetails? @@ -4264,7 +4316,7 @@ extension CostExplorer { public let linkedAccountName: String? /// The Amazon Web Services Region that's associated with the cost anomaly. public let region: String? - /// The Amazon Web Service name that's associated with the cost anomaly. + /// The Amazon Web Servicesservice name that's associated with the cost anomaly. public let service: String? /// The UsageType value that's associated with the cost anomaly. public let usageType: String? diff --git a/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_api.swift b/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_api.swift index 61328f6d4b..a4728dfe18 100644 --- a/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_api.swift +++ b/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS CustomerProfiles service. /// -/// Amazon Connect Customer Profiles Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center. For more information about the Amazon Connect Customer Profiles feature, see Use Customer Profiles in the Amazon Connect Administrator's Guide. +/// Amazon Connect Customer Profiles Customer Profiles actions Customer Profiles data types Amazon Connect Customer Profiles is a unified customer profile for your contact center that has pre-built connectors powered by AppFlow that make it easy to combine customer information from third party applications, such as Salesforce (CRM), ServiceNow (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center. For more information about the Amazon Connect Customer Profiles feature, see Use Customer Profiles in the Amazon Connect Administrator's Guide. public struct CustomerProfiles: AWSService { // MARK: Member variables @@ -1713,6 +1713,7 @@ public struct CustomerProfiles: AWSService { /// - flowDefinition: The configuration that controls how Customer Profiles retrieves data from the source. /// - objectTypeName: The name of the profile object type. /// - objectTypeNames: A map in which each key is an event type from an external application such as Segment or Shopify, and each value is an ObjectTypeName (template) used to ingest the event. + /// - roleArn: The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. /// - tags: The tags used to organize, track, or control access for this resource. /// - uri: The URI of the S3 bucket or any other type of data source. /// - logger: Logger use during operation @@ -1722,6 +1723,7 @@ public struct CustomerProfiles: AWSService { flowDefinition: FlowDefinition? = nil, objectTypeName: String? = nil, objectTypeNames: [String: String]? = nil, + roleArn: String? = nil, tags: [String: String]? = nil, uri: String? 
= nil, logger: Logger = AWSClient.loggingDisabled @@ -1731,6 +1733,7 @@ public struct CustomerProfiles: AWSService { flowDefinition: flowDefinition, objectTypeName: objectTypeName, objectTypeNames: objectTypeNames, + roleArn: roleArn, tags: tags, uri: uri ) diff --git a/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_shapes.swift b/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_shapes.swift index 015bc7664b..ae72975848 100644 --- a/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_shapes.swift +++ b/Sources/Soto/Services/CustomerProfiles/CustomerProfiles_shapes.swift @@ -2745,6 +2745,8 @@ extension CustomerProfiles { /// It supports the following event types: SegmentIdentify, ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, /// ShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders. public let objectTypeNames: [String: String]? + /// The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. + public let roleArn: String? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? /// The URI of the S3 bucket or any other type of data source. @@ -2753,13 +2755,14 @@ extension CustomerProfiles { public let workflowId: String? @inlinable - public init(createdAt: Date, domainName: String, isUnstructured: Bool? = nil, lastUpdatedAt: Date, objectTypeName: String? = nil, objectTypeNames: [String: String]? = nil, tags: [String: String]? = nil, uri: String, workflowId: String? = nil) { + public init(createdAt: Date, domainName: String, isUnstructured: Bool? = nil, lastUpdatedAt: Date, objectTypeName: String? = nil, objectTypeNames: [String: String]? = nil, roleArn: String? = nil, tags: [String: String]? = nil, uri: String, workflowId: String? = nil) { self.createdAt = createdAt self.domainName = domainName self.isUnstructured = isUnstructured self.lastUpdatedAt = lastUpdatedAt self.objectTypeName = objectTypeName self.objectTypeNames = objectTypeNames + self.roleArn = roleArn self.tags = tags self.uri = uri self.workflowId = workflowId @@ -2772,6 +2775,7 @@ extension CustomerProfiles { case lastUpdatedAt = "LastUpdatedAt" case objectTypeName = "ObjectTypeName" case objectTypeNames = "ObjectTypeNames" + case roleArn = "RoleArn" case tags = "Tags" case uri = "Uri" case workflowId = "WorkflowId" @@ -3783,6 +3787,8 @@ extension CustomerProfiles { /// It supports the following event types: SegmentIdentify, ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, /// ShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders. public let objectTypeNames: [String: String]? + /// The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. + public let roleArn: String? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? /// The URI of the S3 bucket or any other type of data source. @@ -3791,13 +3797,14 @@ extension CustomerProfiles { public let workflowId: String? @inlinable - public init(createdAt: Date, domainName: String, isUnstructured: Bool? = nil, lastUpdatedAt: Date, objectTypeName: String? = nil, objectTypeNames: [String: String]? = nil, tags: [String: String]? = nil, uri: String, workflowId: String? = nil) { + public init(createdAt: Date, domainName: String, isUnstructured: Bool? = nil, lastUpdatedAt: Date, objectTypeName: String? 
= nil, objectTypeNames: [String: String]? = nil, roleArn: String? = nil, tags: [String: String]? = nil, uri: String, workflowId: String? = nil) { self.createdAt = createdAt self.domainName = domainName self.isUnstructured = isUnstructured self.lastUpdatedAt = lastUpdatedAt self.objectTypeName = objectTypeName self.objectTypeNames = objectTypeNames + self.roleArn = roleArn self.tags = tags self.uri = uri self.workflowId = workflowId @@ -3810,6 +3817,7 @@ extension CustomerProfiles { case lastUpdatedAt = "LastUpdatedAt" case objectTypeName = "ObjectTypeName" case objectTypeNames = "ObjectTypeNames" + case roleArn = "RoleArn" case tags = "Tags" case uri = "Uri" case workflowId = "WorkflowId" @@ -4720,17 +4728,20 @@ extension CustomerProfiles { /// It supports the following event types: SegmentIdentify, ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, /// ShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders. public let objectTypeNames: [String: String]? + /// The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. + public let roleArn: String? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? /// The URI of the S3 bucket or any other type of data source. public let uri: String? @inlinable - public init(domainName: String, flowDefinition: FlowDefinition? = nil, objectTypeName: String? = nil, objectTypeNames: [String: String]? = nil, tags: [String: String]? = nil, uri: String? = nil) { + public init(domainName: String, flowDefinition: FlowDefinition? = nil, objectTypeName: String? = nil, objectTypeNames: [String: String]? = nil, roleArn: String? = nil, tags: [String: String]? = nil, uri: String? = nil) { self.domainName = domainName self.flowDefinition = flowDefinition self.objectTypeName = objectTypeName self.objectTypeNames = objectTypeNames + self.roleArn = roleArn self.tags = tags self.uri = uri } @@ -4742,6 +4753,7 @@ extension CustomerProfiles { try container.encodeIfPresent(self.flowDefinition, forKey: .flowDefinition) try container.encodeIfPresent(self.objectTypeName, forKey: .objectTypeName) try container.encodeIfPresent(self.objectTypeNames, forKey: .objectTypeNames) + try container.encodeIfPresent(self.roleArn, forKey: .roleArn) try container.encodeIfPresent(self.tags, forKey: .tags) try container.encodeIfPresent(self.uri, forKey: .uri) } @@ -4761,6 +4773,8 @@ extension CustomerProfiles { try validate($0.value, name: "objectTypeNames[\"\($0.key)\"]", parent: name, min: 1) try validate($0.value, name: "objectTypeNames[\"\($0.key)\"]", parent: name, pattern: "^[a-zA-Z_][a-zA-Z_0-9-]*$") } + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 512) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws:iam:.*:[0-9]+:") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -4777,6 +4791,7 @@ extension CustomerProfiles { case flowDefinition = "FlowDefinition" case objectTypeName = "ObjectTypeName" case objectTypeNames = "ObjectTypeNames" + case roleArn = "RoleArn" case tags = "Tags" case uri = "Uri" } @@ -4797,6 +4812,8 @@ extension CustomerProfiles { /// It supports the following event types: SegmentIdentify, ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, /// ShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders. 
public let objectTypeNames: [String: String]? + /// The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make Customer Profiles requests on your behalf. + public let roleArn: String? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? /// The URI of the S3 bucket or any other type of data source. @@ -4805,13 +4822,14 @@ extension CustomerProfiles { public let workflowId: String? @inlinable - public init(createdAt: Date, domainName: String, isUnstructured: Bool? = nil, lastUpdatedAt: Date, objectTypeName: String? = nil, objectTypeNames: [String: String]? = nil, tags: [String: String]? = nil, uri: String, workflowId: String? = nil) { + public init(createdAt: Date, domainName: String, isUnstructured: Bool? = nil, lastUpdatedAt: Date, objectTypeName: String? = nil, objectTypeNames: [String: String]? = nil, roleArn: String? = nil, tags: [String: String]? = nil, uri: String, workflowId: String? = nil) { self.createdAt = createdAt self.domainName = domainName self.isUnstructured = isUnstructured self.lastUpdatedAt = lastUpdatedAt self.objectTypeName = objectTypeName self.objectTypeNames = objectTypeNames + self.roleArn = roleArn self.tags = tags self.uri = uri self.workflowId = workflowId @@ -4824,6 +4842,7 @@ extension CustomerProfiles { case lastUpdatedAt = "LastUpdatedAt" case objectTypeName = "ObjectTypeName" case objectTypeNames = "ObjectTypeNames" + case roleArn = "RoleArn" case tags = "Tags" case uri = "Uri" case workflowId = "WorkflowId" diff --git a/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_api.swift b/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_api.swift index 4e13db1293..4ce8fc21a2 100644 --- a/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_api.swift +++ b/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_api.swift @@ -221,6 +221,59 @@ public struct DatabaseMigrationService: AWSService { return try await self.cancelReplicationTaskAssessmentRun(input, logger: logger) } + /// Creates a data migration using the provided settings. + @Sendable + @inlinable + public func createDataMigration(_ input: CreateDataMigrationMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataMigrationResponse { + try await self.client.execute( + operation: "CreateDataMigration", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a data migration using the provided settings. + /// + /// Parameters: + /// - dataMigrationName: A user-friendly name for the data migration. Data migration names have the following constraints: Must begin with a letter, and can only contain ASCII letters, digits, and hyphens. Can't end with a hyphen or contain two consecutive hyphens. Length must be from 1 to 255 characters. + /// - dataMigrationType: Specifies if the data migration is full-load only, change data capture (CDC) only, or full-load and CDC. + /// - enableCloudwatchLogs: Specifies whether to enable CloudWatch logs for the data migration. + /// - migrationProjectIdentifier: An identifier for the migration project. + /// - numberOfJobs: The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target. + /// - selectionRules: An optional JSON string specifying what tables, views, and schemas to include or exclude from the migration. 
+ /// - serviceAccessRoleArn: The Amazon Resource Name (ARN) for the service access role that you want to use to create the data migration. + /// - sourceDataSettings: Specifies information about the source data provider. + /// - tags: One or more tags to be assigned to the data migration. + /// - logger: Logger use during operation + @inlinable + public func createDataMigration( + dataMigrationName: String? = nil, + dataMigrationType: MigrationTypeValue, + enableCloudwatchLogs: Bool? = nil, + migrationProjectIdentifier: String, + numberOfJobs: Int? = nil, + selectionRules: String? = nil, + serviceAccessRoleArn: String, + sourceDataSettings: [SourceDataSetting]? = nil, + tags: [Tag]? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateDataMigrationResponse { + let input = CreateDataMigrationMessage( + dataMigrationName: dataMigrationName, + dataMigrationType: dataMigrationType, + enableCloudwatchLogs: enableCloudwatchLogs, + migrationProjectIdentifier: migrationProjectIdentifier, + numberOfJobs: numberOfJobs, + selectionRules: selectionRules, + serviceAccessRoleArn: serviceAccessRoleArn, + sourceDataSettings: sourceDataSettings, + tags: tags + ) + return try await self.createDataMigration(input, logger: logger) + } + /// Creates a data provider using the provided settings. A data provider stores a data store type and location information about your database. @Sendable @inlinable @@ -286,7 +339,7 @@ public struct DatabaseMigrationService: AWSService { /// - elasticsearchSettings: Settings in JSON format for the target OpenSearch endpoint. For more information about the available settings, see Extra Connection Attributes When Using OpenSearch as a Target for DMS in the Database Migration Service User Guide. /// - endpointIdentifier: The database endpoint identifier. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens. /// - endpointType: The type of endpoint. Valid values are source and target. - /// - engineName: The type of engine for the endpoint. Valid values, depending on the EndpointType value, include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", "opensearch", "redshift", "s3", "db2", "db2-zos", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", "kafka", "elasticsearch", "docdb", "sqlserver", "neptune", and "babelfish". + /// - engineName: The type of engine for the endpoint. Valid values, depending on the EndpointType value, include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", "opensearch", "redshift", "s3", "db2", "db2-zos", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", "kafka", "elasticsearch", "docdb", "sqlserver", "neptune", "babelfish", redshift-serverless, aurora-serverless, aurora-postgresql-serverless, gcp-mysql, azure-sql-managed-instance, redis, dms-transfer. /// - externalTableDefinition: The external table definition. /// - extraConnectionAttributes: Additional attributes associated with the connection. Each attribute is specified as a name-value pair associated by an equal sign (=). Multiple attributes are separated by a semicolon (;) with no additional white space. For information on the attributes available for connecting your source or target endpoint, see Working with DMS Endpoints in the Database Migration Service User Guide. /// - gcpMySQLSettings: Settings in JSON format for the source GCP MySQL endpoint. 
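Reviewer note: the hunk above adds a CreateDataMigration operation plus a parameter-level convenience overload to DatabaseMigrationService. A minimal usage sketch follows; the client configuration, region, migration project identifier, role ARN, and the .fullLoadAndCdc case name are illustrative assumptions, not values taken from this change.

import SotoCore
import SotoDatabaseMigrationService

// Sketch only: assumes the default credential chain and an arbitrary region; all identifiers are placeholders.
let client = AWSClient()
let dms = DatabaseMigrationService(client: client, region: .useast1)

// Calls the convenience overload introduced above; parameters with defaults are omitted.
let created = try await dms.createDataMigration(
    dataMigrationName: "example-data-migration",
    dataMigrationType: .fullLoadAndCdc, // assumed Swift case name for "full-load-and-cdc"
    migrationProjectIdentifier: "example-migration-project",
    serviceAccessRoleArn: "arn:aws:iam::123456789012:role/example-dms-role"
)
print(created.dataMigration?.dataMigrationStatus ?? "status unavailable")
try await client.shutdown()

The returned CreateDataMigrationResponse wraps the DataMigration shape defined later in this diff, so status, statistics, and settings can be read directly off the response.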
@@ -728,7 +781,7 @@ public struct DatabaseMigrationService: AWSService { /// /// Parameters: /// - replicationSubnetGroupDescription: The description for the subnet group. - /// - replicationSubnetGroupIdentifier: The name for the replication subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be "default". Example: mySubnetgroup + /// - replicationSubnetGroupIdentifier: The name for the replication subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, or hyphens. Must not be "default". Example: mySubnetgroup /// - subnetIds: Two or more subnet IDs to be assigned to the subnet group. /// - tags: One or more tags to be assigned to the subnet group. /// - logger: Logger use during operation @@ -875,6 +928,35 @@ public struct DatabaseMigrationService: AWSService { return try await self.deleteConnection(input, logger: logger) } + /// Deletes the specified data migration. + @Sendable + @inlinable + public func deleteDataMigration(_ input: DeleteDataMigrationMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteDataMigrationResponse { + try await self.client.execute( + operation: "DeleteDataMigration", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the specified data migration. + /// + /// Parameters: + /// - dataMigrationIdentifier: The identifier (name or ARN) of the data migration to delete. + /// - logger: Logger use during operation + @inlinable + public func deleteDataMigration( + dataMigrationIdentifier: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteDataMigrationResponse { + let input = DeleteDataMigrationMessage( + dataMigrationIdentifier: dataMigrationIdentifier + ) + return try await self.deleteDataMigration(input, logger: logger) + } + /// Deletes the specified data provider. All migration projects associated with the data provider must be deleted or modified before you can delete the data provider. @Sendable @inlinable @@ -1395,6 +1477,47 @@ public struct DatabaseMigrationService: AWSService { return try await self.describeConversionConfiguration(input, logger: logger) } + /// Returns information about data migrations. + @Sendable + @inlinable + public func describeDataMigrations(_ input: DescribeDataMigrationsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeDataMigrationsResponse { + try await self.client.execute( + operation: "DescribeDataMigrations", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns information about data migrations. + /// + /// Parameters: + /// - filters: Filters applied to the data migrations. + /// - marker: An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. + /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. + /// - withoutSettings: An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. 
To use this option, choose true; otherwise, choose false (the default). + /// - withoutStatistics: An option to set to avoid returning information about statistics. Use this to reduce overhead when statistics information is too large. To use this option, choose true; otherwise, choose false (the default). + /// - logger: Logger use during operation + @inlinable + public func describeDataMigrations( + filters: [Filter]? = nil, + marker: String? = nil, + maxRecords: Int? = nil, + withoutSettings: Bool? = nil, + withoutStatistics: Bool? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeDataMigrationsResponse { + let input = DescribeDataMigrationsMessage( + filters: filters, + marker: marker, + maxRecords: maxRecords, + withoutSettings: withoutSettings, + withoutStatistics: withoutStatistics + ) + return try await self.describeDataMigrations(input, logger: logger) + } + /// Returns a paginated list of data providers for your account in the current region. @Sendable @inlinable @@ -2898,6 +3021,56 @@ public struct DatabaseMigrationService: AWSService { return try await self.modifyConversionConfiguration(input, logger: logger) } + /// Modifies an existing DMS data migration. + @Sendable + @inlinable + public func modifyDataMigration(_ input: ModifyDataMigrationMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyDataMigrationResponse { + try await self.client.execute( + operation: "ModifyDataMigration", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies an existing DMS data migration. + /// + /// Parameters: + /// - dataMigrationIdentifier: The identifier (name or ARN) of the data migration to modify. + /// - dataMigrationName: The new name for the data migration. + /// - dataMigrationType: The new migration type for the data migration. + /// - enableCloudwatchLogs: Whether to enable Cloudwatch logs for the data migration. + /// - numberOfJobs: The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target. + /// - selectionRules: A JSON-formatted string that defines what objects to include and exclude from the migration. + /// - serviceAccessRoleArn: The new service access role ARN for the data migration. + /// - sourceDataSettings: The new information about the source data provider for the data migration. + /// - logger: Logger use during operation + @inlinable + public func modifyDataMigration( + dataMigrationIdentifier: String, + dataMigrationName: String? = nil, + dataMigrationType: MigrationTypeValue? = nil, + enableCloudwatchLogs: Bool? = nil, + numberOfJobs: Int? = nil, + selectionRules: String? = nil, + serviceAccessRoleArn: String? = nil, + sourceDataSettings: [SourceDataSetting]? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ModifyDataMigrationResponse { + let input = ModifyDataMigrationMessage( + dataMigrationIdentifier: dataMigrationIdentifier, + dataMigrationName: dataMigrationName, + dataMigrationType: dataMigrationType, + enableCloudwatchLogs: enableCloudwatchLogs, + numberOfJobs: numberOfJobs, + selectionRules: selectionRules, + serviceAccessRoleArn: serviceAccessRoleArn, + sourceDataSettings: sourceDataSettings + ) + return try await self.modifyDataMigration(input, logger: logger) + } + /// Modifies the specified data provider using the provided settings. You must remove the data provider from all migration projects before you can modify it. 
@Sendable @inlinable @@ -3631,6 +3804,38 @@ public struct DatabaseMigrationService: AWSService { ) } + /// Starts the specified data migration. + @Sendable + @inlinable + public func startDataMigration(_ input: StartDataMigrationMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> StartDataMigrationResponse { + try await self.client.execute( + operation: "StartDataMigration", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Starts the specified data migration. + /// + /// Parameters: + /// - dataMigrationIdentifier: The identifier (name or ARN) of the data migration to start. + /// - startType: Specifies the start type for the data migration. Valid values include start-replication, reload-target, and resume-processing. + /// - logger: Logger use during operation + @inlinable + public func startDataMigration( + dataMigrationIdentifier: String, + startType: StartReplicationMigrationTypeValue, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> StartDataMigrationResponse { + let input = StartDataMigrationMessage( + dataMigrationIdentifier: dataMigrationIdentifier, + startType: startType + ) + return try await self.startDataMigration(input, logger: logger) + } + /// Applies the extension pack to your target database. An extension pack is an add-on module that emulates functions present in a source database that are required when converting objects to the target database. @Sendable @inlinable @@ -4031,6 +4236,35 @@ public struct DatabaseMigrationService: AWSService { return try await self.startReplicationTaskAssessmentRun(input, logger: logger) } + /// Stops the specified data migration. + @Sendable + @inlinable + public func stopDataMigration(_ input: StopDataMigrationMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> StopDataMigrationResponse { + try await self.client.execute( + operation: "StopDataMigration", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Stops the specified data migration. + /// + /// Parameters: + /// - dataMigrationIdentifier: The identifier (name or ARN) of the data migration to stop. + /// - logger: Logger use during operation + @inlinable + public func stopDataMigration( + dataMigrationIdentifier: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> StopDataMigrationResponse { + let input = StopDataMigrationMessage( + dataMigrationIdentifier: dataMigrationIdentifier + ) + return try await self.stopDataMigration(input, logger: logger) + } + /// For a given DMS Serverless replication configuration, DMS stops any and all ongoing DMS Serverless replications. This command doesn't deprovision the stopped replications. @Sendable @inlinable @@ -4287,6 +4521,49 @@ extension DatabaseMigrationService { return self.describeConnectionsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``describeDataMigrations(_:logger:)``. 
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func describeDataMigrationsPaginator( + _ input: DescribeDataMigrationsMessage, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeDataMigrations, + inputKey: \DescribeDataMigrationsMessage.marker, + outputKey: \DescribeDataMigrationsResponse.marker, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``describeDataMigrations(_:logger:)``. + /// + /// - Parameters: + /// - filters: Filters applied to the data migrations. + /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. + /// - withoutSettings: An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. To use this option, choose true; otherwise, choose false (the default). + /// - withoutStatistics: An option to set to avoid returning information about statistics. Use this to reduce overhead when statistics information is too large. To use this option, choose true; otherwise, choose false (the default). + /// - logger: Logger used for logging + @inlinable + public func describeDataMigrationsPaginator( + filters: [Filter]? = nil, + maxRecords: Int? = nil, + withoutSettings: Bool? = nil, + withoutStatistics: Bool? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = DescribeDataMigrationsMessage( + filters: filters, + maxRecords: maxRecords, + withoutSettings: withoutSettings, + withoutStatistics: withoutStatistics + ) + return self.describeDataMigrationsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``describeDataProviders(_:logger:)``. 
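Reviewer note: the paginator added above returns an AWSClient.PaginatorSequence that can be iterated with for try await. A brief sketch, assuming dms is a configured DatabaseMigrationService client as in the earlier sketch and using an arbitrary page size:

// Sketch only: list all data migrations page by page via the new paginator.
let pages = dms.describeDataMigrationsPaginator(maxRecords: 20)
for try await page in pages {
    for migration in page.dataMigrations ?? [] {
        print(migration.dataMigrationName ?? "unnamed", migration.dataMigrationStatus ?? "unknown")
    }
}

The sequence threads the Marker token between DescribeDataMigrations calls through the AWSPaginateToken conformance added further down in this diff.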
/// /// - Parameters: @@ -5699,6 +5976,19 @@ extension DatabaseMigrationService.DescribeConnectionsMessage: AWSPaginateToken } } +extension DatabaseMigrationService.DescribeDataMigrationsMessage: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> DatabaseMigrationService.DescribeDataMigrationsMessage { + return .init( + filters: self.filters, + marker: token, + maxRecords: self.maxRecords, + withoutSettings: self.withoutSettings, + withoutStatistics: self.withoutStatistics + ) + } +} + extension DatabaseMigrationService.DescribeDataProvidersMessage: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> DatabaseMigrationService.DescribeDataProvidersMessage { diff --git a/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_shapes.swift b/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_shapes.swift index e0343873c0..820a9c39ed 100644 --- a/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_shapes.swift +++ b/Sources/Soto/Services/DatabaseMigrationService/DatabaseMigrationService_shapes.swift @@ -249,6 +249,13 @@ extension DatabaseMigrationService { public var description: String { return self.rawValue } } + public enum StartReplicationMigrationTypeValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case reloadTarget = "reload-target" + case resumeProcessing = "resume-processing" + case startReplication = "start-replication" + public var description: String { return self.rawValue } + } + public enum StartReplicationTaskTypeValue: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case reloadTarget = "reload-target" case resumeProcessing = "resume-processing" @@ -697,7 +704,7 @@ extension DatabaseMigrationService { public let kmsKeyId: String? /// Specifies the maximum value of the DMS capacity units (DCUs) for which a given DMS Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value allowed. The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the maximum value that you can specify for DMS Serverless is 384. The MaxCapacityUnits parameter is the only DCU parameter you are required to specify. public let maxCapacityUnits: Int? - /// Specifies the minimum value of the DMS capacity units (DCUs) for which a given DMS Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value allowed. The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU value that you can specify for DMS Serverless is 1. You don't have to specify a value for the MinCapacityUnits parameter. If you don't set this value, DMS scans the current activity of available source tables to identify an optimum setting for this parameter. If there is no current source activity or DMS can't otherwise identify a more appropriate value, it sets this parameter to the minimum DCU value allowed, 1. + /// Specifies the minimum value of the DMS capacity units (DCUs) for which a given DMS Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value allowed. The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU value that you can specify for DMS Serverless is 1. If you don't set this value, DMS sets this parameter to the minimum DCU value allowed, 1. 
If there is no current source activity, DMS scales down your replication until it reaches the value specified in MinCapacityUnits. public let minCapacityUnits: Int? /// Specifies whether the DMS Serverless replication is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the MultiAZ parameter is set to true. public let multiAZ: Bool? @@ -768,6 +775,66 @@ extension DatabaseMigrationService { } } + public struct CreateDataMigrationMessage: AWSEncodableShape { + /// A user-friendly name for the data migration. Data migration names have the following constraints: Must begin with a letter, and can only contain ASCII letters, digits, and hyphens. Can't end with a hyphen or contain two consecutive hyphens. Length must be from 1 to 255 characters. + public let dataMigrationName: String? + /// Specifies if the data migration is full-load only, change data capture (CDC) only, or full-load and CDC. + public let dataMigrationType: MigrationTypeValue + /// Specifies whether to enable CloudWatch logs for the data migration. + public let enableCloudwatchLogs: Bool? + /// An identifier for the migration project. + public let migrationProjectIdentifier: String + /// The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target. + public let numberOfJobs: Int? + /// An optional JSON string specifying what tables, views, and schemas to include or exclude from the migration. + public let selectionRules: String? + /// The Amazon Resource Name (ARN) for the service access role that you want to use to create the data migration. + public let serviceAccessRoleArn: String + /// Specifies information about the source data provider. + public let sourceDataSettings: [SourceDataSetting]? + /// One or more tags to be assigned to the data migration. + public let tags: [Tag]? + + @inlinable + public init(dataMigrationName: String? = nil, dataMigrationType: MigrationTypeValue, enableCloudwatchLogs: Bool? = nil, migrationProjectIdentifier: String, numberOfJobs: Int? = nil, selectionRules: String? = nil, serviceAccessRoleArn: String, sourceDataSettings: [SourceDataSetting]? = nil, tags: [Tag]? = nil) { + self.dataMigrationName = dataMigrationName + self.dataMigrationType = dataMigrationType + self.enableCloudwatchLogs = enableCloudwatchLogs + self.migrationProjectIdentifier = migrationProjectIdentifier + self.numberOfJobs = numberOfJobs + self.selectionRules = selectionRules + self.serviceAccessRoleArn = serviceAccessRoleArn + self.sourceDataSettings = sourceDataSettings + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case dataMigrationName = "DataMigrationName" + case dataMigrationType = "DataMigrationType" + case enableCloudwatchLogs = "EnableCloudwatchLogs" + case migrationProjectIdentifier = "MigrationProjectIdentifier" + case numberOfJobs = "NumberOfJobs" + case selectionRules = "SelectionRules" + case serviceAccessRoleArn = "ServiceAccessRoleArn" + case sourceDataSettings = "SourceDataSettings" + case tags = "Tags" + } + } + + public struct CreateDataMigrationResponse: AWSDecodableShape { + /// Information about the created data migration. + public let dataMigration: DataMigration? + + @inlinable + public init(dataMigration: DataMigration? = nil) { + self.dataMigration = dataMigration + } + + private enum CodingKeys: String, CodingKey { + case dataMigration = "DataMigration" + } + } + public struct CreateDataProviderMessage: AWSEncodableShape { /// A user-friendly name for the data provider. 
public let dataProviderName: String? @@ -828,7 +895,7 @@ extension DatabaseMigrationService { public let endpointIdentifier: String /// The type of endpoint. Valid values are source and target. public let endpointType: ReplicationEndpointTypeValue - /// The type of engine for the endpoint. Valid values, depending on the EndpointType value, include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", "opensearch", "redshift", "s3", "db2", "db2-zos", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", "kafka", "elasticsearch", "docdb", "sqlserver", "neptune", and "babelfish". + /// The type of engine for the endpoint. Valid values, depending on the EndpointType value, include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", "opensearch", "redshift", "s3", "db2", "db2-zos", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", "kafka", "elasticsearch", "docdb", "sqlserver", "neptune", "babelfish", redshift-serverless, aurora-serverless, aurora-postgresql-serverless, gcp-mysql, azure-sql-managed-instance, redis, dms-transfer. public let engineName: String /// The external table definition. public let externalTableDefinition: String? @@ -1353,7 +1420,7 @@ extension DatabaseMigrationService { public struct CreateReplicationSubnetGroupMessage: AWSEncodableShape { /// The description for the subnet group. public let replicationSubnetGroupDescription: String - /// The name for the replication subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens. Must not be "default". Example: mySubnetgroup + /// The name for the replication subnet group. This value is stored as a lowercase string. Constraints: Must contain no more than 255 alphanumeric characters, periods, underscores, or hyphens. Must not be "default". Example: mySubnetgroup public let replicationSubnetGroupIdentifier: String /// Two or more subnet IDs to be assigned to the subnet group. public let subnetIds: [String] @@ -1466,6 +1533,149 @@ extension DatabaseMigrationService { } } + public struct DataMigration: AWSDecodableShape { + /// The Amazon Resource Name (ARN) that identifies this replication. + public let dataMigrationArn: String? + /// The UTC time when DMS created the data migration. + @OptionalCustomCoding + public var dataMigrationCreateTime: Date? + /// The UTC time when data migration ended. + @OptionalCustomCoding + public var dataMigrationEndTime: Date? + /// The user-friendly name for the data migration. + public let dataMigrationName: String? + /// Specifies CloudWatch settings and selection rules for the data migration. + public let dataMigrationSettings: DataMigrationSettings? + /// The UTC time when DMS started the data migration. + @OptionalCustomCoding + public var dataMigrationStartTime: Date? + /// Provides information about the data migration's run, including start and stop time, latency, and data migration progress. + public let dataMigrationStatistics: DataMigrationStatistics? + /// The current status of the data migration. + public let dataMigrationStatus: String? + /// Specifies whether the data migration is full-load only, change data capture (CDC) only, or full-load and CDC. + public let dataMigrationType: MigrationTypeValue? + /// Information about the data migration's most recent error or failure. + public let lastFailureMessage: String? + /// The Amazon Resource Name (ARN) of the data migration's associated migration project. 
+ public let migrationProjectArn: String? + /// The IP addresses of the endpoints for the data migration. + public let publicIpAddresses: [String]? + /// The IAM role that the data migration uses to access Amazon Web Services resources. + public let serviceAccessRoleArn: String? + /// Specifies information about the data migration's source data provider. + public let sourceDataSettings: [SourceDataSetting]? + /// The reason the data migration last stopped. + public let stopReason: String? + + @inlinable + public init(dataMigrationArn: String? = nil, dataMigrationCreateTime: Date? = nil, dataMigrationEndTime: Date? = nil, dataMigrationName: String? = nil, dataMigrationSettings: DataMigrationSettings? = nil, dataMigrationStartTime: Date? = nil, dataMigrationStatistics: DataMigrationStatistics? = nil, dataMigrationStatus: String? = nil, dataMigrationType: MigrationTypeValue? = nil, lastFailureMessage: String? = nil, migrationProjectArn: String? = nil, publicIpAddresses: [String]? = nil, serviceAccessRoleArn: String? = nil, sourceDataSettings: [SourceDataSetting]? = nil, stopReason: String? = nil) { + self.dataMigrationArn = dataMigrationArn + self.dataMigrationCreateTime = dataMigrationCreateTime + self.dataMigrationEndTime = dataMigrationEndTime + self.dataMigrationName = dataMigrationName + self.dataMigrationSettings = dataMigrationSettings + self.dataMigrationStartTime = dataMigrationStartTime + self.dataMigrationStatistics = dataMigrationStatistics + self.dataMigrationStatus = dataMigrationStatus + self.dataMigrationType = dataMigrationType + self.lastFailureMessage = lastFailureMessage + self.migrationProjectArn = migrationProjectArn + self.publicIpAddresses = publicIpAddresses + self.serviceAccessRoleArn = serviceAccessRoleArn + self.sourceDataSettings = sourceDataSettings + self.stopReason = stopReason + } + + private enum CodingKeys: String, CodingKey { + case dataMigrationArn = "DataMigrationArn" + case dataMigrationCreateTime = "DataMigrationCreateTime" + case dataMigrationEndTime = "DataMigrationEndTime" + case dataMigrationName = "DataMigrationName" + case dataMigrationSettings = "DataMigrationSettings" + case dataMigrationStartTime = "DataMigrationStartTime" + case dataMigrationStatistics = "DataMigrationStatistics" + case dataMigrationStatus = "DataMigrationStatus" + case dataMigrationType = "DataMigrationType" + case lastFailureMessage = "LastFailureMessage" + case migrationProjectArn = "MigrationProjectArn" + case publicIpAddresses = "PublicIpAddresses" + case serviceAccessRoleArn = "ServiceAccessRoleArn" + case sourceDataSettings = "SourceDataSettings" + case stopReason = "StopReason" + } + } + + public struct DataMigrationSettings: AWSDecodableShape { + /// Whether to enable CloudWatch logging for the data migration. + public let cloudwatchLogsEnabled: Bool? + /// The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target. + public let numberOfJobs: Int? + /// A JSON-formatted string that defines what objects to include and exclude from the migration. + public let selectionRules: String? + + @inlinable + public init(cloudwatchLogsEnabled: Bool? = nil, numberOfJobs: Int? = nil, selectionRules: String? 
= nil) { + self.cloudwatchLogsEnabled = cloudwatchLogsEnabled + self.numberOfJobs = numberOfJobs + self.selectionRules = selectionRules + } + + private enum CodingKeys: String, CodingKey { + case cloudwatchLogsEnabled = "CloudwatchLogsEnabled" + case numberOfJobs = "NumberOfJobs" + case selectionRules = "SelectionRules" + } + } + + public struct DataMigrationStatistics: AWSDecodableShape { + /// The current latency of the change data capture (CDC) operation. + public let cdcLatency: Int? + /// The elapsed duration of the data migration run. + public let elapsedTimeMillis: Int64? + /// The data migration's progress in the full-load migration phase. + public let fullLoadPercentage: Int? + /// The time when the migration started. + @OptionalCustomCoding + public var startTime: Date? + /// The time when the migration stopped or failed. + @OptionalCustomCoding + public var stopTime: Date? + /// The number of tables that DMS failed to process. + public let tablesErrored: Int? + /// The number of tables loaded in the current data migration run. + public let tablesLoaded: Int? + /// The data migration's table loading progress. + public let tablesLoading: Int? + /// The number of tables that are waiting for processing. + public let tablesQueued: Int? + + @inlinable + public init(cdcLatency: Int? = nil, elapsedTimeMillis: Int64? = nil, fullLoadPercentage: Int? = nil, startTime: Date? = nil, stopTime: Date? = nil, tablesErrored: Int? = nil, tablesLoaded: Int? = nil, tablesLoading: Int? = nil, tablesQueued: Int? = nil) { + self.cdcLatency = cdcLatency + self.elapsedTimeMillis = elapsedTimeMillis + self.fullLoadPercentage = fullLoadPercentage + self.startTime = startTime + self.stopTime = stopTime + self.tablesErrored = tablesErrored + self.tablesLoaded = tablesLoaded + self.tablesLoading = tablesLoading + self.tablesQueued = tablesQueued + } + + private enum CodingKeys: String, CodingKey { + case cdcLatency = "CDCLatency" + case elapsedTimeMillis = "ElapsedTimeMillis" + case fullLoadPercentage = "FullLoadPercentage" + case startTime = "StartTime" + case stopTime = "StopTime" + case tablesErrored = "TablesErrored" + case tablesLoaded = "TablesLoaded" + case tablesLoading = "TablesLoading" + case tablesQueued = "TablesQueued" + } + } + public struct DataProvider: AWSDecodableShape { /// The Amazon Resource Name (ARN) string that uniquely identifies the data provider. public let dataProviderArn: String? @@ -1739,6 +1949,34 @@ extension DatabaseMigrationService { } } + public struct DeleteDataMigrationMessage: AWSEncodableShape { + /// The identifier (name or ARN) of the data migration to delete. + public let dataMigrationIdentifier: String + + @inlinable + public init(dataMigrationIdentifier: String) { + self.dataMigrationIdentifier = dataMigrationIdentifier + } + + private enum CodingKeys: String, CodingKey { + case dataMigrationIdentifier = "DataMigrationIdentifier" + } + } + + public struct DeleteDataMigrationResponse: AWSDecodableShape { + /// The deleted data migration. + public let dataMigration: DataMigration? + + @inlinable + public init(dataMigration: DataMigration? = nil) { + self.dataMigration = dataMigration + } + + private enum CodingKeys: String, CodingKey { + case dataMigration = "DataMigration" + } + } + public struct DeleteDataProviderMessage: AWSEncodableShape { /// The identifier of the data provider to delete. 
public let dataProviderIdentifier: String @@ -2227,6 +2465,58 @@ extension DatabaseMigrationService { } } + public struct DescribeDataMigrationsMessage: AWSEncodableShape { + /// Filters applied to the data migrations. + public let filters: [Filter]? + /// An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. + public let marker: String? + /// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. + public let maxRecords: Int? + /// An option to set to avoid returning information about settings. Use this to reduce overhead when setting information is too large. To use this option, choose true; otherwise, choose false (the default). + public let withoutSettings: Bool? + /// An option to set to avoid returning information about statistics. Use this to reduce overhead when statistics information is too large. To use this option, choose true; otherwise, choose false (the default). + public let withoutStatistics: Bool? + + @inlinable + public init(filters: [Filter]? = nil, marker: String? = nil, maxRecords: Int? = nil, withoutSettings: Bool? = nil, withoutStatistics: Bool? = nil) { + self.filters = filters + self.marker = marker + self.maxRecords = maxRecords + self.withoutSettings = withoutSettings + self.withoutStatistics = withoutStatistics + } + + public func validate(name: String) throws { + try self.validate(self.marker, name: "marker", parent: name, max: 1024) + } + + private enum CodingKeys: String, CodingKey { + case filters = "Filters" + case marker = "Marker" + case maxRecords = "MaxRecords" + case withoutSettings = "WithoutSettings" + case withoutStatistics = "WithoutStatistics" + } + } + + public struct DescribeDataMigrationsResponse: AWSDecodableShape { + /// Returns information about the data migrations used in the project. + public let dataMigrations: [DataMigration]? + /// An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. + public let marker: String? + + @inlinable + public init(dataMigrations: [DataMigration]? = nil, marker: String? = nil) { + self.dataMigrations = dataMigrations + self.marker = marker + } + + private enum CodingKeys: String, CodingKey { + case dataMigrations = "DataMigrations" + case marker = "Marker" + } + } + public struct DescribeDataProvidersMessage: AWSEncodableShape { /// Filters applied to the data providers described in the form of key-value pairs. Valid filter names: data-provider-identifier public let filters: [Filter]? @@ -5101,6 +5391,62 @@ extension DatabaseMigrationService { } } + public struct ModifyDataMigrationMessage: AWSEncodableShape { + /// The identifier (name or ARN) of the data migration to modify. + public let dataMigrationIdentifier: String + /// The new name for the data migration. + public let dataMigrationName: String? + /// The new migration type for the data migration. + public let dataMigrationType: MigrationTypeValue? + /// Whether to enable Cloudwatch logs for the data migration. + public let enableCloudwatchLogs: Bool? + /// The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target. + public let numberOfJobs: Int? 
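With the DescribeDataMigrationsMessage and DescribeDataMigrationsResponse shapes above, listing migrations looks like the sketch below. It assumes the companion DatabaseMigrationService_api.swift change in this PR also generates a describeDataMigrations(_:logger:) client method (that hunk is not shown here), and that `dms` is an already-configured DatabaseMigrationService client:

```swift
import SotoDatabaseMigrationService

// List data migrations, skipping the potentially large statistics blocks.
func listDataMigrations(dms: DatabaseMigrationService) async throws {
    let request = DatabaseMigrationService.DescribeDataMigrationsMessage(
        maxRecords: 20,
        withoutStatistics: true
    )
    let response = try await dms.describeDataMigrations(request)  // assumed generated method
    for migration in response.dataMigrations ?? [] {
        print(migration.dataMigrationName ?? "?", migration.dataMigrationStatus ?? "?")
    }
}
```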
+ /// A JSON-formatted string that defines what objects to include and exclude from the migration. + public let selectionRules: String? + /// The new service access role ARN for the data migration. + public let serviceAccessRoleArn: String? + /// The new information about the source data provider for the data migration. + public let sourceDataSettings: [SourceDataSetting]? + + @inlinable + public init(dataMigrationIdentifier: String, dataMigrationName: String? = nil, dataMigrationType: MigrationTypeValue? = nil, enableCloudwatchLogs: Bool? = nil, numberOfJobs: Int? = nil, selectionRules: String? = nil, serviceAccessRoleArn: String? = nil, sourceDataSettings: [SourceDataSetting]? = nil) { + self.dataMigrationIdentifier = dataMigrationIdentifier + self.dataMigrationName = dataMigrationName + self.dataMigrationType = dataMigrationType + self.enableCloudwatchLogs = enableCloudwatchLogs + self.numberOfJobs = numberOfJobs + self.selectionRules = selectionRules + self.serviceAccessRoleArn = serviceAccessRoleArn + self.sourceDataSettings = sourceDataSettings + } + + private enum CodingKeys: String, CodingKey { + case dataMigrationIdentifier = "DataMigrationIdentifier" + case dataMigrationName = "DataMigrationName" + case dataMigrationType = "DataMigrationType" + case enableCloudwatchLogs = "EnableCloudwatchLogs" + case numberOfJobs = "NumberOfJobs" + case selectionRules = "SelectionRules" + case serviceAccessRoleArn = "ServiceAccessRoleArn" + case sourceDataSettings = "SourceDataSettings" + } + } + + public struct ModifyDataMigrationResponse: AWSDecodableShape { + /// Information about the modified data migration. + public let dataMigration: DataMigration? + + @inlinable + public init(dataMigration: DataMigration? = nil) { + self.dataMigration = dataMigration + } + + private enum CodingKeys: String, CodingKey { + case dataMigration = "DataMigration" + } + } + public struct ModifyDataProviderMessage: AWSEncodableShape { /// The identifier of the data provider. Identifiers must begin with a letter and must contain only ASCII letters, digits, and hyphens. They can't end with a hyphen, or contain two consecutive hyphens. public let dataProviderIdentifier: String @@ -8075,6 +8421,66 @@ extension DatabaseMigrationService { } } + public struct SourceDataSetting: AWSEncodableShape & AWSDecodableShape { + /// The change data capture (CDC) start position for the source data provider. + public let cdcStartPosition: String? + /// The change data capture (CDC) start time for the source data provider. + @OptionalCustomCoding + public var cdcStartTime: Date? + /// The change data capture (CDC) stop time for the source data provider. + @OptionalCustomCoding + public var cdcStopTime: Date? + /// The name of the replication slot on the source data provider. This attribute is only valid for a PostgreSQL or Aurora PostgreSQL source. + public let slotName: String? + + @inlinable + public init(cdcStartPosition: String? = nil, cdcStartTime: Date? = nil, cdcStopTime: Date? = nil, slotName: String? = nil) { + self.cdcStartPosition = cdcStartPosition + self.cdcStartTime = cdcStartTime + self.cdcStopTime = cdcStopTime + self.slotName = slotName + } + + private enum CodingKeys: String, CodingKey { + case cdcStartPosition = "CDCStartPosition" + case cdcStartTime = "CDCStartTime" + case cdcStopTime = "CDCStopTime" + case slotName = "SlotName" + } + } + + public struct StartDataMigrationMessage: AWSEncodableShape { + /// The identifier (name or ARN) of the data migration to start. 
+ public let dataMigrationIdentifier: String + /// Specifies the start type for the data migration. Valid values include start-replication, reload-target, and resume-processing. + public let startType: StartReplicationMigrationTypeValue + + @inlinable + public init(dataMigrationIdentifier: String, startType: StartReplicationMigrationTypeValue) { + self.dataMigrationIdentifier = dataMigrationIdentifier + self.startType = startType + } + + private enum CodingKeys: String, CodingKey { + case dataMigrationIdentifier = "DataMigrationIdentifier" + case startType = "StartType" + } + } + + public struct StartDataMigrationResponse: AWSDecodableShape { + /// The data migration that DMS started. + public let dataMigration: DataMigration? + + @inlinable + public init(dataMigration: DataMigration? = nil) { + self.dataMigration = dataMigration + } + + private enum CodingKeys: String, CodingKey { + case dataMigration = "DataMigration" + } + } + public struct StartExtensionPackAssociationMessage: AWSEncodableShape { /// The migration project name or Amazon Resource Name (ARN). public let migrationProjectIdentifier: String @@ -8495,6 +8901,34 @@ extension DatabaseMigrationService { } } + public struct StopDataMigrationMessage: AWSEncodableShape { + /// The identifier (name or ARN) of the data migration to stop. + public let dataMigrationIdentifier: String + + @inlinable + public init(dataMigrationIdentifier: String) { + self.dataMigrationIdentifier = dataMigrationIdentifier + } + + private enum CodingKeys: String, CodingKey { + case dataMigrationIdentifier = "DataMigrationIdentifier" + } + } + + public struct StopDataMigrationResponse: AWSDecodableShape { + /// The data migration that DMS stopped. + public let dataMigration: DataMigration? + + @inlinable + public init(dataMigration: DataMigration? = nil) { + self.dataMigration = dataMigration + } + + private enum CodingKeys: String, CodingKey { + case dataMigration = "DataMigration" + } + } + public struct StopReplicationMessage: AWSEncodableShape { /// The Amazon Resource Name of the replication to stop. public let replicationConfigArn: String @@ -8913,6 +9347,7 @@ public struct DatabaseMigrationServiceErrorType: AWSErrorType { enum Code: String { case accessDeniedFault = "AccessDeniedFault" case collectorNotFoundFault = "CollectorNotFoundFault" + case failedDependencyFault = "FailedDependencyFault" case insufficientResourceCapacityFault = "InsufficientResourceCapacityFault" case invalidCertificateFault = "InvalidCertificateFault" case invalidOperationFault = "InvalidOperationFault" @@ -8960,6 +9395,8 @@ public struct DatabaseMigrationServiceErrorType: AWSErrorType { public static var accessDeniedFault: Self { .init(.accessDeniedFault) } /// The specified collector doesn't exist. public static var collectorNotFoundFault: Self { .init(.collectorNotFoundFault) } + /// A dependency threw an exception. + public static var failedDependencyFault: Self { .init(.failedDependencyFault) } /// There are not enough resources allocated to the database migration. public static var insufficientResourceCapacityFault: Self { .init(.insufficientResourceCapacityFault) } /// The certificate was not valid. diff --git a/Sources/Soto/Services/Deadline/Deadline_api.swift b/Sources/Soto/Services/Deadline/Deadline_api.swift index 6d539af55f..5a71056173 100644 --- a/Sources/Soto/Services/Deadline/Deadline_api.swift +++ b/Sources/Soto/Services/Deadline/Deadline_api.swift @@ -683,6 +683,7 @@ public struct Deadline: AWSService { /// - parameters: The parameters for the job. 
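The StartDataMigrationMessage and StopDataMigrationMessage shapes above complete the data migration lifecycle. A hedged sketch of starting and stopping a migration; it assumes the generated startDataMigration/stopDataMigration client methods exist in the companion _api.swift, and that Soto's codegen maps the documented "start-replication" value to a .startReplication case on StartReplicationMigrationTypeValue:

```swift
import SotoDatabaseMigrationService

// Start a data migration, then stop it again. `dms` is an already-configured client.
func cycleDataMigration(dms: DatabaseMigrationService, arn: String) async throws {
    let started = try await dms.startDataMigration(
        DatabaseMigrationService.StartDataMigrationMessage(
            dataMigrationIdentifier: arn,
            startType: .startReplication  // assumed case name for "start-replication"
        )
    )
    print("status:", started.dataMigration?.dataMigrationStatus ?? "unknown")

    let stopped = try await dms.stopDataMigration(
        DatabaseMigrationService.StopDataMigrationMessage(dataMigrationIdentifier: arn)
    )
    print("stop reason:", stopped.dataMigration?.stopReason ?? "none")
}
```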
/// - priority: The priority of the job on a scale of 0 to 100. The highest priority (first scheduled) is 100. When two jobs have the same priority, the oldest job is scheduled first. /// - queueId: The ID of the queue that the job is submitted to. + /// - sourceJobId: The job ID for the source job. /// - storageProfileId: The storage profile ID for the storage profile to connect to the job. /// - targetTaskRunStatus: The initial job status when it is created. Jobs that are created with a SUSPENDED status will not run until manually requeued. /// - template: The job template to use for this job. @@ -698,10 +699,11 @@ public struct Deadline: AWSService { parameters: [String: JobParameter]? = nil, priority: Int, queueId: String, + sourceJobId: String? = nil, storageProfileId: String? = nil, targetTaskRunStatus: CreateJobTargetTaskRunStatus? = nil, - template: String, - templateType: JobTemplateType, + template: String? = nil, + templateType: JobTemplateType? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateJobResponse { let input = CreateJobRequest( @@ -713,6 +715,7 @@ public struct Deadline: AWSService { parameters: parameters, priority: priority, queueId: queueId, + sourceJobId: sourceJobId, storageProfileId: storageProfileId, targetTaskRunStatus: targetTaskRunStatus, template: template, @@ -2407,6 +2410,48 @@ public struct Deadline: AWSService { return try await self.listJobMembers(input, logger: logger) } + /// Lists parameter definitions of a job. + @Sendable + @inlinable + public func listJobParameterDefinitions(_ input: ListJobParameterDefinitionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListJobParameterDefinitionsResponse { + try await self.client.execute( + operation: "ListJobParameterDefinitions", + path: "/2023-10-12/farms/{farmId}/queues/{queueId}/jobs/{jobId}/parameter-definitions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + hostPrefix: "management.", + logger: logger + ) + } + /// Lists parameter definitions of a job. + /// + /// Parameters: + /// - farmId: The farm ID of the job to list. + /// - jobId: The job ID to include on the list. + /// - maxResults: The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. + /// - nextToken: The token for the next set of results, or null to start from the beginning. + /// - queueId: The queue ID to include on the list. + /// - logger: Logger use during operation + @inlinable + public func listJobParameterDefinitions( + farmId: String, + jobId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + queueId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListJobParameterDefinitionsResponse { + let input = ListJobParameterDefinitionsRequest( + farmId: farmId, + jobId: jobId, + maxResults: maxResults, + nextToken: nextToken, + queueId: queueId + ) + return try await self.listJobParameterDefinitions(input, logger: logger) + } + /// Lists jobs. @Sendable @inlinable @@ -4496,6 +4541,49 @@ extension Deadline { return self.listJobMembersPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listJobParameterDefinitions(_:logger:)``. 
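A minimal sketch for the new ListJobParameterDefinitions operation, using the convenience overload shown above; `deadline` is assumed to be an already-configured Deadline client, and the IDs are placeholders that match the documented farm-/job-/queue- patterns:

```swift
import SotoDeadline

// Fetch one page of parameter definitions for a job.
func firstPageOfParameterDefinitions(deadline: Deadline) async throws -> [String] {
    let response = try await deadline.listJobParameterDefinitions(
        farmId: "farm-00000000000000000000000000000000",
        jobId: "job-00000000000000000000000000000000",
        maxResults: 50,
        queueId: "queue-00000000000000000000000000000000"
    )
    return response.jobParameterDefinitions
}
```

The paginator added in the next hunks can replace manual nextToken handling when a job defines more parameter definitions than a single page returns.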
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listJobParameterDefinitionsPaginator( + _ input: ListJobParameterDefinitionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listJobParameterDefinitions, + inputKey: \ListJobParameterDefinitionsRequest.nextToken, + outputKey: \ListJobParameterDefinitionsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listJobParameterDefinitions(_:logger:)``. + /// + /// - Parameters: + /// - farmId: The farm ID of the job to list. + /// - jobId: The job ID to include on the list. + /// - maxResults: The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. + /// - queueId: The queue ID to include on the list. + /// - logger: Logger used for logging + @inlinable + public func listJobParameterDefinitionsPaginator( + farmId: String, + jobId: String, + maxResults: Int? = nil, + queueId: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListJobParameterDefinitionsRequest( + farmId: farmId, + jobId: jobId, + maxResults: maxResults, + queueId: queueId + ) + return self.listJobParameterDefinitionsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listJobs(_:logger:)``. /// /// - Parameters: @@ -5339,6 +5427,19 @@ extension Deadline.ListJobMembersRequest: AWSPaginateToken { } } +extension Deadline.ListJobParameterDefinitionsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Deadline.ListJobParameterDefinitionsRequest { + return .init( + farmId: self.farmId, + jobId: self.jobId, + maxResults: self.maxResults, + nextToken: token, + queueId: self.queueId + ) + } +} + extension Deadline.ListJobsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> Deadline.ListJobsRequest { diff --git a/Sources/Soto/Services/Deadline/Deadline_shapes.swift b/Sources/Soto/Services/Deadline/Deadline_shapes.swift index ebe33ec73d..be11e88816 100644 --- a/Sources/Soto/Services/Deadline/Deadline_shapes.swift +++ b/Sources/Soto/Services/Deadline/Deadline_shapes.swift @@ -2121,17 +2121,19 @@ extension Deadline { public let priority: Int /// The ID of the queue that the job is submitted to. public let queueId: String + /// The job ID for the source job. + public let sourceJobId: String? /// The storage profile ID for the storage profile to connect to the job. public let storageProfileId: String? /// The initial job status when it is created. Jobs that are created with a SUSPENDED status will not run until manually requeued. public let targetTaskRunStatus: CreateJobTargetTaskRunStatus? /// The job template to use for this job. - public let template: String + public let template: String? /// The file type for the job template. - public let templateType: JobTemplateType + public let templateType: JobTemplateType? @inlinable - public init(attachments: Attachments? = nil, clientToken: String? = CreateJobRequest.idempotencyToken(), farmId: String, maxFailedTasksCount: Int? = nil, maxRetriesPerTask: Int? = nil, parameters: [String: JobParameter]? = nil, priority: Int, queueId: String, storageProfileId: String? = nil, targetTaskRunStatus: CreateJobTargetTaskRunStatus? = nil, template: String, templateType: JobTemplateType) { + public init(attachments: Attachments? 
= nil, clientToken: String? = CreateJobRequest.idempotencyToken(), farmId: String, maxFailedTasksCount: Int? = nil, maxRetriesPerTask: Int? = nil, parameters: [String: JobParameter]? = nil, priority: Int, queueId: String, sourceJobId: String? = nil, storageProfileId: String? = nil, targetTaskRunStatus: CreateJobTargetTaskRunStatus? = nil, template: String? = nil, templateType: JobTemplateType? = nil) { self.attachments = attachments self.clientToken = clientToken self.farmId = farmId @@ -2140,6 +2142,7 @@ extension Deadline { self.parameters = parameters self.priority = priority self.queueId = queueId + self.sourceJobId = sourceJobId self.storageProfileId = storageProfileId self.targetTaskRunStatus = targetTaskRunStatus self.template = template @@ -2157,10 +2160,11 @@ extension Deadline { try container.encodeIfPresent(self.parameters, forKey: .parameters) try container.encode(self.priority, forKey: .priority) request.encodePath(self.queueId, key: "queueId") + try container.encodeIfPresent(self.sourceJobId, forKey: .sourceJobId) try container.encodeIfPresent(self.storageProfileId, forKey: .storageProfileId) try container.encodeIfPresent(self.targetTaskRunStatus, forKey: .targetTaskRunStatus) - try container.encode(self.template, forKey: .template) - try container.encode(self.templateType, forKey: .templateType) + try container.encodeIfPresent(self.template, forKey: .template) + try container.encodeIfPresent(self.templateType, forKey: .templateType) } public func validate(name: String) throws { @@ -2178,6 +2182,7 @@ extension Deadline { try self.validate(self.priority, name: "priority", parent: name, max: 100) try self.validate(self.priority, name: "priority", parent: name, min: 0) try self.validate(self.queueId, name: "queueId", parent: name, pattern: "^queue-[0-9a-f]{32}$") + try self.validate(self.sourceJobId, name: "sourceJobId", parent: name, pattern: "^job-[0-9a-f]{32}$") try self.validate(self.storageProfileId, name: "storageProfileId", parent: name, pattern: "^sp-[0-9a-f]{32}$") try self.validate(self.template, name: "template", parent: name, max: 300000) try self.validate(self.template, name: "template", parent: name, min: 1) @@ -2189,6 +2194,7 @@ extension Deadline { case maxRetriesPerTask = "maxRetriesPerTask" case parameters = "parameters" case priority = "priority" + case sourceJobId = "sourceJobId" case storageProfileId = "storageProfileId" case targetTaskRunStatus = "targetTaskRunStatus" case template = "template" @@ -4123,6 +4129,8 @@ extension Deadline { public let parameters: [String: JobParameter]? /// The job priority. public let priority: Int + /// The job ID for the source job. + public let sourceJobId: String? /// The date and time the resource started running. @OptionalCustomCoding public var startedAt: Date? @@ -4141,7 +4149,7 @@ extension Deadline { public let updatedBy: String? @inlinable - public init(attachments: Attachments? = nil, createdAt: Date, createdBy: String, description: String? = nil, endedAt: Date? = nil, jobId: String, lifecycleStatus: JobLifecycleStatus, lifecycleStatusMessage: String, maxFailedTasksCount: Int? = nil, maxRetriesPerTask: Int? = nil, name: String, parameters: [String: JobParameter]? = nil, priority: Int, startedAt: Date? = nil, storageProfileId: String? = nil, targetTaskRunStatus: JobTargetTaskRunStatus? = nil, taskRunStatus: TaskRunStatus? = nil, taskRunStatusCounts: [TaskRunStatus: Int]? = nil, updatedAt: Date? = nil, updatedBy: String? = nil) { + public init(attachments: Attachments? 
= nil, createdAt: Date, createdBy: String, description: String? = nil, endedAt: Date? = nil, jobId: String, lifecycleStatus: JobLifecycleStatus, lifecycleStatusMessage: String, maxFailedTasksCount: Int? = nil, maxRetriesPerTask: Int? = nil, name: String, parameters: [String: JobParameter]? = nil, priority: Int, sourceJobId: String? = nil, startedAt: Date? = nil, storageProfileId: String? = nil, targetTaskRunStatus: JobTargetTaskRunStatus? = nil, taskRunStatus: TaskRunStatus? = nil, taskRunStatusCounts: [TaskRunStatus: Int]? = nil, updatedAt: Date? = nil, updatedBy: String? = nil) { self.attachments = attachments self.createdAt = createdAt self.createdBy = createdBy @@ -4155,6 +4163,7 @@ extension Deadline { self.name = name self.parameters = parameters self.priority = priority + self.sourceJobId = sourceJobId self.startedAt = startedAt self.storageProfileId = storageProfileId self.targetTaskRunStatus = targetTaskRunStatus @@ -4178,6 +4187,7 @@ extension Deadline { case name = "name" case parameters = "parameters" case priority = "priority" + case sourceJobId = "sourceJobId" case startedAt = "startedAt" case storageProfileId = "storageProfileId" case targetTaskRunStatus = "targetTaskRunStatus" @@ -5597,6 +5607,8 @@ extension Deadline { public let priority: Int? /// The queue ID. public let queueId: String? + /// The job ID for the source job. + public let sourceJobId: String? /// The date and time the resource started running. @OptionalCustomCoding public var startedAt: Date? @@ -5608,7 +5620,7 @@ extension Deadline { public let taskRunStatusCounts: [TaskRunStatus: Int]? @inlinable - public init(createdAt: Date? = nil, createdBy: String? = nil, endedAt: Date? = nil, jobId: String? = nil, jobParameters: [String: JobParameter]? = nil, lifecycleStatus: JobLifecycleStatus? = nil, lifecycleStatusMessage: String? = nil, maxFailedTasksCount: Int? = nil, maxRetriesPerTask: Int? = nil, name: String? = nil, priority: Int? = nil, queueId: String? = nil, startedAt: Date? = nil, targetTaskRunStatus: JobTargetTaskRunStatus? = nil, taskRunStatus: TaskRunStatus? = nil, taskRunStatusCounts: [TaskRunStatus: Int]? = nil) { + public init(createdAt: Date? = nil, createdBy: String? = nil, endedAt: Date? = nil, jobId: String? = nil, jobParameters: [String: JobParameter]? = nil, lifecycleStatus: JobLifecycleStatus? = nil, lifecycleStatusMessage: String? = nil, maxFailedTasksCount: Int? = nil, maxRetriesPerTask: Int? = nil, name: String? = nil, priority: Int? = nil, queueId: String? = nil, sourceJobId: String? = nil, startedAt: Date? = nil, targetTaskRunStatus: JobTargetTaskRunStatus? = nil, taskRunStatus: TaskRunStatus? = nil, taskRunStatusCounts: [TaskRunStatus: Int]? = nil) { self.createdAt = createdAt self.createdBy = createdBy self.endedAt = endedAt @@ -5621,6 +5633,7 @@ extension Deadline { self.name = name self.priority = priority self.queueId = queueId + self.sourceJobId = sourceJobId self.startedAt = startedAt self.targetTaskRunStatus = targetTaskRunStatus self.taskRunStatus = taskRunStatus @@ -5640,6 +5653,7 @@ extension Deadline { case name = "name" case priority = "priority" case queueId = "queueId" + case sourceJobId = "sourceJobId" case startedAt = "startedAt" case targetTaskRunStatus = "targetTaskRunStatus" case taskRunStatus = "taskRunStatus" @@ -5670,6 +5684,8 @@ extension Deadline { public let name: String /// The job priority. public let priority: Int + /// The job ID for the source job. + public let sourceJobId: String? /// The date and time the resource started running. 
@OptionalCustomCoding public var startedAt: Date? @@ -5686,7 +5702,7 @@ extension Deadline { public let updatedBy: String? @inlinable - public init(createdAt: Date, createdBy: String, endedAt: Date? = nil, jobId: String, lifecycleStatus: JobLifecycleStatus, lifecycleStatusMessage: String, maxFailedTasksCount: Int? = nil, maxRetriesPerTask: Int? = nil, name: String, priority: Int, startedAt: Date? = nil, targetTaskRunStatus: JobTargetTaskRunStatus? = nil, taskRunStatus: TaskRunStatus? = nil, taskRunStatusCounts: [TaskRunStatus: Int]? = nil, updatedAt: Date? = nil, updatedBy: String? = nil) { + public init(createdAt: Date, createdBy: String, endedAt: Date? = nil, jobId: String, lifecycleStatus: JobLifecycleStatus, lifecycleStatusMessage: String, maxFailedTasksCount: Int? = nil, maxRetriesPerTask: Int? = nil, name: String, priority: Int, sourceJobId: String? = nil, startedAt: Date? = nil, targetTaskRunStatus: JobTargetTaskRunStatus? = nil, taskRunStatus: TaskRunStatus? = nil, taskRunStatusCounts: [TaskRunStatus: Int]? = nil, updatedAt: Date? = nil, updatedBy: String? = nil) { self.createdAt = createdAt self.createdBy = createdBy self.endedAt = endedAt @@ -5697,6 +5713,7 @@ extension Deadline { self.maxRetriesPerTask = maxRetriesPerTask self.name = name self.priority = priority + self.sourceJobId = sourceJobId self.startedAt = startedAt self.targetTaskRunStatus = targetTaskRunStatus self.taskRunStatus = taskRunStatus @@ -5716,6 +5733,7 @@ extension Deadline { case maxRetriesPerTask = "maxRetriesPerTask" case name = "name" case priority = "priority" + case sourceJobId = "sourceJobId" case startedAt = "startedAt" case targetTaskRunStatus = "targetTaskRunStatus" case taskRunStatus = "taskRunStatus" @@ -6134,6 +6152,66 @@ extension Deadline { } } + public struct ListJobParameterDefinitionsRequest: AWSEncodableShape { + /// The farm ID of the job to list. + public let farmId: String + /// The job ID to include on the list. + public let jobId: String + /// The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. + public let maxResults: Int? + /// The token for the next set of results, or null to start from the beginning. + public let nextToken: String? + /// The queue ID to include on the list. + public let queueId: String + + @inlinable + public init(farmId: String, jobId: String, maxResults: Int? = nil, nextToken: String? = nil, queueId: String) { + self.farmId = farmId + self.jobId = jobId + self.maxResults = maxResults + self.nextToken = nextToken + self.queueId = queueId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.farmId, key: "farmId") + request.encodePath(self.jobId, key: "jobId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodePath(self.queueId, key: "queueId") + } + + public func validate(name: String) throws { + try self.validate(self.farmId, name: "farmId", parent: name, pattern: "^farm-[0-9a-f]{32}$") + try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^job-[0-9a-f]{32}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.queueId, name: "queueId", parent: name, pattern: "^queue-[0-9a-f]{32}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListJobParameterDefinitionsResponse: AWSDecodableShape { + /// Lists parameter definitions of a job. + public let jobParameterDefinitions: [String] + /// If Deadline Cloud returns nextToken, then there are more results available. The value of nextToken is a unique pagination token for each page. To retrieve the next page, call the operation again using the returned token. Keep all other arguments unchanged. If no results remain, then nextToken is set to null. Each pagination token expires after 24 hours. If you provide a token that isn't valid, then you receive an HTTP 400 ValidationException error. + public let nextToken: String? + + @inlinable + public init(jobParameterDefinitions: [String], nextToken: String? = nil) { + self.jobParameterDefinitions = jobParameterDefinitions + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case jobParameterDefinitions = "jobParameterDefinitions" + case nextToken = "nextToken" + } + } + public struct ListJobsRequest: AWSEncodableShape { /// The farm ID for the jobs. public let farmId: String diff --git a/Sources/Soto/Services/DirectoryService/DirectoryService_api.swift b/Sources/Soto/Services/DirectoryService/DirectoryService_api.swift index 301d8b2763..c623c85526 100644 --- a/Sources/Soto/Services/DirectoryService/DirectoryService_api.swift +++ b/Sources/Soto/Services/DirectoryService/DirectoryService_api.swift @@ -88,6 +88,9 @@ public struct DirectoryService: AWSService { "us-east-2": "ds-fips.us-east-2.amazonaws.com", "us-gov-east-1": "ds-fips.us-gov-east-1.amazonaws.com", "us-gov-west-1": "ds-fips.us-gov-west-1.amazonaws.com", + "us-iso-east-1": "ds-fips.us-iso-east-1.c2s.ic.gov", + "us-iso-west-1": "ds-fips.us-iso-west-1.c2s.ic.gov", + "us-isob-east-1": "ds-fips.us-isob-east-1.sc2s.sgov.gov", "us-west-1": "ds-fips.us-west-1.amazonaws.com", "us-west-2": "ds-fips.us-west-2.amazonaws.com" ]) @@ -142,7 +145,7 @@ public struct DirectoryService: AWSService { /// Parameters: /// - directoryId: Identifier (ID) of the directory to which to add the address block. /// - ipRoutes: IP address blocks, using CIDR format, of the traffic to route. This is often the IP address block of the DNS server used for your self-managed domain. - /// - updateSecurityGroupForDirectoryControllers: If set to true, updates the inbound and outbound rules of the security group that has the description: "Amazon Web Services created security group for directory ID directory controllers." 
Following are the new rules: Inbound: Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 123, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 138, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 389, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 464, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: 0.0.0.0/0 Type: DNS (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0 Type: DNS (TCP), Protocol: TCP, Range: 53, Source: 0.0.0.0/0 Type: LDAP, Protocol: TCP, Range: 389, Source: 0.0.0.0/0 Type: All ICMP, Protocol: All, Range: N/A, Source: 0.0.0.0/0 Outbound: Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0 These security rules impact an internal network interface that is not exposed publicly. + /// - updateSecurityGroupForDirectoryControllers: If set to true, updates the inbound and outbound rules of the security group that has the description: "Amazon Web Services created security group for directory ID directory controllers." Following are the new rules: Inbound: Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 123, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 138, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 389, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 464, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: Managed Microsoft AD VPC IPv4 CIDR Type: DNS (UDP), Protocol: UDP, Range: 53, Source: Managed Microsoft AD VPC IPv4 CIDR Type: DNS (TCP), Protocol: TCP, Range: 53, Source: Managed Microsoft AD VPC IPv4 CIDR Type: LDAP, Protocol: TCP, Range: 389, Source: Managed Microsoft AD VPC IPv4 CIDR Type: All ICMP, Protocol: All, Range: N/A, Source: Managed Microsoft AD VPC IPv4 CIDR Outbound: Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0 These security rules impact an internal network interface that is not exposed publicly. 
/// - logger: Logger use during operation @inlinable public func addIpRoutes( @@ -592,7 +595,7 @@ public struct DirectoryService: AWSService { /// - remoteDomainName: The Fully Qualified Domain Name (FQDN) of the external domain for which to create the trust relationship. /// - selectiveAuth: Optional parameter to enable selective authentication for the trust. /// - trustDirection: The direction of the trust relationship. - /// - trustPassword: The trust password. The must be the same password that was used when creating the trust relationship on the external domain. + /// - trustPassword: The trust password. The trust password must be the same password that was used when creating the trust relationship on the external domain. /// - trustType: The trust relationship type. Forest is the default. /// - logger: Logger use during operation @inlinable @@ -970,6 +973,35 @@ public struct DirectoryService: AWSService { return try await self.describeDirectories(input, logger: logger) } + /// Obtains status of directory data access enablement through the Directory Service Data API for the specified directory. + @Sendable + @inlinable + public func describeDirectoryDataAccess(_ input: DescribeDirectoryDataAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeDirectoryDataAccessResult { + try await self.client.execute( + operation: "DescribeDirectoryDataAccess", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Obtains status of directory data access enablement through the Directory Service Data API for the specified directory. + /// + /// Parameters: + /// - directoryId: The directory identifier. + /// - logger: Logger use during operation + @inlinable + public func describeDirectoryDataAccess( + directoryId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeDirectoryDataAccessResult { + let input = DescribeDirectoryDataAccessRequest( + directoryId: directoryId + ) + return try await self.describeDirectoryDataAccess(input, logger: logger) + } + /// Provides information about any domain controllers in your directory. @Sendable @inlinable @@ -1317,7 +1349,7 @@ public struct DirectoryService: AWSService { /// /// Parameters: /// - directoryId: The identifier of the directory - /// - type: The type of client authentication to disable. Currently, only the parameter, SmartCard is supported. + /// - type: The type of client authentication to disable. Currently the only parameter "SmartCard" is supported. /// - logger: Logger use during operation @inlinable public func disableClientAuthentication( @@ -1332,6 +1364,35 @@ public struct DirectoryService: AWSService { return try await self.disableClientAuthentication(input, logger: logger) } + /// Deactivates access to directory data via the Directory Service Data API for the specified directory. + @Sendable + @inlinable + public func disableDirectoryDataAccess(_ input: DisableDirectoryDataAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisableDirectoryDataAccessResult { + try await self.client.execute( + operation: "DisableDirectoryDataAccess", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deactivates access to directory data via the Directory Service Data API for the specified directory. + /// + /// Parameters: + /// - directoryId: The directory identifier. 
+ /// - logger: Logger use during operation + @inlinable + public func disableDirectoryDataAccess( + directoryId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisableDirectoryDataAccessResult { + let input = DisableDirectoryDataAccessRequest( + directoryId: directoryId + ) + return try await self.disableDirectoryDataAccess(input, logger: logger) + } + /// Deactivates LDAP secure calls for the specified directory. @Sendable @inlinable @@ -1460,6 +1521,35 @@ public struct DirectoryService: AWSService { return try await self.enableClientAuthentication(input, logger: logger) } + /// Enables access to directory data via the Directory Service Data API for the specified directory. + @Sendable + @inlinable + public func enableDirectoryDataAccess(_ input: EnableDirectoryDataAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> EnableDirectoryDataAccessResult { + try await self.client.execute( + operation: "EnableDirectoryDataAccess", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Enables access to directory data via the Directory Service Data API for the specified directory. + /// + /// Parameters: + /// - directoryId: The directory identifier. + /// - logger: Logger use during operation + @inlinable + public func enableDirectoryDataAccess( + directoryId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> EnableDirectoryDataAccessResult { + let input = EnableDirectoryDataAccessRequest( + directoryId: directoryId + ) + return try await self.enableDirectoryDataAccess(input, logger: logger) + } + /// Activates the switch for the specific directory to always use LDAP secure calls. @Sendable @inlinable @@ -1981,7 +2071,7 @@ public struct DirectoryService: AWSService { return try await self.removeTagsFromResource(input, logger: logger) } - /// Resets the password for any user in your Managed Microsoft AD or Simple AD directory. You can reset the password for any user in your directory with the following exceptions: For Simple AD, you cannot reset the password for any user that is a member of either the Domain Admins or Enterprise Admins group except for the administrator user. For Managed Microsoft AD, you can only reset the password for a user that is in an OU based off of the NetBIOS name that you typed when you created your directory. For example, you cannot reset the password for a user in the Amazon Web Services Reserved OU. For more information about the OU structure for an Managed Microsoft AD directory, see What Gets Created in the Directory Service Administration Guide. + /// Resets the password for any user in your Managed Microsoft AD or Simple AD directory. Disabled users will become enabled and can be authenticated following the API call. You can reset the password for any user in your directory with the following exceptions: For Simple AD, you cannot reset the password for any user that is a member of either the Domain Admins or Enterprise Admins group except for the administrator user. For Managed Microsoft AD, you can only reset the password for a user that is in an OU based off of the NetBIOS name that you typed when you created your directory. For example, you cannot reset the password for a user in the Amazon Web Services Reserved OU. For more information about the OU structure for an Managed Microsoft AD directory, see What Gets Created in the Directory Service Administration Guide. 
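The enable/describe/disableDirectoryDataAccess operations above gate the Directory Service Data API per directory. A minimal sketch, assuming `ds` is an already-configured DirectoryService client and using a placeholder directory ID that matches the d- plus 10 hex character pattern these requests validate:

```swift
import SotoDirectoryService

// Turn on Directory Service Data access for a directory and report its current status.
func enableDataAccess(ds: DirectoryService) async throws {
    let directoryId = "d-0123456789"
    _ = try await ds.enableDirectoryDataAccess(directoryId: directoryId)
    let result = try await ds.describeDirectoryDataAccess(directoryId: directoryId)
    print("data access status:", result.dataAccessStatus?.rawValue ?? "unknown")
}
```

The DataAccessStatus enum added in the shapes hunk below includes Enabling and Disabling values, so callers that need the Enabled state may have to poll describeDirectoryDataAccess.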
@Sendable @inlinable public func resetUserPassword(_ input: ResetUserPasswordRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ResetUserPasswordResult { @@ -1994,7 +2084,7 @@ public struct DirectoryService: AWSService { logger: logger ) } - /// Resets the password for any user in your Managed Microsoft AD or Simple AD directory. You can reset the password for any user in your directory with the following exceptions: For Simple AD, you cannot reset the password for any user that is a member of either the Domain Admins or Enterprise Admins group except for the administrator user. For Managed Microsoft AD, you can only reset the password for a user that is in an OU based off of the NetBIOS name that you typed when you created your directory. For example, you cannot reset the password for a user in the Amazon Web Services Reserved OU. For more information about the OU structure for an Managed Microsoft AD directory, see What Gets Created in the Directory Service Administration Guide. + /// Resets the password for any user in your Managed Microsoft AD or Simple AD directory. Disabled users will become enabled and can be authenticated following the API call. You can reset the password for any user in your directory with the following exceptions: For Simple AD, you cannot reset the password for any user that is a member of either the Domain Admins or Enterprise Admins group except for the administrator user. For Managed Microsoft AD, you can only reset the password for a user that is in an OU based off of the NetBIOS name that you typed when you created your directory. For example, you cannot reset the password for a user in the Amazon Web Services Reserved OU. For more information about the OU structure for an Managed Microsoft AD directory, see What Gets Created in the Directory Service Administration Guide. /// /// Parameters: /// - directoryId: Identifier of the Managed Microsoft AD or Simple AD directory in which the user resides. diff --git a/Sources/Soto/Services/DirectoryService/DirectoryService_shapes.swift b/Sources/Soto/Services/DirectoryService/DirectoryService_shapes.swift index c93d62ebdc..858af437c5 100644 --- a/Sources/Soto/Services/DirectoryService/DirectoryService_shapes.swift +++ b/Sources/Soto/Services/DirectoryService/DirectoryService_shapes.swift @@ -54,6 +54,15 @@ extension DirectoryService { public var description: String { return self.rawValue } } + public enum DataAccessStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "Disabled" + case disabling = "Disabling" + case enabled = "Enabled" + case enabling = "Enabling" + case failed = "Failed" + public var description: String { return self.rawValue } + } + public enum DirectoryConfigurationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case `default` = "Default" case failed = "Failed" @@ -87,6 +96,7 @@ extension DirectoryService { case requested = "Requested" case restorefailed = "RestoreFailed" case restoring = "Restoring" + case updating = "Updating" public var description: String { return self.rawValue } } @@ -106,6 +116,7 @@ extension DirectoryService { case failed = "Failed" case impaired = "Impaired" case restoring = "Restoring" + case updating = "Updating" public var description: String { return self.rawValue } } @@ -307,7 +318,7 @@ extension DirectoryService { public let directoryId: String /// IP address blocks, using CIDR format, of the traffic to route. 
This is often the IP address block of the DNS server used for your self-managed domain. public let ipRoutes: [IpRoute] - /// If set to true, updates the inbound and outbound rules of the security group that has the description: "Amazon Web Services created security group for directory ID directory controllers." Following are the new rules: Inbound: Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 123, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 138, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 389, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 464, Source: 0.0.0.0/0 Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: 0.0.0.0/0 Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: 0.0.0.0/0 Type: DNS (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0 Type: DNS (TCP), Protocol: TCP, Range: 53, Source: 0.0.0.0/0 Type: LDAP, Protocol: TCP, Range: 389, Source: 0.0.0.0/0 Type: All ICMP, Protocol: All, Range: N/A, Source: 0.0.0.0/0 Outbound: Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0 These security rules impact an internal network interface that is not exposed publicly. + /// If set to true, updates the inbound and outbound rules of the security group that has the description: "Amazon Web Services created security group for directory ID directory controllers." 
Following are the new rules: Inbound: Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 123, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 138, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 389, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 464, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: Managed Microsoft AD VPC IPv4 CIDR Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: Managed Microsoft AD VPC IPv4 CIDR Type: DNS (UDP), Protocol: UDP, Range: 53, Source: Managed Microsoft AD VPC IPv4 CIDR Type: DNS (TCP), Protocol: TCP, Range: 53, Source: Managed Microsoft AD VPC IPv4 CIDR Type: LDAP, Protocol: TCP, Range: 389, Source: Managed Microsoft AD VPC IPv4 CIDR Type: All ICMP, Protocol: All, Range: N/A, Source: Managed Microsoft AD VPC IPv4 CIDR Outbound: Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0 These security rules impact an internal network interface that is not exposed publicly. public let updateSecurityGroupForDirectoryControllers: Bool? @inlinable @@ -1010,7 +1021,7 @@ extension DirectoryService { public let selectiveAuth: SelectiveAuth? /// The direction of the trust relationship. public let trustDirection: TrustDirection - /// The trust password. The must be the same password that was used when creating the trust relationship on the external domain. + /// The trust password. The trust password must be the same password that was used when creating the trust relationship on the external domain. public let trustPassword: String /// The trust relationship type. Forest is the default. public let trustType: TrustType? @@ -1443,6 +1454,38 @@ extension DirectoryService { } } + public struct DescribeDirectoryDataAccessRequest: AWSEncodableShape { + /// The directory identifier. + public let directoryId: String + + @inlinable + public init(directoryId: String) { + self.directoryId = directoryId + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + } + } + + public struct DescribeDirectoryDataAccessResult: AWSDecodableShape { + /// The current status of data access through the Directory Service Data API. + public let dataAccessStatus: DataAccessStatus? + + @inlinable + public init(dataAccessStatus: DataAccessStatus? = nil) { + self.dataAccessStatus = dataAccessStatus + } + + private enum CodingKeys: String, CodingKey { + case dataAccessStatus = "DataAccessStatus" + } + } + public struct DescribeDomainControllersRequest: AWSEncodableShape { /// Identifier of the directory for which to retrieve the domain controller information. 
public let directoryId: String @@ -1661,7 +1704,7 @@ extension DirectoryService { public struct DescribeSettingsResult: AWSDecodableShape { /// The identifier of the directory. public let directoryId: String? - /// If not null, token that indicates that more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeSettings to retrieve the next set of items. + /// If not null, token that indicates that more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeSettings to retrieve the next set of items. public let nextToken: String? /// The list of SettingEntry objects that were retrieved. It is possible that this list contains less than the number of items specified in the Limit member of the request. This occurs if there are less than the requested number of items left to retrieve, or if the limitations of the operation have been exceeded. public let settingEntries: [SettingEntry]? @@ -2007,7 +2050,7 @@ extension DirectoryService { public let stageLastUpdatedDateTime: Date? /// Additional information about the directory stage. public let stageReason: String? - /// The directory size. + /// The directory type. public let type: DirectoryType? /// A DirectoryVpcSettingsDescription object that contains additional information about a directory. This member is only present if the directory is a Simple AD or Managed Microsoft AD directory. public let vpcSettings: DirectoryVpcSettingsDescription? @@ -2172,7 +2215,7 @@ extension DirectoryService { public struct DisableClientAuthenticationRequest: AWSEncodableShape { /// The identifier of the directory public let directoryId: String - /// The type of client authentication to disable. Currently, only the parameter, SmartCard is supported. + /// The type of client authentication to disable. Currently the only parameter "SmartCard" is supported. public let type: ClientAuthenticationType @inlinable @@ -2195,6 +2238,28 @@ extension DirectoryService { public init() {} } + public struct DisableDirectoryDataAccessRequest: AWSEncodableShape { + /// The directory identifier. + public let directoryId: String + + @inlinable + public init(directoryId: String) { + self.directoryId = directoryId + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + } + } + + public struct DisableDirectoryDataAccessResult: AWSDecodableShape { + public init() {} + } + public struct DisableLDAPSRequest: AWSEncodableShape { /// The identifier of the directory. public let directoryId: String @@ -2353,6 +2418,28 @@ extension DirectoryService { public init() {} } + public struct EnableDirectoryDataAccessRequest: AWSEncodableShape { + /// The directory identifier. + public let directoryId: String + + @inlinable + public init(directoryId: String) { + self.directoryId = directoryId + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + } + } + + public struct EnableDirectoryDataAccessResult: AWSDecodableShape { + public init() {} + } + public struct EnableLDAPSRequest: AWSEncodableShape { /// The identifier of the directory. 
public let directoryId: String @@ -2903,7 +2990,7 @@ extension DirectoryService { public let displayLabel: String? /// The port that your RADIUS server is using for communications. Your self-managed network must allow inbound traffic over this port from the Directory Service servers. public let radiusPort: Int? - /// The maximum number of times that communication with the RADIUS server is attempted. + /// The maximum number of times that communication with the RADIUS server is retried after the initial attempt. public let radiusRetries: Int? /// An array of strings that contains the fully qualified domain name (FQDN) or IP addresses of the RADIUS server endpoints, or the FQDN or IP addresses of your RADIUS server load balancer. public let radiusServers: [String]? @@ -2937,7 +3024,7 @@ extension DirectoryService { try validate($0, name: "radiusServers[]", parent: name, max: 256) try validate($0, name: "radiusServers[]", parent: name, min: 1) } - try self.validate(self.radiusTimeout, name: "radiusTimeout", parent: name, max: 20) + try self.validate(self.radiusTimeout, name: "radiusTimeout", parent: name, max: 50) try self.validate(self.radiusTimeout, name: "radiusTimeout", parent: name, min: 1) try self.validate(self.sharedSecret, name: "sharedSecret", parent: name, max: 512) try self.validate(self.sharedSecret, name: "sharedSecret", parent: name, min: 8) @@ -3605,9 +3692,9 @@ extension DirectoryService { } public struct Tag: AWSEncodableShape & AWSDecodableShape { - /// Required name of the tag. The string value can be Unicode characters and cannot be prefixed with "aws:". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + /// Required name of the tag. The string value can be Unicode characters and cannot be prefixed with "aws:". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-', ':', '@'(Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). public let key: String - /// The optional value of the tag. The string value can be Unicode characters. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + /// The optional value of the tag. The string value can be Unicode characters. The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-', ':', '@' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). public let value: String @inlinable @@ -4096,7 +4183,7 @@ public struct DirectoryServiceErrorType: AWSErrorType { /// return error code string public var errorCode: String { self.error.rawValue } - /// Client authentication is not available in this region at this time. + /// You do not have sufficient access to perform this action. public static var accessDeniedException: Self { .init(.accessDeniedException) } /// An authentication error occurred. public static var authenticationFailedException: Self { .init(.authenticationFailedException) } @@ -4122,7 +4209,7 @@ public struct DirectoryServiceErrorType: AWSErrorType { public static var directoryLimitExceededException: Self { .init(.directoryLimitExceededException) } /// The specified directory has not been shared with this Amazon Web Services account. public static var directoryNotSharedException: Self { .init(.directoryNotSharedException) } - /// The specified directory is unavailable or could not be found. 
+ /// The specified directory is unavailable. public static var directoryUnavailableException: Self { .init(.directoryUnavailableException) } /// The maximum allowed number of domain controllers per directory was exceeded. The default limit per directory is 20 domain controllers. public static var domainControllerLimitExceededException: Self { .init(.domainControllerLimitExceededException) } diff --git a/Sources/Soto/Services/DirectoryServiceData/DirectoryServiceData_api.swift b/Sources/Soto/Services/DirectoryServiceData/DirectoryServiceData_api.swift new file mode 100644 index 0000000000..d48163b24c --- /dev/null +++ b/Sources/Soto/Services/DirectoryServiceData/DirectoryServiceData_api.swift @@ -0,0 +1,1143 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_exported import SotoCore + +/// Service object for interacting with AWS DirectoryServiceData service. +/// +/// Amazon Web Services Directory Service Data is an extension of Directory Service. This API reference provides detailed information about Directory Service Data operations and object types. With Directory Service Data, you can create, read, update, and delete users, groups, and memberships from your Managed Microsoft AD without additional costs and without deploying dedicated management instances. You can also perform built-in object management tasks across directories without direct network connectivity, which simplifies provisioning and access management to achieve fully automated deployments. Directory Service Data supports user and group write operations, such as CreateUser and CreateGroup, within the organizational unit (OU) of your Managed Microsoft AD. Directory Service Data supports read operations, such as ListUsers and ListGroups, on all users, groups, and group memberships within your Managed Microsoft AD and across trusted realms. Directory Service Data supports adding and removing group members in your OU and the Amazon Web Services Delegated Groups OU, so you can grant and deny access to specific roles and permissions. For more information, see Manage users and groups in the Directory Service Administration Guide. Directory management operations and configuration changes made against the Directory Service API will also reflect in Directory Service Data API with eventual consistency. You can expect a short delay between management changes, such as adding a new directory trust and calling the Directory Service Data API for the newly created trusted realm. Directory Service Data connects to your Managed Microsoft AD domain controllers and performs operations on underlying directory objects. When you create your Managed Microsoft AD, you choose subnets for domain controllers that Directory Service creates on your behalf. 
If a domain controller is unavailable, Directory Service Data uses an available domain controller. As a result, you might notice eventual consistency while objects replicate from one domain controller to another domain controller. For more information, see What gets created in the Directory Service Administration Guide. Directory limits vary by Managed Microsoft AD edition: Standard edition – Supports 8 transactions per second (TPS) for read operations and 4 TPS for write operations per directory. There's a concurrency limit of 10 concurrent requests. Enterprise edition – Supports 16 transactions per second (TPS) for read operations and 8 TPS for write operations per directory. There's a concurrency limit of 10 concurrent requests. Amazon Web Services Account - Supports a total of 100 TPS for Directory Service Data operations across all directories. Directory Service Data only supports the Managed Microsoft AD directory type and is only available in the primary Amazon Web Services Region. For more information, see Managed Microsoft AD and Primary vs additional Regions in the Directory Service Administration Guide. +public struct DirectoryServiceData: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the DirectoryServiceData client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + serviceName: "DirectoryServiceData", + serviceIdentifier: "ds-data", + serviceProtocol: .restjson, + apiVersion: "2023-05-31", + endpoint: endpoint, + errorType: DirectoryServiceDataErrorType.self, + xmlNamespace: "http://directoryservicedata.amazonaws.com/doc/2023-05-31/", + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// Adds an existing user, group, or computer as a group member. + @Sendable + @inlinable + public func addGroupMember(_ input: AddGroupMemberRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AddGroupMemberResult { + try await self.client.execute( + operation: "AddGroupMember", + path: "/GroupMemberships/AddGroupMember", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Adds an existing user, group, or computer as a group member. 
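// A minimal usage sketch for the service object declared above, assuming the usual Soto pattern
// of one shared AWSClient; the directory ID, group name, and member name are placeholders.
// The convenience overload generates the idempotency client token automatically.
import SotoDirectoryServiceData

func addOperator() async throws {
    let client = AWSClient()
    let dsData = DirectoryServiceData(client: client, region: .useast1)
    _ = try await dsData.addGroupMember(
        directoryId: "d-1234567890",   // placeholder directory ID
        groupName: "operators",        // placeholder group name
        memberName: "jane.doe"         // placeholder SAMAccountName
    )
    // Shut the client down once all requests have completed.
    try await client.shutdown()
}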
+ /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - groupName: The name of the group. + /// - memberName: The SAMAccountName of the user, group, or computer to add as a group member. + /// - memberRealm: The domain name that's associated with the group member. This parameter is required only when adding a member outside of your Managed Microsoft AD domain to a group inside of your Managed Microsoft AD domain. This parameter defaults to the Managed Microsoft AD domain. This parameter is case insensitive. + /// - logger: Logger use during operation + @inlinable + public func addGroupMember( + clientToken: String? = AddGroupMemberRequest.idempotencyToken(), + directoryId: String, + groupName: String, + memberName: String, + memberRealm: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> AddGroupMemberResult { + let input = AddGroupMemberRequest( + clientToken: clientToken, + directoryId: directoryId, + groupName: groupName, + memberName: memberName, + memberRealm: memberRealm + ) + return try await self.addGroupMember(input, logger: logger) + } + + /// Creates a new group. + @Sendable + @inlinable + public func createGroup(_ input: CreateGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGroupResult { + try await self.client.execute( + operation: "CreateGroup", + path: "/Groups/CreateGroup", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a new group. + /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - groupScope: The scope of the AD group. For details, see Active Directory security group scope. + /// - groupType: The AD group type. For details, see Active Directory security group type. + /// - otherAttributes: An expression that defines one or more attributes with the data type and value of each attribute. + /// - samAccountName: The name of the group. + /// - logger: Logger use during operation + @inlinable + public func createGroup( + clientToken: String? 
= CreateGroupRequest.idempotencyToken(), + directoryId: String, + groupScope: GroupScope? = nil, + groupType: GroupType? = nil, + otherAttributes: [String: AttributeValue]? = nil, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateGroupResult { + let input = CreateGroupRequest( + clientToken: clientToken, + directoryId: directoryId, + groupScope: groupScope, + groupType: groupType, + otherAttributes: otherAttributes, + samAccountName: samAccountName + ) + return try await self.createGroup(input, logger: logger) + } + + /// Creates a new user. + @Sendable + @inlinable + public func createUser(_ input: CreateUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateUserResult { + try await self.client.execute( + operation: "CreateUser", + path: "/Users/CreateUser", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a new user. + /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that’s associated with the user. + /// - emailAddress: The email address of the user. + /// - givenName: The first name of the user. + /// - otherAttributes: An expression that defines one or more attribute names with the data type and value of each attribute. A key is an attribute name, and the value is a list of maps. For a list of supported attributes, see Directory Service Data Attributes. Attribute names are case insensitive. + /// - samAccountName: The name of the user. + /// - surname: The last name of the user. + /// - logger: Logger use during operation + @inlinable + public func createUser( + clientToken: String? = CreateUserRequest.idempotencyToken(), + directoryId: String, + emailAddress: String? = nil, + givenName: String? = nil, + otherAttributes: [String: AttributeValue]? = nil, + samAccountName: String, + surname: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateUserResult { + let input = CreateUserRequest( + clientToken: clientToken, + directoryId: directoryId, + emailAddress: emailAddress, + givenName: givenName, + otherAttributes: otherAttributes, + samAccountName: samAccountName, + surname: surname + ) + return try await self.createUser(input, logger: logger) + } + + /// Deletes a group. + @Sendable + @inlinable + public func deleteGroup(_ input: DeleteGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteGroupResult { + try await self.client.execute( + operation: "DeleteGroup", + path: "/Groups/DeleteGroup", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes a group. 
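// A sketch of the createUser convenience overload above, reusing an explicit client token so a
// retried call stays within the 8-hour idempotency window; the directory ID and the attribute
// names in otherAttributes are placeholders.
import Foundation
import SotoDirectoryServiceData

func createExampleUser(using dsData: DirectoryServiceData) async throws {
    let token = UUID().uuidString   // keep this token if the request needs to be retried
    _ = try await dsData.createUser(
        clientToken: token,
        directoryId: "d-1234567890",          // placeholder directory ID
        emailAddress: "jane.doe@example.com",
        givenName: "Jane",
        otherAttributes: [
            "department": .s("Engineering"),  // encodes as {"S": "Engineering"}
            "employeeNumber": .n(1042)        // encodes as {"N": 1042}
        ],
        samAccountName: "jane.doe",
        surname: "Doe"
    )
}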
+ /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - samAccountName: The name of the group. + /// - logger: Logger use during operation + @inlinable + public func deleteGroup( + clientToken: String? = DeleteGroupRequest.idempotencyToken(), + directoryId: String, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteGroupResult { + let input = DeleteGroupRequest( + clientToken: clientToken, + directoryId: directoryId, + samAccountName: samAccountName + ) + return try await self.deleteGroup(input, logger: logger) + } + + /// Deletes a user. + @Sendable + @inlinable + public func deleteUser(_ input: DeleteUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteUserResult { + try await self.client.execute( + operation: "DeleteUser", + path: "/Users/DeleteUser", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes a user. + /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that's associated with the user. + /// - samAccountName: The name of the user. + /// - logger: Logger use during operation + @inlinable + public func deleteUser( + clientToken: String? = DeleteUserRequest.idempotencyToken(), + directoryId: String, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteUserResult { + let input = DeleteUserRequest( + clientToken: clientToken, + directoryId: directoryId, + samAccountName: samAccountName + ) + return try await self.deleteUser(input, logger: logger) + } + + /// Returns information about a specific group. + @Sendable + @inlinable + public func describeGroup(_ input: DescribeGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeGroupResult { + try await self.client.execute( + operation: "DescribeGroup", + path: "/Groups/DescribeGroup", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns information about a specific group. 
+ /// + /// Parameters: + /// - directoryId: The Identifier (ID) of the directory associated with the group. + /// - otherAttributes: One or more attributes to be returned for the group. For a list of supported attributes, see Directory Service Data Attributes. + /// - realm: The domain name that's associated with the group. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive. + /// - samAccountName: The name of the group. + /// - logger: Logger use during operation + @inlinable + public func describeGroup( + directoryId: String, + otherAttributes: [String]? = nil, + realm: String? = nil, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeGroupResult { + let input = DescribeGroupRequest( + directoryId: directoryId, + otherAttributes: otherAttributes, + realm: realm, + samAccountName: samAccountName + ) + return try await self.describeGroup(input, logger: logger) + } + + /// Returns information about a specific user. + @Sendable + @inlinable + public func describeUser(_ input: DescribeUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeUserResult { + try await self.client.execute( + operation: "DescribeUser", + path: "/Users/DescribeUser", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns information about a specific user. + /// + /// Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the user. + /// - otherAttributes: One or more attribute names to be returned for the user. A key is an attribute name, and the value is a list of maps. For a list of supported attributes, see Directory Service Data Attributes. + /// - realm: The domain name that's associated with the user. This parameter is optional, so you can return users outside your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD users are returned. This value is case insensitive. + /// - samAccountName: The name of the user. + /// - logger: Logger use during operation + @inlinable + public func describeUser( + directoryId: String, + otherAttributes: [String]? = nil, + realm: String? = nil, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeUserResult { + let input = DescribeUserRequest( + directoryId: directoryId, + otherAttributes: otherAttributes, + realm: realm, + samAccountName: samAccountName + ) + return try await self.describeUser(input, logger: logger) + } + + /// Deactivates an active user account. For information about how to enable an inactive user account, see ResetUserPassword in the Directory Service API Reference. + @Sendable + @inlinable + public func disableUser(_ input: DisableUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisableUserResult { + try await self.client.execute( + operation: "DisableUser", + path: "/Users/DisableUser", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deactivates an active user account. For information about how to enable an inactive user account, see ResetUserPassword in the Directory Service API Reference. 
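// A sketch chaining describeUser and disableUser as part of an offboarding flow; the directory
// ID is a placeholder and "department" is assumed to be a supported attribute name. The result
// is simply printed rather than relying on specific response fields.
import SotoDirectoryServiceData

func offboard(_ dsData: DirectoryServiceData, user: String) async throws {
    let details = try await dsData.describeUser(
        directoryId: "d-1234567890",      // placeholder directory ID
        otherAttributes: ["department"],  // assumed attribute name
        samAccountName: user
    )
    print(details)
    _ = try await dsData.disableUser(
        directoryId: "d-1234567890",
        samAccountName: user
    )
}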
+ /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that's associated with the user. + /// - samAccountName: The name of the user. + /// - logger: Logger use during operation + @inlinable + public func disableUser( + clientToken: String? = DisableUserRequest.idempotencyToken(), + directoryId: String, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisableUserResult { + let input = DisableUserRequest( + clientToken: clientToken, + directoryId: directoryId, + samAccountName: samAccountName + ) + return try await self.disableUser(input, logger: logger) + } + + /// Returns member information for the specified group. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the ListGroupMembers.NextToken member contains a token that you pass in the next call to ListGroupMembers. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + @Sendable + @inlinable + public func listGroupMembers(_ input: ListGroupMembersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListGroupMembersResult { + try await self.client.execute( + operation: "ListGroupMembers", + path: "/GroupMemberships/ListGroupMembers", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns member information for the specified group. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the ListGroupMembers.NextToken member contains a token that you pass in the next call to ListGroupMembers. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + /// + /// Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - maxResults: The maximum number of results to be returned per request. + /// - memberRealm: The domain name that's associated with the group member. This parameter defaults to the Managed Microsoft AD domain. This parameter is optional and case insensitive. + /// - nextToken: An encoded paging token for paginated calls that can be passed back to retrieve the next page. + /// - realm: The domain name that's associated with the group. This parameter is optional, so you can return members from a group outside of your Managed Microsoft AD domain. When no value is defined, only members of your Managed Microsoft AD groups are returned. This value is case insensitive. + /// - samAccountName: The name of the group. + /// - logger: Logger use during operation + @inlinable + public func listGroupMembers( + directoryId: String, + maxResults: Int? = nil, + memberRealm: String? 
= nil, + nextToken: String? = nil, + realm: String? = nil, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListGroupMembersResult { + let input = ListGroupMembersRequest( + directoryId: directoryId, + maxResults: maxResults, + memberRealm: memberRealm, + nextToken: nextToken, + realm: realm, + samAccountName: samAccountName + ) + return try await self.listGroupMembers(input, logger: logger) + } + + /// Returns group information for the specified directory. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the ListGroups.NextToken member contains a token that you pass in the next call to ListGroups. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + @Sendable + @inlinable + public func listGroups(_ input: ListGroupsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListGroupsResult { + try await self.client.execute( + operation: "ListGroups", + path: "/Groups/ListGroups", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns group information for the specified directory. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the ListGroups.NextToken member contains a token that you pass in the next call to ListGroups. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + /// + /// Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - maxResults: The maximum number of results to be returned per request. + /// - nextToken: An encoded paging token for paginated calls that can be passed back to retrieve the next page. + /// - realm: The domain name associated with the directory. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive. + /// - logger: Logger use during operation + @inlinable + public func listGroups( + directoryId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + realm: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListGroupsResult { + let input = ListGroupsRequest( + directoryId: directoryId, + maxResults: maxResults, + nextToken: nextToken, + realm: realm + ) + return try await self.listGroups(input, logger: logger) + } + + /// Returns group information for the specified member. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the ListGroupsForMember.NextToken member contains a token that you pass in the next call to ListGroupsForMember. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + @Sendable + @inlinable + public func listGroupsForMember(_ input: ListGroupsForMemberRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListGroupsForMemberResult { + try await self.client.execute( + operation: "ListGroupsForMember", + path: "/GroupMemberships/ListGroupsForMember", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns group information for the specified member. 
This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the ListGroupsForMember.NextToken member contains a token that you pass in the next call to ListGroupsForMember. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + /// + /// Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the member. + /// - maxResults: The maximum number of results to be returned per request. + /// - memberRealm: The domain name that's associated with the group member. This parameter is optional, so you can limit your results to the group members in a specific domain. This parameter is case insensitive and defaults to Realm + /// - nextToken: An encoded paging token for paginated calls that can be passed back to retrieve the next page. + /// - realm: The domain name that's associated with the group. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive and defaults to your Managed Microsoft AD domain. + /// - samAccountName: The SAMAccountName of the user, group, or computer that's a member of the group. + /// - logger: Logger use during operation + @inlinable + public func listGroupsForMember( + directoryId: String, + maxResults: Int? = nil, + memberRealm: String? = nil, + nextToken: String? = nil, + realm: String? = nil, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListGroupsForMemberResult { + let input = ListGroupsForMemberRequest( + directoryId: directoryId, + maxResults: maxResults, + memberRealm: memberRealm, + nextToken: nextToken, + realm: realm, + samAccountName: samAccountName + ) + return try await self.listGroupsForMember(input, logger: logger) + } + + /// Returns user information for the specified directory. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the ListUsers.NextToken member contains a token that you pass in the next call to ListUsers. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + @Sendable + @inlinable + public func listUsers(_ input: ListUsersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListUsersResult { + try await self.client.execute( + operation: "ListUsers", + path: "/Users/ListUsers", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns user information for the specified directory. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the ListUsers.NextToken member contains a token that you pass in the next call to ListUsers. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + /// + /// Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the user. + /// - maxResults: The maximum number of results to be returned per request. + /// - nextToken: An encoded paging token for paginated calls that can be passed back to retrieve the next page. + /// - realm: The domain name that's associated with the user. 
This parameter is optional, so you can return users outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD users are returned. This value is case insensitive. + /// - logger: Logger use during operation + @inlinable + public func listUsers( + directoryId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + realm: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListUsersResult { + let input = ListUsersRequest( + directoryId: directoryId, + maxResults: maxResults, + nextToken: nextToken, + realm: realm + ) + return try await self.listUsers(input, logger: logger) + } + + /// Removes a member from a group. + @Sendable + @inlinable + public func removeGroupMember(_ input: RemoveGroupMemberRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RemoveGroupMemberResult { + try await self.client.execute( + operation: "RemoveGroupMember", + path: "/GroupMemberships/RemoveGroupMember", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes a member from a group. + /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that's associated with the member. + /// - groupName: The name of the group. + /// - memberName: The SAMAccountName of the user, group, or computer to remove from the group. + /// - memberRealm: The domain name that's associated with the group member. This parameter defaults to the Managed Microsoft AD domain. This parameter is optional and case insensitive. + /// - logger: Logger use during operation + @inlinable + public func removeGroupMember( + clientToken: String? = RemoveGroupMemberRequest.idempotencyToken(), + directoryId: String, + groupName: String, + memberName: String, + memberRealm: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> RemoveGroupMemberResult { + let input = RemoveGroupMemberRequest( + clientToken: clientToken, + directoryId: directoryId, + groupName: groupName, + memberName: memberName, + memberRealm: memberRealm + ) + return try await self.removeGroupMember(input, logger: logger) + } + + /// Searches the specified directory for a group. You can find groups that match the SearchString parameter with the value of their attributes included in the SearchString parameter. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the SearchGroups.NextToken member contains a token that you pass in the next call to SearchGroups. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. 
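// A sketch of the manual NextToken pagination pattern described above, combined with
// removeGroupMember: it walks a group's membership page by page and removes a member once
// found. The directory ID is a placeholder, and the `members` / `samAccountName` fields on
// the result are assumed from the service model.
import SotoDirectoryServiceData

func removeIfMember(_ dsData: DirectoryServiceData, group: String, member: String) async throws {
    var token: String? = nil
    repeat {
        let page = try await dsData.listGroupMembers(
            directoryId: "d-1234567890",   // placeholder directory ID
            maxResults: 50,
            nextToken: token,
            samAccountName: group
        )
        if (page.members ?? []).contains(where: { $0.samAccountName == member }) {
            _ = try await dsData.removeGroupMember(
                directoryId: "d-1234567890",
                groupName: group,
                memberName: member
            )
            return
        }
        token = page.nextToken
    } while token != nil
}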
+ @Sendable + @inlinable + public func searchGroups(_ input: SearchGroupsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchGroupsResult { + try await self.client.execute( + operation: "SearchGroups", + path: "/Groups/SearchGroups", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Searches the specified directory for a group. You can find groups that match the SearchString parameter with the value of their attributes included in the SearchString parameter. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the SearchGroups.NextToken member contains a token that you pass in the next call to SearchGroups. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + /// + /// Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - maxResults: The maximum number of results to be returned per request. + /// - nextToken: An encoded paging token for paginated calls that can be passed back to retrieve the next page. + /// - realm: The domain name that's associated with the group. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive. + /// - searchAttributes: One or more data attributes that are used to search for a group. For a list of supported attributes, see Directory Service Data Attributes. + /// - searchString: The attribute value that you want to search for. Wildcard (*) searches aren't supported. For a list of supported attributes, see Directory Service Data Attributes. + /// - logger: Logger use during operation + @inlinable + public func searchGroups( + directoryId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + realm: String? = nil, + searchAttributes: [String], + searchString: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> SearchGroupsResult { + let input = SearchGroupsRequest( + directoryId: directoryId, + maxResults: maxResults, + nextToken: nextToken, + realm: realm, + searchAttributes: searchAttributes, + searchString: searchString + ) + return try await self.searchGroups(input, logger: logger) + } + + /// Searches the specified directory for a user. You can find users that match the SearchString parameter with the value of their attributes included in the SearchString parameter. This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the SearchUsers.NextToken member contains a token that you pass in the next call to SearchUsers. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + @Sendable + @inlinable + public func searchUsers(_ input: SearchUsersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SearchUsersResult { + try await self.client.execute( + operation: "SearchUsers", + path: "/Users/SearchUsers", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Searches the specified directory for a user. You can find users that match the SearchString parameter with the value of their attributes included in the SearchString parameter. 
This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the SearchUsers.NextToken member contains a token that you pass in the next call to SearchUsers. This retrieves the next set of items. You can also specify a maximum number of return results with the MaxResults parameter. + /// + /// Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the user. + /// - maxResults: The maximum number of results to be returned per request. + /// - nextToken: An encoded paging token for paginated calls that can be passed back to retrieve the next page. + /// - realm: The domain name that's associated with the user. This parameter is optional, so you can return users outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD users are returned. This value is case insensitive. + /// - searchAttributes: One or more data attributes that are used to search for a user. For a list of supported attributes, see Directory Service Data Attributes. + /// - searchString: The attribute value that you want to search for. Wildcard (*) searches aren't supported. For a list of supported attributes, see Directory Service Data Attributes. + /// - logger: Logger use during operation + @inlinable + public func searchUsers( + directoryId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + realm: String? = nil, + searchAttributes: [String], + searchString: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> SearchUsersResult { + let input = SearchUsersRequest( + directoryId: directoryId, + maxResults: maxResults, + nextToken: nextToken, + realm: realm, + searchAttributes: searchAttributes, + searchString: searchString + ) + return try await self.searchUsers(input, logger: logger) + } + + /// Updates group information. + @Sendable + @inlinable + public func updateGroup(_ input: UpdateGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateGroupResult { + try await self.client.execute( + operation: "UpdateGroup", + path: "/Groups/UpdateGroup", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates group information. + /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - groupScope: The scope of the AD group. For details, see Active Directory security groups. + /// - groupType: The AD group type. For details, see Active Directory security group type. + /// - otherAttributes: An expression that defines one or more attributes with the data type and the value of each attribute. + /// - samAccountName: The name of the group. + /// - updateType: The type of update to be performed. If no value exists for the attribute, use ADD. 
Otherwise, use REPLACE to change an attribute value or REMOVE to clear the attribute value. + /// - logger: Logger use during operation + @inlinable + public func updateGroup( + clientToken: String? = UpdateGroupRequest.idempotencyToken(), + directoryId: String, + groupScope: GroupScope? = nil, + groupType: GroupType? = nil, + otherAttributes: [String: AttributeValue]? = nil, + samAccountName: String, + updateType: UpdateType? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateGroupResult { + let input = UpdateGroupRequest( + clientToken: clientToken, + directoryId: directoryId, + groupScope: groupScope, + groupType: groupType, + otherAttributes: otherAttributes, + samAccountName: samAccountName, + updateType: updateType + ) + return try await self.updateGroup(input, logger: logger) + } + + /// Updates user information. + @Sendable + @inlinable + public func updateUser(_ input: UpdateUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateUserResult { + try await self.client.execute( + operation: "UpdateUser", + path: "/Users/UpdateUser", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates user information. + /// + /// Parameters: + /// - clientToken: A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + /// - directoryId: The identifier (ID) of the directory that's associated with the user. + /// - emailAddress: The email address of the user. + /// - givenName: The first name of the user. + /// - otherAttributes: An expression that defines one or more attribute names with the data type and value of each attribute. A key is an attribute name, and the value is a list of maps. For a list of supported attributes, see Directory Service Data Attributes. Attribute names are case insensitive. + /// - samAccountName: The name of the user. + /// - surname: The last name of the user. + /// - updateType: The type of update to be performed. If no value exists for the attribute, use ADD. Otherwise, use REPLACE to change an attribute value or REMOVE to clear the attribute value. + /// - logger: Logger use during operation + @inlinable + public func updateUser( + clientToken: String? = UpdateUserRequest.idempotencyToken(), + directoryId: String, + emailAddress: String? = nil, + givenName: String? = nil, + otherAttributes: [String: AttributeValue]? = nil, + samAccountName: String, + surname: String? = nil, + updateType: UpdateType? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateUserResult { + let input = UpdateUserRequest( + clientToken: clientToken, + directoryId: directoryId, + emailAddress: emailAddress, + givenName: givenName, + otherAttributes: otherAttributes, + samAccountName: samAccountName, + surname: surname, + updateType: updateType + ) + return try await self.updateUser(input, logger: logger) + } +} + +extension DirectoryServiceData { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. + public init(from: DirectoryServiceData, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension DirectoryServiceData { + /// Return PaginatorSequence for operation ``listGroupMembers(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listGroupMembersPaginator( + _ input: ListGroupMembersRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listGroupMembers, + inputKey: \ListGroupMembersRequest.nextToken, + outputKey: \ListGroupMembersResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listGroupMembers(_:logger:)``. + /// + /// - Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - maxResults: The maximum number of results to be returned per request. + /// - memberRealm: The domain name that's associated with the group member. This parameter defaults to the Managed Microsoft AD domain. This parameter is optional and case insensitive. + /// - realm: The domain name that's associated with the group. This parameter is optional, so you can return members from a group outside of your Managed Microsoft AD domain. When no value is defined, only members of your Managed Microsoft AD groups are returned. This value is case insensitive. + /// - samAccountName: The name of the group. + /// - logger: Logger used for logging + @inlinable + public func listGroupMembersPaginator( + directoryId: String, + maxResults: Int? = nil, + memberRealm: String? = nil, + realm: String? = nil, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListGroupMembersRequest( + directoryId: directoryId, + maxResults: maxResults, + memberRealm: memberRealm, + realm: realm, + samAccountName: samAccountName + ) + return self.listGroupMembersPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listGroups(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listGroupsPaginator( + _ input: ListGroupsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listGroups, + inputKey: \ListGroupsRequest.nextToken, + outputKey: \ListGroupsResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listGroups(_:logger:)``. 
+ /// + /// - Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - maxResults: The maximum number of results to be returned per request. + /// - realm: The domain name associated with the directory. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive. + /// - logger: Logger used for logging + @inlinable + public func listGroupsPaginator( + directoryId: String, + maxResults: Int? = nil, + realm: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListGroupsRequest( + directoryId: directoryId, + maxResults: maxResults, + realm: realm + ) + return self.listGroupsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listGroupsForMember(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listGroupsForMemberPaginator( + _ input: ListGroupsForMemberRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listGroupsForMember, + inputKey: \ListGroupsForMemberRequest.nextToken, + outputKey: \ListGroupsForMemberResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listGroupsForMember(_:logger:)``. + /// + /// - Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the member. + /// - maxResults: The maximum number of results to be returned per request. + /// - memberRealm: The domain name that's associated with the group member. This parameter is optional, so you can limit your results to the group members in a specific domain. This parameter is case insensitive and defaults to Realm + /// - realm: The domain name that's associated with the group. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive and defaults to your Managed Microsoft AD domain. + /// - samAccountName: The SAMAccountName of the user, group, or computer that's a member of the group. + /// - logger: Logger used for logging + @inlinable + public func listGroupsForMemberPaginator( + directoryId: String, + maxResults: Int? = nil, + memberRealm: String? = nil, + realm: String? = nil, + samAccountName: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListGroupsForMemberRequest( + directoryId: directoryId, + maxResults: maxResults, + memberRealm: memberRealm, + realm: realm, + samAccountName: samAccountName + ) + return self.listGroupsForMemberPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listUsers(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listUsersPaginator( + _ input: ListUsersRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listUsers, + inputKey: \ListUsersRequest.nextToken, + outputKey: \ListUsersResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listUsers(_:logger:)``. 
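// A sketch of consuming one of the PaginatorSequence helpers with for-try-await, using the
// searchUsersPaginator convenience defined just below. The directory ID is a placeholder,
// "GivenName" is assumed to be a supported search attribute, and the `users` field on each
// page is assumed from the service model.
import SotoDirectoryServiceData

func printUsersNamedJane(_ dsData: DirectoryServiceData) async throws {
    let pages = dsData.searchUsersPaginator(
        directoryId: "d-1234567890",        // placeholder directory ID
        searchAttributes: ["GivenName"],    // assumed attribute name
        searchString: "Jane"
    )
    for try await page in pages {
        for user in page.users ?? [] {
            print(user)
        }
    }
}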
+ /// + /// - Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the user. + /// - maxResults: The maximum number of results to be returned per request. + /// - realm: The domain name that's associated with the user. This parameter is optional, so you can return users outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD users are returned. This value is case insensitive. + /// - logger: Logger used for logging + @inlinable + public func listUsersPaginator( + directoryId: String, + maxResults: Int? = nil, + realm: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListUsersRequest( + directoryId: directoryId, + maxResults: maxResults, + realm: realm + ) + return self.listUsersPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``searchGroups(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func searchGroupsPaginator( + _ input: SearchGroupsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.searchGroups, + inputKey: \SearchGroupsRequest.nextToken, + outputKey: \SearchGroupsResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``searchGroups(_:logger:)``. + /// + /// - Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the group. + /// - maxResults: The maximum number of results to be returned per request. + /// - realm: The domain name that's associated with the group. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive. + /// - searchAttributes: One or more data attributes that are used to search for a group. For a list of supported attributes, see Directory Service Data Attributes. + /// - searchString: The attribute value that you want to search for. Wildcard (*) searches aren't supported. For a list of supported attributes, see Directory Service Data Attributes. + /// - logger: Logger used for logging + @inlinable + public func searchGroupsPaginator( + directoryId: String, + maxResults: Int? = nil, + realm: String? = nil, + searchAttributes: [String], + searchString: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = SearchGroupsRequest( + directoryId: directoryId, + maxResults: maxResults, + realm: realm, + searchAttributes: searchAttributes, + searchString: searchString + ) + return self.searchGroupsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``searchUsers(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func searchUsersPaginator( + _ input: SearchUsersRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.searchUsers, + inputKey: \SearchUsersRequest.nextToken, + outputKey: \SearchUsersResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``searchUsers(_:logger:)``. + /// + /// - Parameters: + /// - directoryId: The identifier (ID) of the directory that's associated with the user. 
+ /// - maxResults: The maximum number of results to be returned per request. + /// - realm: The domain name that's associated with the user. This parameter is optional, so you can return users outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD users are returned. This value is case insensitive. + /// - searchAttributes: One or more data attributes that are used to search for a user. For a list of supported attributes, see Directory Service Data Attributes. + /// - searchString: The attribute value that you want to search for. Wildcard (*) searches aren't supported. For a list of supported attributes, see Directory Service Data Attributes. + /// - logger: Logger used for logging + @inlinable + public func searchUsersPaginator( + directoryId: String, + maxResults: Int? = nil, + realm: String? = nil, + searchAttributes: [String], + searchString: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = SearchUsersRequest( + directoryId: directoryId, + maxResults: maxResults, + realm: realm, + searchAttributes: searchAttributes, + searchString: searchString + ) + return self.searchUsersPaginator(input, logger: logger) + } +} + +extension DirectoryServiceData.ListGroupMembersRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> DirectoryServiceData.ListGroupMembersRequest { + return .init( + directoryId: self.directoryId, + maxResults: self.maxResults, + memberRealm: self.memberRealm, + nextToken: token, + realm: self.realm, + samAccountName: self.samAccountName + ) + } +} + +extension DirectoryServiceData.ListGroupsForMemberRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> DirectoryServiceData.ListGroupsForMemberRequest { + return .init( + directoryId: self.directoryId, + maxResults: self.maxResults, + memberRealm: self.memberRealm, + nextToken: token, + realm: self.realm, + samAccountName: self.samAccountName + ) + } +} + +extension DirectoryServiceData.ListGroupsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> DirectoryServiceData.ListGroupsRequest { + return .init( + directoryId: self.directoryId, + maxResults: self.maxResults, + nextToken: token, + realm: self.realm + ) + } +} + +extension DirectoryServiceData.ListUsersRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> DirectoryServiceData.ListUsersRequest { + return .init( + directoryId: self.directoryId, + maxResults: self.maxResults, + nextToken: token, + realm: self.realm + ) + } +} + +extension DirectoryServiceData.SearchGroupsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> DirectoryServiceData.SearchGroupsRequest { + return .init( + directoryId: self.directoryId, + maxResults: self.maxResults, + nextToken: token, + realm: self.realm, + searchAttributes: self.searchAttributes, + searchString: self.searchString + ) + } +} + +extension DirectoryServiceData.SearchUsersRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> DirectoryServiceData.SearchUsersRequest { + return .init( + directoryId: self.directoryId, + maxResults: self.maxResults, + nextToken: token, + realm: self.realm, + searchAttributes: self.searchAttributes, + searchString: self.searchString + ) + } +} diff --git a/Sources/Soto/Services/DirectoryServiceData/DirectoryServiceData_shapes.swift 
b/Sources/Soto/Services/DirectoryServiceData/DirectoryServiceData_shapes.swift new file mode 100644 index 0000000000..1c520f910e --- /dev/null +++ b/Sources/Soto/Services/DirectoryServiceData/DirectoryServiceData_shapes.swift @@ -0,0 +1,1630 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension DirectoryServiceData { + // MARK: Enums + + public enum GroupScope: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case builtinLocal = "BuiltinLocal" + case domainLocal = "DomainLocal" + case global = "Global" + case universal = "Universal" + public var description: String { return self.rawValue } + } + + public enum GroupType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case distribution = "Distribution" + case security = "Security" + public var description: String { return self.rawValue } + } + + public enum MemberType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case computer = "COMPUTER" + case group = "GROUP" + case user = "USER" + public var description: String { return self.rawValue } + } + + public enum UpdateType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case add = "ADD" + case remove = "REMOVE" + case replace = "REPLACE" + public var description: String { return self.rawValue } + } + + public enum AttributeValue: AWSEncodableShape & AWSDecodableShape, Sendable { + /// Indicates that the attribute type value is a boolean. For example: "BOOL": true + case bool(Bool) + /// Indicates that the attribute type value is a number. For example: "N": "16" + case n(Int64) + /// Indicates that the attribute type value is a string. For example: "S": "S Group" + case s(String) + /// Indicates that the attribute type value is a string set. 
For example: "SS": ["sample_service_class/host.sample.com:1234/sample_service_name_1", "sample_service_class/host.sample.com:1234/sample_service_name_2"] + case ss([String]) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .bool: + let value = try container.decode(Bool.self, forKey: .bool) + self = .bool(value) + case .n: + let value = try container.decode(Int64.self, forKey: .n) + self = .n(value) + case .s: + let value = try container.decode(String.self, forKey: .s) + self = .s(value) + case .ss: + let value = try container.decode([String].self, forKey: .ss) + self = .ss(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .bool(let value): + try container.encode(value, forKey: .bool) + case .n(let value): + try container.encode(value, forKey: .n) + case .s(let value): + try container.encode(value, forKey: .s) + case .ss(let value): + try container.encode(value, forKey: .ss) + } + } + + public func validate(name: String) throws { + switch self { + case .s(let value): + try self.validate(value, name: "s", parent: name, max: 1024) + try self.validate(value, name: "s", parent: name, min: 1) + case .ss(let value): + try value.forEach { + try validate($0, name: "ss[]", parent: name, max: 1024) + try validate($0, name: "ss[]", parent: name, min: 1) + } + try self.validate(value, name: "ss", parent: name, max: 25) + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case bool = "BOOL" + case n = "N" + case s = "S" + case ss = "SS" + } + } + + // MARK: Shapes + + public struct AddGroupMemberRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String + /// The name of the group. + public let groupName: String + /// The SAMAccountName of the user, group, or computer to add as a group member. + public let memberName: String + /// The domain name that's associated with the group member. This parameter is required only when adding a member outside of your Managed Microsoft AD domain to a group inside of your Managed Microsoft AD domain. This parameter defaults to the Managed Microsoft AD domain. This parameter is case insensitive. + public let memberRealm: String? + + @inlinable + public init(clientToken: String? = AddGroupMemberRequest.idempotencyToken(), directoryId: String, groupName: String, memberName: String, memberRealm: String? 
= nil) { + self.clientToken = clientToken + self.directoryId = directoryId + self.groupName = groupName + self.memberName = memberName + self.memberRealm = memberRealm + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encode(self.groupName, forKey: .groupName) + try container.encode(self.memberName, forKey: .memberName) + try container.encodeIfPresent(self.memberRealm, forKey: .memberRealm) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.groupName, name: "groupName", parent: name, max: 64) + try self.validate(self.groupName, name: "groupName", parent: name, min: 1) + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + try self.validate(self.memberName, name: "memberName", parent: name, max: 63) + try self.validate(self.memberName, name: "memberName", parent: name, min: 1) + try self.validate(self.memberName, name: "memberName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + try self.validate(self.memberRealm, name: "memberRealm", parent: name, max: 255) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, min: 1) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case groupName = "GroupName" + case memberName = "MemberName" + case memberRealm = "MemberRealm" + } + } + + public struct AddGroupMemberResult: AWSDecodableShape { + public init() {} + } + + public struct CreateGroupRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String + /// The scope of the AD group. For details, see Active Directory security group scope. + public let groupScope: GroupScope? + /// The AD group type. For details, see Active Directory security group type. + public let groupType: GroupType? + /// An expression that defines one or more attributes with the data type and value of each attribute. + public let otherAttributes: [String: AttributeValue]? + /// The name of the group. 
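// Illustrative sketch (not part of the generated sources): the AttributeValue union
// defined above encodes as a single-key JSON object ("BOOL", "N", "S", or "SS"), so
// values for OtherAttributes maps can be built directly from the enum cases. Assumes
// the SotoDirectoryServiceData module produced by this diff; attribute names and
// values are placeholders.
import Foundation
import SotoDirectoryServiceData

func attributeValueSketch() throws {
    let attributes: [String: DirectoryServiceData.AttributeValue] = [
        "description": .s("EC2 administrators"),
        "carLicense": .ss(["plate-one", "plate-two"]),
    ]
    // Encoding a single value shows the tagged single-key layout, e.g. {"S":"EC2 administrators"}.
    let data = try JSONEncoder().encode(attributes["description"]!)
    print(String(decoding: data, as: UTF8.self))
}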
+ public let samAccountName: String + + @inlinable + public init(clientToken: String? = CreateGroupRequest.idempotencyToken(), directoryId: String, groupScope: GroupScope? = nil, groupType: GroupType? = nil, otherAttributes: [String: AttributeValue]? = nil, samAccountName: String) { + self.clientToken = clientToken + self.directoryId = directoryId + self.groupScope = groupScope + self.groupType = groupType + self.otherAttributes = otherAttributes + self.samAccountName = samAccountName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.groupScope, forKey: .groupScope) + try container.encodeIfPresent(self.groupType, forKey: .groupType) + try container.encodeIfPresent(self.otherAttributes, forKey: .otherAttributes) + try container.encode(self.samAccountName, forKey: .samAccountName) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.otherAttributes?.forEach { + try validate($0.key, name: "otherAttributes.key", parent: name, max: 63) + try validate($0.key, name: "otherAttributes.key", parent: name, min: 1) + try validate($0.key, name: "otherAttributes.key", parent: name, pattern: "^[A-Za-z*][A-Za-z-*]*$") + try $0.value.validate(name: "\(name).otherAttributes[\"\($0.key)\"]") + } + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, max: 25) + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 64) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case groupScope = "GroupScope" + case groupType = "GroupType" + case otherAttributes = "OtherAttributes" + case samAccountName = "SAMAccountName" + } + } + + public struct CreateGroupResult: AWSDecodableShape { + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String? + /// The name of the group. + public let samAccountName: String? + /// The unique security identifier (SID) of the group. + public let sid: String? + + @inlinable + public init(directoryId: String? = nil, samAccountName: String? = nil, sid: String? = nil) { + self.directoryId = directoryId + self.samAccountName = samAccountName + self.sid = sid + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case samAccountName = "SAMAccountName" + case sid = "SID" + } + } + + public struct CreateUserRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. 
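// Illustrative sketch (not part of the generated sources): creating a security group
// with the CreateGroupRequest defined above. Assumes a createGroup operation generated
// in DirectoryServiceData_api.swift elsewhere in this diff; the directory ID and group
// name are placeholders. ClientToken defaults to an idempotency token supplied by the
// initializer, so it is omitted here.
import SotoDirectoryServiceData

func createGroupSketch(dsData: DirectoryServiceData) async throws {
    let request = DirectoryServiceData.CreateGroupRequest(
        directoryId: "d-1234567890",   // must match ^d-[0-9a-f]{10}$
        groupScope: .global,
        groupType: .security,
        samAccountName: "app-admins"
    )
    let result = try await dsData.createGroup(request)
    print("Created group SID:", result.sid ?? "unknown")
}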
A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that’s associated with the user. + public let directoryId: String + /// The email address of the user. + public let emailAddress: String? + /// The first name of the user. + public let givenName: String? + /// An expression that defines one or more attribute names with the data type and value of each attribute. A key is an attribute name, and the value is a list of maps. For a list of supported attributes, see Directory Service Data Attributes. Attribute names are case insensitive. + public let otherAttributes: [String: AttributeValue]? + /// The name of the user. + public let samAccountName: String + /// The last name of the user. + public let surname: String? + + @inlinable + public init(clientToken: String? = CreateUserRequest.idempotencyToken(), directoryId: String, emailAddress: String? = nil, givenName: String? = nil, otherAttributes: [String: AttributeValue]? = nil, samAccountName: String, surname: String? = nil) { + self.clientToken = clientToken + self.directoryId = directoryId + self.emailAddress = emailAddress + self.givenName = givenName + self.otherAttributes = otherAttributes + self.samAccountName = samAccountName + self.surname = surname + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.emailAddress, forKey: .emailAddress) + try container.encodeIfPresent(self.givenName, forKey: .givenName) + try container.encodeIfPresent(self.otherAttributes, forKey: .otherAttributes) + try container.encode(self.samAccountName, forKey: .samAccountName) + try container.encodeIfPresent(self.surname, forKey: .surname) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.emailAddress, name: "emailAddress", parent: name, max: 256) + try self.validate(self.emailAddress, name: "emailAddress", parent: name, min: 1) + try self.validate(self.givenName, name: "givenName", parent: name, max: 64) + try self.validate(self.givenName, name: "givenName", parent: name, min: 1) + try self.otherAttributes?.forEach { + try validate($0.key, name: "otherAttributes.key", parent: name, max: 63) + try validate($0.key, name: "otherAttributes.key", parent: name, min: 1) + try validate($0.key, name: "otherAttributes.key", parent: name, pattern: "^[A-Za-z*][A-Za-z-*]*$") + try $0.value.validate(name: "\(name).otherAttributes[\"\($0.key)\"]") + } + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, max: 25) + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 20) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[\\w\\-.]+$") + try self.validate(self.surname, name: "surname", parent: name, max: 64) + try self.validate(self.surname, name: "surname", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case emailAddress = "EmailAddress" + case givenName = "GivenName" + case otherAttributes = "OtherAttributes" + case samAccountName = "SAMAccountName" + case surname = "Surname" + } + } + + public struct CreateUserResult: AWSDecodableShape { + /// The identifier (ID) of the directory where the address block is added. + public let directoryId: String? + /// The name of the user. + public let samAccountName: String? + /// The unique security identifier (SID) of the user. + public let sid: String? + + @inlinable + public init(directoryId: String? = nil, samAccountName: String? = nil, sid: String? = nil) { + self.directoryId = directoryId + self.samAccountName = samAccountName + self.sid = sid + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case samAccountName = "SAMAccountName" + case sid = "SID" + } + } + + public struct DeleteGroupRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. 
After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String + /// The name of the group. + public let samAccountName: String + + @inlinable + public init(clientToken: String? = DeleteGroupRequest.idempotencyToken(), directoryId: String, samAccountName: String) { + self.clientToken = clientToken + self.directoryId = directoryId + self.samAccountName = samAccountName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encode(self.samAccountName, forKey: .samAccountName) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 64) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case samAccountName = "SAMAccountName" + } + } + + public struct DeleteGroupResult: AWSDecodableShape { + public init() {} + } + + public struct DeleteUserRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that's associated with the user. + public let directoryId: String + /// The name of the user. + public let samAccountName: String + + @inlinable + public init(clientToken: String? = DeleteUserRequest.idempotencyToken(), directoryId: String, samAccountName: String) { + self.clientToken = clientToken + self.directoryId = directoryId + self.samAccountName = samAccountName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encode(self.samAccountName, forKey: .samAccountName) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 20) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[\\w\\-.]+$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case samAccountName = "SAMAccountName" + } + } + + public struct DeleteUserResult: AWSDecodableShape { + public init() {} + } + + public struct DescribeGroupRequest: AWSEncodableShape { + /// The Identifier (ID) of the directory associated with the group. + public let directoryId: String + /// One or more attributes to be returned for the group. For a list of supported attributes, see Directory Service Data Attributes. + public let otherAttributes: [String]? + /// The domain name that's associated with the group. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive. + public let realm: String? + /// The name of the group. + public let samAccountName: String + + @inlinable + public init(directoryId: String, otherAttributes: [String]? = nil, realm: String? = nil, samAccountName: String) { + self.directoryId = directoryId + self.otherAttributes = otherAttributes + self.realm = realm + self.samAccountName = samAccountName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.otherAttributes, forKey: .otherAttributes) + try container.encodeIfPresent(self.realm, forKey: .realm) + try container.encode(self.samAccountName, forKey: .samAccountName) + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.otherAttributes?.forEach { + try validate($0, name: "otherAttributes[]", parent: name, max: 63) + try validate($0, name: "otherAttributes[]", parent: name, min: 1) + try validate($0, name: "otherAttributes[]", parent: name, pattern: "^[A-Za-z*][A-Za-z-*]*$") + } + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, max: 25) + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, max: 255) + try self.validate(self.realm, name: "realm", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 64) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + } + + private enum CodingKeys: String, CodingKey { + case otherAttributes = "OtherAttributes" + case realm = "Realm" + case samAccountName = "SAMAccountName" + } + } + + public struct DescribeGroupResult: AWSDecodableShape { + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String? + /// The distinguished name of the object. + public let distinguishedName: String? + /// The scope of the AD group. For details, see Active Directory security groups. + public let groupScope: GroupScope? + /// The AD group type. For details, see Active Directory security group type. + public let groupType: GroupType? + /// The attribute values that are returned for the attribute names that are included in the request. + public let otherAttributes: [String: AttributeValue]? + /// The domain name that's associated with the group. + public let realm: String? + /// The name of the group. + public let samAccountName: String? + /// The unique security identifier (SID) of the group. + public let sid: String? + + @inlinable + public init(directoryId: String? = nil, distinguishedName: String? = nil, groupScope: GroupScope? = nil, groupType: GroupType? = nil, otherAttributes: [String: AttributeValue]? = nil, realm: String? = nil, samAccountName: String? = nil, sid: String? 
= nil) { + self.directoryId = directoryId + self.distinguishedName = distinguishedName + self.groupScope = groupScope + self.groupType = groupType + self.otherAttributes = otherAttributes + self.realm = realm + self.samAccountName = samAccountName + self.sid = sid + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case distinguishedName = "DistinguishedName" + case groupScope = "GroupScope" + case groupType = "GroupType" + case otherAttributes = "OtherAttributes" + case realm = "Realm" + case samAccountName = "SAMAccountName" + case sid = "SID" + } + } + + public struct DescribeUserRequest: AWSEncodableShape { + /// The identifier (ID) of the directory that's associated with the user. + public let directoryId: String + /// One or more attribute names to be returned for the user. A key is an attribute name, and the value is a list of maps. For a list of supported attributes, see Directory Service Data Attributes. + public let otherAttributes: [String]? + /// The domain name that's associated with the user. This parameter is optional, so you can return users outside your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD users are returned. This value is case insensitive. + public let realm: String? + /// The name of the user. + public let samAccountName: String + + @inlinable + public init(directoryId: String, otherAttributes: [String]? = nil, realm: String? = nil, samAccountName: String) { + self.directoryId = directoryId + self.otherAttributes = otherAttributes + self.realm = realm + self.samAccountName = samAccountName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.otherAttributes, forKey: .otherAttributes) + try container.encodeIfPresent(self.realm, forKey: .realm) + try container.encode(self.samAccountName, forKey: .samAccountName) + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.otherAttributes?.forEach { + try validate($0, name: "otherAttributes[]", parent: name, max: 63) + try validate($0, name: "otherAttributes[]", parent: name, min: 1) + try validate($0, name: "otherAttributes[]", parent: name, pattern: "^[A-Za-z*][A-Za-z-*]*$") + } + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, max: 25) + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, max: 255) + try self.validate(self.realm, name: "realm", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 20) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[\\w\\-.]+$") + } + + private enum CodingKeys: String, CodingKey { + case otherAttributes = "OtherAttributes" + case realm = "Realm" + case samAccountName = "SAMAccountName" + } + } + + public struct DescribeUserResult: AWSDecodableShape { + /// The identifier (ID) of the directory that's associated with the user. 
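// Illustrative sketch (not part of the generated sources): fetching a user together
// with extra attributes via the DescribeUserRequest defined above. Assumes a
// describeUser operation generated in DirectoryServiceData_api.swift elsewhere in this
// diff; the directory ID, user name, and attribute names are placeholders.
import SotoDirectoryServiceData

func describeUserSketch(dsData: DirectoryServiceData) async throws {
    let request = DirectoryServiceData.DescribeUserRequest(
        directoryId: "d-1234567890",
        otherAttributes: ["department", "telephoneNumber"],  // letters only, per the validation pattern
        samAccountName: "jdoe"
    )
    let user = try await dsData.describeUser(request)
    print(user.userPrincipalName ?? "no UPN")
    print("Returned attributes:", user.otherAttributes?.keys.sorted() ?? [])
}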
+ public let directoryId: String? + /// The distinguished name of the object. + public let distinguishedName: String? + /// The email address of the user. + public let emailAddress: String? + /// Indicates whether the user account is active. + public let enabled: Bool? + /// The first name of the user. + public let givenName: String? + /// The attribute values that are returned for the attribute names that are included in the request. Attribute names are case insensitive. + public let otherAttributes: [String: AttributeValue]? + /// The domain name that's associated with the user. + public let realm: String? + /// The name of the user. + public let samAccountName: String? + /// The unique security identifier (SID) of the user. + public let sid: String? + /// The last name of the user. + public let surname: String? + /// The UPN that is an Internet-style login name for a user and is based on the Internet standard RFC 822. The UPN is shorter than the distinguished name and easier to remember. + public let userPrincipalName: String? + + @inlinable + public init(directoryId: String? = nil, distinguishedName: String? = nil, emailAddress: String? = nil, enabled: Bool? = nil, givenName: String? = nil, otherAttributes: [String: AttributeValue]? = nil, realm: String? = nil, samAccountName: String? = nil, sid: String? = nil, surname: String? = nil, userPrincipalName: String? = nil) { + self.directoryId = directoryId + self.distinguishedName = distinguishedName + self.emailAddress = emailAddress + self.enabled = enabled + self.givenName = givenName + self.otherAttributes = otherAttributes + self.realm = realm + self.samAccountName = samAccountName + self.sid = sid + self.surname = surname + self.userPrincipalName = userPrincipalName + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case distinguishedName = "DistinguishedName" + case emailAddress = "EmailAddress" + case enabled = "Enabled" + case givenName = "GivenName" + case otherAttributes = "OtherAttributes" + case realm = "Realm" + case samAccountName = "SAMAccountName" + case sid = "SID" + case surname = "Surname" + case userPrincipalName = "UserPrincipalName" + } + } + + public struct DisableUserRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that's associated with the user. + public let directoryId: String + /// The name of the user. + public let samAccountName: String + + @inlinable + public init(clientToken: String? = DisableUserRequest.idempotencyToken(), directoryId: String, samAccountName: String) { + self.clientToken = clientToken + self.directoryId = directoryId + self.samAccountName = samAccountName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encode(self.samAccountName, forKey: .samAccountName) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 20) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[\\w\\-.]+$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case samAccountName = "SAMAccountName" + } + } + + public struct DisableUserResult: AWSDecodableShape { + public init() {} + } + + public struct Group: AWSDecodableShape { + /// The distinguished name of the object. + public let distinguishedName: String? + /// The scope of the AD group. For details, see Active Directory security groups + public let groupScope: GroupScope? + /// The AD group type. For details, see Active Directory security group type. + public let groupType: GroupType? + /// An expression of one or more attributes, data types, and the values of a group. + public let otherAttributes: [String: AttributeValue]? + /// The name of the group. + public let samAccountName: String + /// The unique security identifier (SID) of the group. + public let sid: String? + + @inlinable + public init(distinguishedName: String? = nil, groupScope: GroupScope? = nil, groupType: GroupType? = nil, otherAttributes: [String: AttributeValue]? = nil, samAccountName: String, sid: String? = nil) { + self.distinguishedName = distinguishedName + self.groupScope = groupScope + self.groupType = groupType + self.otherAttributes = otherAttributes + self.samAccountName = samAccountName + self.sid = sid + } + + private enum CodingKeys: String, CodingKey { + case distinguishedName = "DistinguishedName" + case groupScope = "GroupScope" + case groupType = "GroupType" + case otherAttributes = "OtherAttributes" + case samAccountName = "SAMAccountName" + case sid = "SID" + } + } + + public struct GroupSummary: AWSDecodableShape { + /// The scope of the AD group. For details, see Active Directory security groups. + public let groupScope: GroupScope + /// The AD group type. For details, see Active Directory security group type. + public let groupType: GroupType + /// The name of the group. + public let samAccountName: String + /// The unique security identifier (SID) of the group. + public let sid: String + + @inlinable + public init(groupScope: GroupScope, groupType: GroupType, samAccountName: String, sid: String) { + self.groupScope = groupScope + self.groupType = groupType + self.samAccountName = samAccountName + self.sid = sid + } + + private enum CodingKeys: String, CodingKey { + case groupScope = "GroupScope" + case groupType = "GroupType" + case samAccountName = "SAMAccountName" + case sid = "SID" + } + } + + public struct ListGroupMembersRequest: AWSEncodableShape { + /// The identifier (ID) of the directory that's associated with the group. 
+ public let directoryId: String + /// The maximum number of results to be returned per request. + public let maxResults: Int? + /// The domain name that's associated with the group member. This parameter defaults to the Managed Microsoft AD domain. This parameter is optional and case insensitive. + public let memberRealm: String? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain name that's associated with the group. This parameter is optional, so you can return members from a group outside of your Managed Microsoft AD domain. When no value is defined, only members of your Managed Microsoft AD groups are returned. This value is case insensitive. + public let realm: String? + /// The name of the group. + public let samAccountName: String + + @inlinable + public init(directoryId: String, maxResults: Int? = nil, memberRealm: String? = nil, nextToken: String? = nil, realm: String? = nil, samAccountName: String) { + self.directoryId = directoryId + self.maxResults = maxResults + self.memberRealm = memberRealm + self.nextToken = nextToken + self.realm = realm + self.samAccountName = samAccountName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.maxResults, forKey: .maxResults) + try container.encodeIfPresent(self.memberRealm, forKey: .memberRealm) + try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + try container.encodeIfPresent(self.realm, forKey: .realm) + try container.encode(self.samAccountName, forKey: .samAccountName) + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, max: 255) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, min: 1) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 6144) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, max: 255) + try self.validate(self.realm, name: "realm", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 64) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case memberRealm = "MemberRealm" + case nextToken = "NextToken" + case realm = "Realm" + case samAccountName = "SAMAccountName" + } + } + + public struct ListGroupMembersResult: AWSDecodableShape { + /// Identifier (ID) of the directory associated with the group. + public let directoryId: String? 
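// Illustrative sketch (not part of the generated sources): paging through group
// members. Assumes a listGroupMembersPaginator method generated in
// DirectoryServiceData_paginator.swift, which the AWSPaginateToken conformance for
// ListGroupMembersRequest earlier in this diff supports; names are placeholders.
import SotoDirectoryServiceData

func listAllMembersSketch(dsData: DirectoryServiceData) async throws -> [DirectoryServiceData.Member] {
    let request = DirectoryServiceData.ListGroupMembersRequest(
        directoryId: "d-1234567890",
        maxResults: 250,               // service maximum, per the validation above
        samAccountName: "app-admins"
    )
    var members: [DirectoryServiceData.Member] = []
    // Each element of the PaginatorSequence is one ListGroupMembersResult page.
    for try await page in dsData.listGroupMembersPaginator(request) {
        members.append(contentsOf: page.members ?? [])
    }
    return members
}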
+ /// The domain name that's associated with the member. + public let memberRealm: String? + /// The member information that the request returns. + public let members: [Member]? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain name that's associated with the group. + public let realm: String? + + @inlinable + public init(directoryId: String? = nil, memberRealm: String? = nil, members: [Member]? = nil, nextToken: String? = nil, realm: String? = nil) { + self.directoryId = directoryId + self.memberRealm = memberRealm + self.members = members + self.nextToken = nextToken + self.realm = realm + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case memberRealm = "MemberRealm" + case members = "Members" + case nextToken = "NextToken" + case realm = "Realm" + } + } + + public struct ListGroupsForMemberRequest: AWSEncodableShape { + /// The identifier (ID) of the directory that's associated with the member. + public let directoryId: String + /// The maximum number of results to be returned per request. + public let maxResults: Int? + /// The domain name that's associated with the group member. This parameter is optional, so you can limit your results to the group members in a specific domain. This parameter is case insensitive and defaults to Realm + public let memberRealm: String? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain name that's associated with the group. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive and defaults to your Managed Microsoft AD domain. + public let realm: String? + /// The SAMAccountName of the user, group, or computer that's a member of the group. + public let samAccountName: String + + @inlinable + public init(directoryId: String, maxResults: Int? = nil, memberRealm: String? = nil, nextToken: String? = nil, realm: String? = nil, samAccountName: String) { + self.directoryId = directoryId + self.maxResults = maxResults + self.memberRealm = memberRealm + self.nextToken = nextToken + self.realm = realm + self.samAccountName = samAccountName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.maxResults, forKey: .maxResults) + try container.encodeIfPresent(self.memberRealm, forKey: .memberRealm) + try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + try container.encodeIfPresent(self.realm, forKey: .realm) + try container.encode(self.samAccountName, forKey: .samAccountName) + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, max: 255) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, min: 1) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 6144) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, max: 255) + try self.validate(self.realm, name: "realm", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 63) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case memberRealm = "MemberRealm" + case nextToken = "NextToken" + case realm = "Realm" + case samAccountName = "SAMAccountName" + } + } + + public struct ListGroupsForMemberResult: AWSDecodableShape { + /// The identifier (ID) of the directory that's associated with the member. + public let directoryId: String? + /// The group information that the request returns. + public let groups: [GroupSummary]? + /// The domain that's associated with the member. + public let memberRealm: String? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain that's associated with the group. + public let realm: String? + + @inlinable + public init(directoryId: String? = nil, groups: [GroupSummary]? = nil, memberRealm: String? = nil, nextToken: String? = nil, realm: String? = nil) { + self.directoryId = directoryId + self.groups = groups + self.memberRealm = memberRealm + self.nextToken = nextToken + self.realm = realm + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case groups = "Groups" + case memberRealm = "MemberRealm" + case nextToken = "NextToken" + case realm = "Realm" + } + } + + public struct ListGroupsRequest: AWSEncodableShape { + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String + /// The maximum number of results to be returned per request. + public let maxResults: Int? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? 
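// Illustrative sketch (not part of the generated sources): manual paging with
// NextToken, as an alternative to the paginator helpers. Assumes a listGroups
// operation generated in DirectoryServiceData_api.swift elsewhere in this diff;
// the directory ID is a placeholder.
import SotoDirectoryServiceData

func listAllGroupsSketch(dsData: DirectoryServiceData) async throws -> [DirectoryServiceData.GroupSummary] {
    var groups: [DirectoryServiceData.GroupSummary] = []
    var nextToken: String?
    repeat {
        let request = DirectoryServiceData.ListGroupsRequest(
            directoryId: "d-1234567890",
            maxResults: 100,
            nextToken: nextToken
        )
        let page = try await dsData.listGroups(request)
        groups.append(contentsOf: page.groups ?? [])
        nextToken = page.nextToken
    } while nextToken != nil
    return groups
}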
+ /// The domain name associated with the directory. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive. + public let realm: String? + + @inlinable + public init(directoryId: String, maxResults: Int? = nil, nextToken: String? = nil, realm: String? = nil) { + self.directoryId = directoryId + self.maxResults = maxResults + self.nextToken = nextToken + self.realm = realm + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.maxResults, forKey: .maxResults) + try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + try container.encodeIfPresent(self.realm, forKey: .realm) + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 6144) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, max: 255) + try self.validate(self.realm, name: "realm", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + case realm = "Realm" + } + } + + public struct ListGroupsResult: AWSDecodableShape { + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String? + /// The group information that the request returns. + public let groups: [GroupSummary]? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain name associated with the group. + public let realm: String? + + @inlinable + public init(directoryId: String? = nil, groups: [GroupSummary]? = nil, nextToken: String? = nil, realm: String? = nil) { + self.directoryId = directoryId + self.groups = groups + self.nextToken = nextToken + self.realm = realm + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case groups = "Groups" + case nextToken = "NextToken" + case realm = "Realm" + } + } + + public struct ListUsersRequest: AWSEncodableShape { + /// The identifier (ID) of the directory that's associated with the user. + public let directoryId: String + /// The maximum number of results to be returned per request. + public let maxResults: Int? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain name that's associated with the user. This parameter is optional, so you can return users outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD users are returned. This value is case insensitive. + public let realm: String? + + @inlinable + public init(directoryId: String, maxResults: Int? = nil, nextToken: String? = nil, realm: String? 
= nil) { + self.directoryId = directoryId + self.maxResults = maxResults + self.nextToken = nextToken + self.realm = realm + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.maxResults, forKey: .maxResults) + try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + try container.encodeIfPresent(self.realm, forKey: .realm) + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 6144) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, max: 255) + try self.validate(self.realm, name: "realm", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + case realm = "Realm" + } + } + + public struct ListUsersResult: AWSDecodableShape { + /// The identifier (ID) of the directory that's associated with the user. + public let directoryId: String? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain that's associated with the user. + public let realm: String? + /// The user information that the request returns. + public let users: [UserSummary]? + + @inlinable + public init(directoryId: String? = nil, nextToken: String? = nil, realm: String? = nil, users: [UserSummary]? = nil) { + self.directoryId = directoryId + self.nextToken = nextToken + self.realm = realm + self.users = users + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case nextToken = "NextToken" + case realm = "Realm" + case users = "Users" + } + } + + public struct Member: AWSDecodableShape { + /// The AD type of the member object. + public let memberType: MemberType + /// The name of the group member. + public let samAccountName: String + /// The unique security identifier (SID) of the group member. + public let sid: String + + @inlinable + public init(memberType: MemberType, samAccountName: String, sid: String) { + self.memberType = memberType + self.samAccountName = samAccountName + self.sid = sid + } + + private enum CodingKeys: String, CodingKey { + case memberType = "MemberType" + case samAccountName = "SAMAccountName" + case sid = "SID" + } + } + + public struct RemoveGroupMemberRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. 
If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that's associated with the member. + public let directoryId: String + /// The name of the group. + public let groupName: String + /// The SAMAccountName of the user, group, or computer to remove from the group. + public let memberName: String + /// The domain name that's associated with the group member. This parameter defaults to the Managed Microsoft AD domain. This parameter is optional and case insensitive. + public let memberRealm: String? + + @inlinable + public init(clientToken: String? = RemoveGroupMemberRequest.idempotencyToken(), directoryId: String, groupName: String, memberName: String, memberRealm: String? = nil) { + self.clientToken = clientToken + self.directoryId = directoryId + self.groupName = groupName + self.memberName = memberName + self.memberRealm = memberRealm + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encode(self.groupName, forKey: .groupName) + try container.encode(self.memberName, forKey: .memberName) + try container.encodeIfPresent(self.memberRealm, forKey: .memberRealm) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.groupName, name: "groupName", parent: name, max: 64) + try self.validate(self.groupName, name: "groupName", parent: name, min: 1) + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + try self.validate(self.memberName, name: "memberName", parent: name, max: 63) + try self.validate(self.memberName, name: "memberName", parent: name, min: 1) + try self.validate(self.memberName, name: "memberName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + try self.validate(self.memberRealm, name: "memberRealm", parent: name, max: 255) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, min: 1) + try self.validate(self.memberRealm, name: "memberRealm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case groupName = "GroupName" + case memberName = "MemberName" + case memberRealm = "MemberRealm" + } + } + + public struct RemoveGroupMemberResult: AWSDecodableShape { + public init() {} + } + + public struct SearchGroupsRequest: AWSEncodableShape { + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String + /// The maximum number of results to be returned per request. + public let maxResults: Int? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. 
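// Illustrative sketch (not part of the generated sources): removing a group member
// while pinning ClientToken, so a retried call stays idempotent within the 8-hour
// window described above. Assumes a removeGroupMember operation generated in
// DirectoryServiceData_api.swift elsewhere in this diff; names are placeholders.
import Foundation
import SotoDirectoryServiceData

func removeMemberSketch(dsData: DirectoryServiceData) async throws {
    let token = UUID().uuidString   // reuse the same token if the call is retried
    let request = DirectoryServiceData.RemoveGroupMemberRequest(
        clientToken: token,
        directoryId: "d-1234567890",
        groupName: "app-admins",
        memberName: "jdoe"
    )
    _ = try await dsData.removeGroupMember(request)
}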
+ public let nextToken: String? + /// The domain name that's associated with the group. This parameter is optional, so you can return groups outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD groups are returned. This value is case insensitive. + public let realm: String? + /// One or more data attributes that are used to search for a group. For a list of supported attributes, see Directory Service Data Attributes. + public let searchAttributes: [String] + /// The attribute value that you want to search for. Wildcard (*) searches aren't supported. For a list of supported attributes, see Directory Service Data Attributes. + public let searchString: String + + @inlinable + public init(directoryId: String, maxResults: Int? = nil, nextToken: String? = nil, realm: String? = nil, searchAttributes: [String], searchString: String) { + self.directoryId = directoryId + self.maxResults = maxResults + self.nextToken = nextToken + self.realm = realm + self.searchAttributes = searchAttributes + self.searchString = searchString + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.maxResults, forKey: .maxResults) + try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + try container.encodeIfPresent(self.realm, forKey: .realm) + try container.encode(self.searchAttributes, forKey: .searchAttributes) + try container.encode(self.searchString, forKey: .searchString) + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 6144) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, max: 255) + try self.validate(self.realm, name: "realm", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + try self.searchAttributes.forEach { + try validate($0, name: "searchAttributes[]", parent: name, max: 63) + try validate($0, name: "searchAttributes[]", parent: name, min: 1) + try validate($0, name: "searchAttributes[]", parent: name, pattern: "^[A-Za-z*][A-Za-z-*]*$") + } + try self.validate(self.searchAttributes, name: "searchAttributes", parent: name, max: 25) + try self.validate(self.searchAttributes, name: "searchAttributes", parent: name, min: 1) + try self.validate(self.searchString, name: "searchString", parent: name, max: 64) + try self.validate(self.searchString, name: "searchString", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + case realm = "Realm" + case searchAttributes = "SearchAttributes" + case searchString = "SearchString" + } + } + + public struct SearchGroupsResult: AWSDecodableShape { + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String? + /// The group information that the request returns. + public let groups: [Group]? 
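// Illustrative sketch (not part of the generated sources): searching groups by
// attribute value; wildcard (*) searches aren't supported in SearchString. Assumes a
// searchGroupsPaginator generated with the same shape as the searchUsersPaginator
// shown earlier in this diff; the directory ID and search terms are placeholders.
import SotoDirectoryServiceData

func searchGroupsSketch(dsData: DirectoryServiceData) async throws -> [DirectoryServiceData.Group] {
    let request = DirectoryServiceData.SearchGroupsRequest(
        directoryId: "d-1234567890",
        searchAttributes: ["SamAccountName"],
        searchString: "app-admins"
    )
    var matches: [DirectoryServiceData.Group] = []
    for try await page in dsData.searchGroupsPaginator(request) {
        matches.append(contentsOf: page.groups ?? [])
    }
    return matches
}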
+ /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain that's associated with the group. + public let realm: String? + + @inlinable + public init(directoryId: String? = nil, groups: [Group]? = nil, nextToken: String? = nil, realm: String? = nil) { + self.directoryId = directoryId + self.groups = groups + self.nextToken = nextToken + self.realm = realm + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case groups = "Groups" + case nextToken = "NextToken" + case realm = "Realm" + } + } + + public struct SearchUsersRequest: AWSEncodableShape { + /// The identifier (ID) of the directory that's associated with the user. + public let directoryId: String + /// The maximum number of results to be returned per request. + public let maxResults: Int? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain name that's associated with the user. This parameter is optional, so you can return users outside of your Managed Microsoft AD domain. When no value is defined, only your Managed Microsoft AD users are returned. This value is case insensitive. + public let realm: String? + /// One or more data attributes that are used to search for a user. For a list of supported attributes, see Directory Service Data Attributes. + public let searchAttributes: [String] + /// The attribute value that you want to search for. Wildcard (*) searches aren't supported. For a list of supported attributes, see Directory Service Data Attributes. + public let searchString: String + + @inlinable + public init(directoryId: String, maxResults: Int? = nil, nextToken: String? = nil, realm: String? = nil, searchAttributes: [String], searchString: String) { + self.directoryId = directoryId + self.maxResults = maxResults + self.nextToken = nextToken + self.realm = realm + self.searchAttributes = searchAttributes + self.searchString = searchString + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.maxResults, forKey: .maxResults) + try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + try container.encodeIfPresent(self.realm, forKey: .realm) + try container.encode(self.searchAttributes, forKey: .searchAttributes) + try container.encode(self.searchString, forKey: .searchString) + } + + public func validate(name: String) throws { + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 250) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 6144) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, max: 255) + try self.validate(self.realm, name: "realm", parent: name, min: 1) + try self.validate(self.realm, name: "realm", parent: name, pattern: "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$") + try self.searchAttributes.forEach { + try validate($0, name: "searchAttributes[]", parent: name, max: 63) + try validate($0, name: "searchAttributes[]", parent: name, min: 1) + try validate($0, name: "searchAttributes[]", parent: name, pattern: "^[A-Za-z*][A-Za-z-*]*$") + } + try self.validate(self.searchAttributes, name: "searchAttributes", parent: name, max: 25) + try self.validate(self.searchAttributes, name: "searchAttributes", parent: name, min: 1) + try self.validate(self.searchString, name: "searchString", parent: name, max: 64) + try self.validate(self.searchString, name: "searchString", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case maxResults = "MaxResults" + case nextToken = "NextToken" + case realm = "Realm" + case searchAttributes = "SearchAttributes" + case searchString = "SearchString" + } + } + + public struct SearchUsersResult: AWSDecodableShape { + /// The identifier (ID) of the directory where the address block is added. + public let directoryId: String? + /// An encoded paging token for paginated calls that can be passed back to retrieve the next page. + public let nextToken: String? + /// The domain that's associated with the user. + public let realm: String? + /// The user information that the request returns. + public let users: [User]? + + @inlinable + public init(directoryId: String? = nil, nextToken: String? = nil, realm: String? = nil, users: [User]? = nil) { + self.directoryId = directoryId + self.nextToken = nextToken + self.realm = realm + self.users = users + } + + private enum CodingKeys: String, CodingKey { + case directoryId = "DirectoryId" + case nextToken = "NextToken" + case realm = "Realm" + case users = "Users" + } + } + + public struct UpdateGroupRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. 
If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that's associated with the group. + public let directoryId: String + /// The scope of the AD group. For details, see Active Directory security groups. + public let groupScope: GroupScope? + /// The AD group type. For details, see Active Directory security group type. + public let groupType: GroupType? + /// An expression that defines one or more attributes with the data type and the value of each attribute. + public let otherAttributes: [String: AttributeValue]? + /// The name of the group. + public let samAccountName: String + /// The type of update to be performed. If no value exists for the attribute, use ADD. Otherwise, use REPLACE to change an attribute value or REMOVE to clear the attribute value. + public let updateType: UpdateType? + + @inlinable + public init(clientToken: String? = UpdateGroupRequest.idempotencyToken(), directoryId: String, groupScope: GroupScope? = nil, groupType: GroupType? = nil, otherAttributes: [String: AttributeValue]? = nil, samAccountName: String, updateType: UpdateType? = nil) { + self.clientToken = clientToken + self.directoryId = directoryId + self.groupScope = groupScope + self.groupType = groupType + self.otherAttributes = otherAttributes + self.samAccountName = samAccountName + self.updateType = updateType + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.groupScope, forKey: .groupScope) + try container.encodeIfPresent(self.groupType, forKey: .groupType) + try container.encodeIfPresent(self.otherAttributes, forKey: .otherAttributes) + try container.encode(self.samAccountName, forKey: .samAccountName) + try container.encodeIfPresent(self.updateType, forKey: .updateType) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.otherAttributes?.forEach { + try validate($0.key, name: "otherAttributes.key", parent: name, max: 63) + try validate($0.key, name: "otherAttributes.key", parent: name, min: 1) + try validate($0.key, name: "otherAttributes.key", parent: name, pattern: "^[A-Za-z*][A-Za-z-*]*$") + try $0.value.validate(name: "\(name).otherAttributes[\"\($0.key)\"]") + } + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, max: 25) + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 64) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$") + } + + private enum CodingKeys: 
String, CodingKey { + case clientToken = "ClientToken" + case groupScope = "GroupScope" + case groupType = "GroupType" + case otherAttributes = "OtherAttributes" + case samAccountName = "SAMAccountName" + case updateType = "UpdateType" + } + } + + public struct UpdateGroupResult: AWSDecodableShape { + public init() {} + } + + public struct UpdateUserRequest: AWSEncodableShape { + /// A unique and case-sensitive identifier that you provide to make sure the idempotency of the request, so multiple identical calls have the same effect as one single call. A client token is valid for 8 hours after the first request that uses it completes. After 8 hours, any request with the same client token is treated as a new request. If the request succeeds, any future uses of that token will be idempotent for another 8 hours. If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns an ConflictException. This parameter is optional when using the CLI or SDK. + public let clientToken: String? + /// The identifier (ID) of the directory that's associated with the user. + public let directoryId: String + /// The email address of the user. + public let emailAddress: String? + /// The first name of the user. + public let givenName: String? + /// An expression that defines one or more attribute names with the data type and value of each attribute. A key is an attribute name, and the value is a list of maps. For a list of supported attributes, see Directory Service Data Attributes. Attribute names are case insensitive. + public let otherAttributes: [String: AttributeValue]? + /// The name of the user. + public let samAccountName: String + /// The last name of the user. + public let surname: String? + /// The type of update to be performed. If no value exists for the attribute, use ADD. Otherwise, use REPLACE to change an attribute value or REMOVE to clear the attribute value. + public let updateType: UpdateType? + + @inlinable + public init(clientToken: String? = UpdateUserRequest.idempotencyToken(), directoryId: String, emailAddress: String? = nil, givenName: String? = nil, otherAttributes: [String: AttributeValue]? = nil, samAccountName: String, surname: String? = nil, updateType: UpdateType? = nil) { + self.clientToken = clientToken + self.directoryId = directoryId + self.emailAddress = emailAddress + self.givenName = givenName + self.otherAttributes = otherAttributes + self.samAccountName = samAccountName + self.surname = surname + self.updateType = updateType + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + request.encodeQuery(self.directoryId, key: "DirectoryId") + try container.encodeIfPresent(self.emailAddress, forKey: .emailAddress) + try container.encodeIfPresent(self.givenName, forKey: .givenName) + try container.encodeIfPresent(self.otherAttributes, forKey: .otherAttributes) + try container.encode(self.samAccountName, forKey: .samAccountName) + try container.encodeIfPresent(self.surname, forKey: .surname) + try container.encodeIfPresent(self.updateType, forKey: .updateType) + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 128) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\x00-\\x7F]+$") + try self.validate(self.directoryId, name: "directoryId", parent: name, pattern: "^d-[0-9a-f]{10}$") + try self.validate(self.emailAddress, name: "emailAddress", parent: name, max: 256) + try self.validate(self.emailAddress, name: "emailAddress", parent: name, min: 1) + try self.validate(self.givenName, name: "givenName", parent: name, max: 64) + try self.validate(self.givenName, name: "givenName", parent: name, min: 1) + try self.otherAttributes?.forEach { + try validate($0.key, name: "otherAttributes.key", parent: name, max: 63) + try validate($0.key, name: "otherAttributes.key", parent: name, min: 1) + try validate($0.key, name: "otherAttributes.key", parent: name, pattern: "^[A-Za-z*][A-Za-z-*]*$") + try $0.value.validate(name: "\(name).otherAttributes[\"\($0.key)\"]") + } + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, max: 25) + try self.validate(self.otherAttributes, name: "otherAttributes", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, max: 20) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, min: 1) + try self.validate(self.samAccountName, name: "samAccountName", parent: name, pattern: "^[\\w\\-.]+$") + try self.validate(self.surname, name: "surname", parent: name, max: 64) + try self.validate(self.surname, name: "surname", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "ClientToken" + case emailAddress = "EmailAddress" + case givenName = "GivenName" + case otherAttributes = "OtherAttributes" + case samAccountName = "SAMAccountName" + case surname = "Surname" + case updateType = "UpdateType" + } + } + + public struct UpdateUserResult: AWSDecodableShape { + public init() {} + } + + public struct User: AWSDecodableShape { + /// The distinguished name of the object. + public let distinguishedName: String? + /// The email address of the user. + public let emailAddress: String? + /// Indicates whether the user account is active. + public let enabled: Bool? + /// The first name of the user. + public let givenName: String? + /// An expression that includes one or more attributes, data types, and values of a user. + public let otherAttributes: [String: AttributeValue]? + /// The name of the user. + public let samAccountName: String + /// The unique security identifier (SID) of the user. + public let sid: String? + /// The last name of the user. + public let surname: String? + /// The UPN that is an internet-style login name for a user and based on the internet standard RFC 822. 
The UPN is shorter than the distinguished name and easier to remember. + public let userPrincipalName: String? + + @inlinable + public init(distinguishedName: String? = nil, emailAddress: String? = nil, enabled: Bool? = nil, givenName: String? = nil, otherAttributes: [String: AttributeValue]? = nil, samAccountName: String, sid: String? = nil, surname: String? = nil, userPrincipalName: String? = nil) { + self.distinguishedName = distinguishedName + self.emailAddress = emailAddress + self.enabled = enabled + self.givenName = givenName + self.otherAttributes = otherAttributes + self.samAccountName = samAccountName + self.sid = sid + self.surname = surname + self.userPrincipalName = userPrincipalName + } + + private enum CodingKeys: String, CodingKey { + case distinguishedName = "DistinguishedName" + case emailAddress = "EmailAddress" + case enabled = "Enabled" + case givenName = "GivenName" + case otherAttributes = "OtherAttributes" + case samAccountName = "SAMAccountName" + case sid = "SID" + case surname = "Surname" + case userPrincipalName = "UserPrincipalName" + } + } + + public struct UserSummary: AWSDecodableShape { + /// Indicates whether the user account is active. + public let enabled: Bool + /// The first name of the user. + public let givenName: String? + /// The name of the user. + public let samAccountName: String + /// The unique security identifier (SID) of the user. + public let sid: String + /// The last name of the user. + public let surname: String? + + @inlinable + public init(enabled: Bool, givenName: String? = nil, samAccountName: String, sid: String, surname: String? = nil) { + self.enabled = enabled + self.givenName = givenName + self.samAccountName = samAccountName + self.sid = sid + self.surname = surname + } + + private enum CodingKeys: String, CodingKey { + case enabled = "Enabled" + case givenName = "GivenName" + case samAccountName = "SAMAccountName" + case sid = "SID" + case surname = "Surname" + } + } +} + +// MARK: - Errors + +/// Error enum for DirectoryServiceData +public struct DirectoryServiceDataErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case conflictException = "ConflictException" + case directoryUnavailableException = "DirectoryUnavailableException" + case internalServerException = "InternalServerException" + case resourceNotFoundException = "ResourceNotFoundException" + case throttlingException = "ThrottlingException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize DirectoryServiceData + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// You don't have permission to perform the request or access the directory. It can also occur when the DirectoryId doesn't exist or the user, member, or group might be outside of your organizational unit (OU). Make sure that you have the authentication and authorization to perform the action. Review the directory information in the request, and make sure that the object isn't outside of your OU. 
+ public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// This error will occur when you try to create a resource that conflicts with an existing object. It can also occur when adding a member to a group that the member is already in. This error can be caused by a request sent within the 8-hour idempotency window with the same client token but different input parameters. Client tokens should not be re-used across different requests. After 8 hours, any request with the same client token is treated as a new request. + public static var conflictException: Self { .init(.conflictException) } + /// The request could not be completed due to a problem in the configuration or current state of the specified directory. + public static var directoryUnavailableException: Self { .init(.directoryUnavailableException) } + /// The operation didn't succeed because an internal error occurred. Try again later. + public static var internalServerException: Self { .init(.internalServerException) } + /// The resource couldn't be found. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// The limit on the number of requests per second has been exceeded. + public static var throttlingException: Self { .init(.throttlingException) } + /// The request isn't valid. Review the details in the error message to update the invalid parameters or values in your request. + public static var validationException: Self { .init(.validationException) } +} + +extension DirectoryServiceDataErrorType: Equatable { + public static func == (lhs: DirectoryServiceDataErrorType, rhs: DirectoryServiceDataErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension DirectoryServiceDataErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? "")" + } +} diff --git a/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift b/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift index 6719cb4a70..00657995f2 100644 --- a/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift +++ b/Sources/Soto/Services/DynamoDB/DynamoDB_shapes.swift @@ -6677,7 +6677,6 @@ extension DynamoDBErrorType: CustomStringConvertible { } } - extension DynamoDB.AttributeValue: Equatable { public static func == (lhs: Self, rhs: Self) -> Bool { switch (lhs, rhs) { @@ -6705,4 +6704,4 @@ extension DynamoDB.AttributeValue: Equatable { return false } } -} \ No newline at end of file +} diff --git a/Sources/Soto/Services/EC2/EC2_api.swift b/Sources/Soto/Services/EC2/EC2_api.swift index d362740d80..788314bc07 100644 --- a/Sources/Soto/Services/EC2/EC2_api.swift +++ b/Sources/Soto/Services/EC2/EC2_api.swift @@ -156,6 +156,42 @@ public struct EC2: AWSService { return try await self.acceptAddressTransfer(input, logger: logger) } + /// Accepts a request to assign billing of the available capacity of a shared Capacity Reservation to your + /// account. For more information, see + /// Billing assignment for shared Amazon EC2 Capacity Reservations. 
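
Turning to the EC2 changes: the new capacity-reservation billing operations are plain request/response calls. As a rough sketch (not part of the diff), the consumer account that received a billing request could accept it as follows, assuming a configured AWSClient; the convenience overload used here is the one declared just below.

import SotoEC2

// Sketch only: accept a pending billing assignment for a shared Capacity Reservation.
// The reservation ID is a placeholder; set dryRun: true to check permissions
// without making the change.
func acceptBilling(using client: AWSClient) async throws {
    let ec2 = EC2(client: client, region: .useast1)
    _ = try await ec2.acceptCapacityReservationBillingOwnership(
        capacityReservationId: "cr-0123456789abcdef0", // hypothetical reservation ID
        dryRun: false
    )
}
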
+ @Sendable + @inlinable + public func acceptCapacityReservationBillingOwnership(_ input: AcceptCapacityReservationBillingOwnershipRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AcceptCapacityReservationBillingOwnershipResult { + try await self.client.execute( + operation: "AcceptCapacityReservationBillingOwnership", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Accepts a request to assign billing of the available capacity of a shared Capacity Reservation to your + /// account. For more information, see + /// Billing assignment for shared Amazon EC2 Capacity Reservations. + /// + /// Parameters: + /// - capacityReservationId: The ID of the Capacity Reservation for which to accept the request. + /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - logger: Logger use during operation + @inlinable + public func acceptCapacityReservationBillingOwnership( + capacityReservationId: String? = nil, + dryRun: Bool? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> AcceptCapacityReservationBillingOwnershipResult { + let input = AcceptCapacityReservationBillingOwnershipRequest( + capacityReservationId: capacityReservationId, + dryRun: dryRun + ) + return try await self.acceptCapacityReservationBillingOwnership(input, logger: logger) + } + /// Accepts the Convertible Reserved Instance exchange quote described in the GetReservedInstancesExchangeQuote call. @Sendable @inlinable @@ -418,7 +454,7 @@ public struct EC2: AWSService { /// - customerOwnedIpv4Pool: The ID of a customer-owned address pool. Use this parameter to let Amazon EC2 select an address from the address pool. Alternatively, specify a specific address from the address pool. /// - domain: The network (vpc). /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - ipamPoolId: The ID of an IPAM pool. + /// - ipamPoolId: The ID of an IPAM pool which has an Amazon-provided or BYOIP public IPv4 CIDR provisioned to it. For more information, see Allocate sequential Elastic IP addresses from an IPAM pool in the Amazon VPC IPAM User Guide. /// - networkBorderGroup: A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. /// - publicIpv4Pool: The ID of an address pool that you own. Use this parameter to let Amazon EC2 select an address from the address pool. To specify a specific address from the address pool, use the Address parameter instead. /// - tagSpecifications: The tags to assign to the Elastic IP address. @@ -770,6 +806,47 @@ public struct EC2: AWSService { return try await self.associateAddress(input, logger: logger) } + /// Initiates a request to assign billing of the unused capacity of a shared Capacity Reservation to a consumer + /// account that is consolidated under the same Amazon Web Services organizations payer account. 
For more information, see + /// Billing assignment for shared + /// Amazon EC2 Capacity Reservations. + @Sendable + @inlinable + public func associateCapacityReservationBillingOwner(_ input: AssociateCapacityReservationBillingOwnerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateCapacityReservationBillingOwnerResult { + try await self.client.execute( + operation: "AssociateCapacityReservationBillingOwner", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Initiates a request to assign billing of the unused capacity of a shared Capacity Reservation to a consumer + /// account that is consolidated under the same Amazon Web Services organizations payer account. For more information, see + /// Billing assignment for shared + /// Amazon EC2 Capacity Reservations. + /// + /// Parameters: + /// - capacityReservationId: The ID of the Capacity Reservation. + /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - unusedReservationBillingOwnerId: The ID of the consumer account to which assign billing. + /// - logger: Logger use during operation + @inlinable + public func associateCapacityReservationBillingOwner( + capacityReservationId: String? = nil, + dryRun: Bool? = nil, + unusedReservationBillingOwnerId: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> AssociateCapacityReservationBillingOwnerResult { + let input = AssociateCapacityReservationBillingOwnerRequest( + capacityReservationId: capacityReservationId, + dryRun: dryRun, + unusedReservationBillingOwnerId: unusedReservationBillingOwnerId + ) + return try await self.associateCapacityReservationBillingOwner(input, logger: logger) + } + /// Associates a target network with a Client VPN endpoint. A target network is a subnet in a VPC. You can associate multiple subnets from the same VPC with a Client VPN endpoint. You can associate only one subnet in each Availability Zone. We recommend that you associate at least two subnets to provide Availability Zone redundancy. If you specified a VPC when you created the Client VPN endpoint or if you have previous subnet associations, the specified subnet must be in the same VPC. To specify a subnet that's in a different VPC, you must first modify the Client VPN endpoint (ModifyClientVpnEndpoint) and change the VPC that's associated with it. @Sendable @inlinable @@ -1568,7 +1645,7 @@ public struct EC2: AWSService { return try await self.attachVolume(input, logger: logger) } - /// Attaches a virtual private gateway to a VPC. You can attach one virtual private gateway to one VPC at a time. For more information, see Amazon Web Services Site-to-Site VPN in the Amazon Web Services Site-to-Site VPN User Guide. + /// Attaches an available virtual private gateway to a VPC. You can attach one virtual private gateway to one VPC at a time. For more information, see Amazon Web Services Site-to-Site VPN in the Amazon Web Services Site-to-Site VPN User Guide. @Sendable @inlinable public func attachVpnGateway(_ input: AttachVpnGatewayRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AttachVpnGatewayResult { @@ -1581,7 +1658,7 @@ public struct EC2: AWSService { logger: logger ) } - /// Attaches a virtual private gateway to a VPC. 
You can attach one virtual private gateway to one VPC at a time. For more information, see Amazon Web Services Site-to-Site VPN in the Amazon Web Services Site-to-Site VPN User Guide. + /// Attaches an available virtual private gateway to a VPC. You can attach one virtual private gateway to one VPC at a time. For more information, see Amazon Web Services Site-to-Site VPN in the Amazon Web Services Site-to-Site VPN User Guide. /// /// Parameters: /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -1914,7 +1991,7 @@ public struct EC2: AWSService { return try await self.cancelCapacityReservationFleets(input, logger: logger) } - /// Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception. For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI. + /// Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception. @Sendable @inlinable public func cancelConversionTask(_ input: CancelConversionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1927,7 +2004,7 @@ public struct EC2: AWSService { logger: logger ) } - /// Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception. For more information, see Importing a Virtual Machine Using the Amazon EC2 CLI. + /// Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is in the process of transferring the final disk image, the command fails and returns an exception. /// /// Parameters: /// - conversionTaskId: The ID of the conversion task. @@ -2157,7 +2234,7 @@ public struct EC2: AWSService { /// Determines whether a product code is associated with an instance. This action can only be used by the owner of the product code. It is useful when a product code owner must verify whether another user's instance is eligible for support. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the instance. /// - productCode: The product code. 
This must be a product code that you own. /// - logger: Logger use during operation @@ -3481,7 +3558,7 @@ public struct EC2: AWSService { /// - description: A description for the IPAM pool. /// - dryRun: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - ipamScopeId: The ID of the scope in which you would like to create the IPAM pool. - /// - locale: The locale for the pool should be one of the following: An Amazon Web Services Region where you want this IPAM pool to be available for allocations. The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope. If you do not choose a locale, resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone. + /// - locale: The locale for the pool should be one of the following: An Amazon Web Services Region where you want this IPAM pool to be available for allocations. The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope. If you do not choose a locale, resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone. Default is none and means any locale. /// - publicIpSource: The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is byoip. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool if PublicIpSource is amazon. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide. /// - publiclyAdvertisable: Determines if the pool is publicly advertisable. This option is not available for pools with AddressFamily set to ipv4. /// - sourceIpamPoolId: The ID of the source IPAM pool. Use this option to create a pool within an existing pool. Note that the CIDR you provision for the pool within the source pool must be available in the source pool's CIDR range. @@ -4350,7 +4427,7 @@ public struct EC2: AWSService { /// Creates a placement group in which to launch instances. The strategy of the placement group determines how the instances are organized within the group. A cluster placement group is a logical grouping of instances within a single Availability Zone that benefit from low network latency, high network throughput. A spread placement group places instances on distinct hardware. A partition placement group places groups of instances in different partitions, where instances in one partition do not share the same hardware with instances in another partition. For more information, see Placement groups in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - groupName: A name for the placement group. Must be unique within the scope of your account for the Region. Constraints: Up to 255 ASCII characters /// - partitionCount: The number of partitions. Valid only when Strategy is set to partition. /// - spreadLevel: Determines how placement groups spread instances. Host – You can use host only with Outpost placement groups. Rack – No usage restrictions. @@ -4819,7 +4896,7 @@ public struct EC2: AWSService { /// Creates a data feed for Spot Instances, enabling you to view Spot Instance usage logs. You can create one data feed per Amazon Web Services account. For more information, see Spot Instance data feed in the Amazon EC2 User Guide. /// /// Parameters: - /// - bucket: The name of the Amazon S3 bucket in which to store the Spot Instance data feed. For more information about bucket names, see Rules for bucket naming in the Amazon S3 Developer Guide. + /// - bucket: The name of the Amazon S3 bucket in which to store the Spot Instance data feed. For more information about bucket names, see Bucket naming rules in the Amazon S3 User Guide. /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - prefix: The prefix for the data feed file names. /// - logger: Logger use during operation @@ -7581,7 +7658,7 @@ public struct EC2: AWSService { /// Deletes the specified placement group. You must terminate all instances in the placement group before you can delete the placement group. For more information, see Placement groups in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - groupName: The name of the placement group. /// - logger: Logger use during operation @inlinable @@ -9512,6 +9589,54 @@ public struct EC2: AWSService { return try await self.describeCapacityBlockOfferings(input, logger: logger) } + /// Describes a request to assign the billing of the unused capacity of a Capacity Reservation. + /// For more information, see + /// Billing assignment for shared Amazon EC2 Capacity Reservations. 
+ @Sendable + @inlinable + public func describeCapacityReservationBillingRequests(_ input: DescribeCapacityReservationBillingRequestsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeCapacityReservationBillingRequestsResult { + try await self.client.execute( + operation: "DescribeCapacityReservationBillingRequests", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Describes a request to assign the billing of the unused capacity of a Capacity Reservation. + /// For more information, see + /// Billing assignment for shared Amazon EC2 Capacity Reservations. + /// + /// Parameters: + /// - capacityReservationIds: The ID of the Capacity Reservation. + /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - filters: One or more filters. status - The state of the request (pending | accepted | + /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. + /// - nextToken: The token to use to retrieve the next page of results. + /// - role: Specify one of the following: odcr-owner - If you are the Capacity Reservation owner, specify this + /// - logger: Logger use during operation + @inlinable + public func describeCapacityReservationBillingRequests( + capacityReservationIds: [String]? = nil, + dryRun: Bool? = nil, + filters: [Filter]? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + role: CallerRole? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeCapacityReservationBillingRequestsResult { + let input = DescribeCapacityReservationBillingRequestsRequest( + capacityReservationIds: capacityReservationIds, + dryRun: dryRun, + filters: filters, + maxResults: maxResults, + nextToken: nextToken, + role: role + ) + return try await self.describeCapacityReservationBillingRequests(input, logger: logger) + } + /// Describes one or more Capacity Reservation Fleets. @Sendable @inlinable @@ -10088,7 +10213,7 @@ public struct EC2: AWSService { return try await self.describeEgressOnlyInternetGateways(input, logger: logger) } - /// Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances. Describes the Elastic Graphics accelerator associated with your instances. + /// Amazon Elastic Graphics reached end of life on January 8, 2024. Describes the Elastic Graphics accelerator associated with your instances. @Sendable @inlinable public func describeElasticGpus(_ input: DescribeElasticGpusRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeElasticGpusResult { @@ -10101,7 +10226,7 @@ public struct EC2: AWSService { logger: logger ) } - /// Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4, G5, or G6 instances. Describes the Elastic Graphics accelerator associated with your instances. + /// Amazon Elastic Graphics reached end of life on January 8, 2024. Describes the Elastic Graphics accelerator associated with your instances. 
/// /// Parameters: /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. @@ -10937,7 +11062,7 @@ public struct EC2: AWSService { /// /// Parameters: /// - attribute: The instance attribute. Note: The enaSupport attribute is not supported at this time. - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the instance. /// - logger: Logger use during operation @inlinable @@ -11012,7 +11137,7 @@ public struct EC2: AWSService { /// Describes the credit option for CPU usage of the specified burstable performance instances. The credit options are standard and unlimited. If you do not specify an instance ID, Amazon EC2 returns burstable performance instances with the unlimited credit option, as well as instances that were previously configured as T2, T3, and T3a with the unlimited credit option. For example, if you resize a T2 instance, while it is configured as unlimited, to an M4 instance, Amazon EC2 returns the M4 instance. If you specify one or more instance IDs, Amazon EC2 returns the credit option (standard or unlimited) of those instances. If you specify an instance ID that is not valid, such as an instance that is not a burstable performance instance, an error is returned. Recently terminated instances might appear in the returned results. This interval is usually less than one hour. If an Availability Zone is experiencing a service disruption and you specify instance IDs in the affected zone, or do not specify any instance IDs at all, the call fails. If you specify only instance IDs in an unaffected zone, the call works normally. For more information, see Burstable performance instances in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. instance-id - The ID of the instance. /// - instanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 1000 explicitly specified instance IDs. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. @@ -11123,7 +11248,7 @@ public struct EC2: AWSService { /// Describes the status of the specified instances or all of your instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances. 
Instance status includes the following components: Status checks - Amazon EC2 performs status checks on running EC2 instances to identify hardware and software issues. For more information, see Status checks for your instances and Troubleshoot instances with failed status checks in the Amazon EC2 User Guide. Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, or terminate) for your instances related to hardware issues, software updates, or system maintenance. For more information, see Scheduled events for your instances in the Amazon EC2 User Guide. Instance state - You can manage your instances from the moment you launch them through their termination. For more information, see Instance lifecycle in the Amazon EC2 User Guide. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. availability-zone - The Availability Zone of the instance. event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop). event.description - A description of the event. event.instance-event-id - The ID of the event whose date and time you are modifying. event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before-deadline - The deadline for starting the event (for example, 2014-09-15T17:15:20.000Z). instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data). instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data). system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). attached-ebs-status.status - The status of the attached EBS volume for the instance (ok | impaired | initializing | insufficient-data | not-applicable). /// - includeAllInstances: When true, includes the health status for all instances. When false, includes the health status for running instances only. Default: false /// - instanceIds: The instance IDs. Default: Describes all your instances. 
Constraints: Maximum 100 explicitly specified instance IDs. @@ -11151,7 +11276,7 @@ public struct EC2: AWSService { return try await self.describeInstanceStatus(input, logger: logger) } - /// Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads. Limitations Supported zones Availability Zone Local Zone Supported instance types hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. + /// Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads. Limitations Supported zones Availability Zone Local Zone Supported instance types hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. @Sendable @inlinable public func describeInstanceTopology(_ input: DescribeInstanceTopologyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeInstanceTopologyResult { @@ -11164,10 +11289,10 @@ public struct EC2: AWSService { logger: logger ) } - /// Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads. Limitations Supported zones Availability Zone Local Zone Supported instance types hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. + /// Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads. Limitations Supported zones Availability Zone Local Zone Supported instance types hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide. 
/// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. availability-zone - The name of the Availability Zone (for example, us-west-2a) or Local Zone (for example, us-west-2-lax-1b) that the instance is in. instance-type - The instance type (for example, p4d.24xlarge) or instance family (for example, p4d*). You can use the * wildcard to match zero or more characters, or the ? wildcard to match zero or one character. zone-id - The ID of the Availability Zone (for example, usw2-az2) or Local Zone (for example, usw2-lax1-az1) that the instance is in. /// - groupNames: The name of the placement group that each instance is in. Constraints: Maximum 100 explicitly specified placement group names. /// - instanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 100 explicitly specified instance IDs. @@ -11293,8 +11418,8 @@ public struct EC2: AWSService { /// Describes the specified instances or all instances. If you specify instance IDs, the output includes information for only the specified instances. If you specify filters, the output includes information for only those instances that meet the filter criteria. If you do not specify instance IDs or filters, the output includes information for all instances, which can affect performance. We recommend that you use pagination to ensure that the operation returns quickly and successfully. If you specify an instance ID that is not valid, an error is returned. If you specify an instance that you do not own, it is not included in the output. Recently terminated instances might appear in the returned results. This interval is usually less than one hour. If you describe instances in the rare case where an Availability Zone is experiencing a service disruption and you specify instance IDs that are in the affected zone, or do not specify any instance IDs at all, the call fails. If you describe instances and specify only instance IDs that are in an unaffected zone, the call works normally. We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. 
block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). 
launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. 
network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. 
platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. 
tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. 
iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. 
network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. 
network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). 
root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -12627,7 +12752,7 @@ public struct EC2: AWSService { /// Describes the specified placement groups or all of your placement groups. To describe a specific placement group that is shared with your account, you must specify the ID of the placement group using the GroupId parameter. Specifying the name of a shared placement group using the GroupNames parameter will result in an error. For more information, see Placement groups in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. group-name - The name of the placement group. group-arn - The Amazon Resource Name (ARN) of the placement group. spread-level - The spread level for the placement group (host | rack). 
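The DescribeInstances documentation above recommends paginated requests. As a rough illustration (not part of this change), the pagination loop can be driven through the filters, maxResults, and nextToken parameters listed above; the EC2.Filter, reservations, and instances shapes used below are assumed from the generated service model, and construction of the EC2 service object is elided.

    import SotoEC2

    // Rough sketch: page through running instances using the parameters
    // documented above. `ec2` is an already-configured EC2 service object.
    func printRunningInstanceIds(ec2: EC2) async throws {
        var nextToken: String?
        repeat {
            let page = try await ec2.describeInstances(
                filters: [EC2.Filter(name: "instance-state-name", values: ["running"])],
                maxResults: 100,
                nextToken: nextToken
            )
            for reservation in page.reservations ?? [] {
                for instance in reservation.instances ?? [] {
                    print(instance.instanceId ?? "unknown")
                }
            }
            nextToken = page.nextToken
        } while nextToken != nil
    }

Where Soto generates paginator overloads for this operation, the describeInstancesPaginator variant wraps the same loop and is usually the more convenient option.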
state - The state of the placement group (pending | available | deleting | deleted). strategy - The strategy of the placement group (cluster | spread | partition). tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. /// - groupIds: The IDs of the placement groups. /// - groupNames: The names of the placement groups. Constraints: You can specify a name only if the placement group is owned by your account. If a placement group is shared with your account, specifying the name results in an error. You must use the GroupId parameter instead. @@ -16093,6 +16218,47 @@ public struct EC2: AWSService { return try await self.disassociateAddress(input, logger: logger) } + /// Cancels a pending request to assign billing of the unused capacity of a Capacity Reservation to a + /// consumer account, or revokes a request that has already been accepted. For more information, see + /// Billing assignment for + /// shared Amazon EC2 Capacity Reservations. + @Sendable + @inlinable + public func disassociateCapacityReservationBillingOwner(_ input: DisassociateCapacityReservationBillingOwnerRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateCapacityReservationBillingOwnerResult { + try await self.client.execute( + operation: "DisassociateCapacityReservationBillingOwner", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Cancels a pending request to assign billing of the unused capacity of a Capacity Reservation to a + /// consumer account, or revokes a request that has already been accepted. For more information, see + /// Billing assignment for + /// shared Amazon EC2 Capacity Reservations. + /// + /// Parameters: + /// - capacityReservationId: The ID of the Capacity Reservation. + /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - unusedReservationBillingOwnerId: The ID of the consumer account to which the request was sent. + /// - logger: Logger use during operation + @inlinable + public func disassociateCapacityReservationBillingOwner( + capacityReservationId: String? = nil, + dryRun: Bool? = nil, + unusedReservationBillingOwnerId: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisassociateCapacityReservationBillingOwnerResult { + let input = DisassociateCapacityReservationBillingOwnerRequest( + capacityReservationId: capacityReservationId, + dryRun: dryRun, + unusedReservationBillingOwnerId: unusedReservationBillingOwnerId + ) + return try await self.disassociateCapacityReservationBillingOwner(input, logger: logger) + } + /// Disassociates a target network from the specified Client VPN endpoint. 
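For the newly added billing-owner operation above, the convenience overload can be called directly. A minimal sketch with placeholder IDs (service-object setup elided):

    import SotoEC2

    // Rough sketch: cancel or revoke a pending billing-owner assignment for a
    // shared Capacity Reservation. Both IDs are placeholders.
    func cancelBillingAssignment(ec2: EC2) async throws {
        _ = try await ec2.disassociateCapacityReservationBillingOwner(
            capacityReservationId: "cr-0123456789abcdef0",
            unusedReservationBillingOwnerId: "111122223333"
        )
    }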
When you disassociate the /// last target network from a Client VPN, the following happens: The route that was automatically added for the VPC is deleted All active client connections are terminated New client connections are disallowed The Client VPN endpoint's status changes to pending-associate @Sendable @@ -17596,7 +17762,7 @@ public struct EC2: AWSService { /// Gets the console output for the specified instance. For Linux instances, the instance console output displays the exact console output that would normally be displayed on a physical monitor attached to a computer. For Windows instances, the instance console output includes the last three system event log errors. For more information, see Instance console output in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the instance. /// - latest: When enabled, retrieves the latest console output for the instance. Default: disabled (false) /// - logger: Logger use during operation @@ -17631,7 +17797,7 @@ public struct EC2: AWSService { /// Retrieve a JPG-format screenshot of a running instance to help with troubleshooting. The returned content is Base64-encoded. For more information, see Instance console output in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the instance. /// - wakeUp: When set to true, acts as keystroke input and wakes up an instance that's in standby or "sleep" mode. /// - logger: Logger use during operation @@ -17666,7 +17832,7 @@ public struct EC2: AWSService { /// Describes the default credit option for CPU usage of a burstable performance instance family. For more information, see Burstable performance instances in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceFamily: The instance family. 
/// - logger: Logger use during operation @inlinable @@ -17893,7 +18059,7 @@ public struct EC2: AWSService { /// Gets the default instance metadata service (IMDS) settings that are set at the account level in the specified Amazon Web Services
 Region. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - logger: Logger use during operation @inlinable public func getInstanceMetadataDefaults( @@ -18004,7 +18170,7 @@ public struct EC2: AWSService { /// A binary representation of the UEFI variable store. Only non-volatile variables are stored. This is a base64 encoded and zlib compressed binary value that must be properly encoded. When you use register-image to create an AMI, you can create an exact copy of your variable store by passing the UEFI data in the UefiData parameter. You can modify the UEFI data by using the python-uefivars tool on GitHub. You can use the tool to convert the UEFI data into a human-readable format (JSON), which you can inspect and modify, and then convert back into the binary format to use with register-image. For more information, see UEFI Secure Boot in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the instance from which to retrieve the UEFI data. /// - logger: Logger use during operation @inlinable @@ -18540,7 +18706,7 @@ public struct EC2: AWSService { /// Retrieves the encrypted administrator password for a running Windows instance. The Windows password is generated at boot by the EC2Config service or EC2Launch scripts (Windows Server 2016 and later). This usually only happens the first time an instance is launched. For more information, see EC2Config and EC2Launch in the Amazon EC2 User Guide. For the EC2Config service, the password is not generated for rebundled AMIs unless Ec2SetPassword is enabled before bundling. The password is encrypted using the key pair that you specified when you launched the instance. You must provide the corresponding key pair file. When you launch an instance, password generation and encryption may take a few minutes. If you try to retrieve the password before it's available, the output returns an empty string. We recommend that you wait up to 15 minutes after launching an instance before trying to retrieve the generated password. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
+ /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the Windows instance. /// - logger: Logger use during operation @inlinable @@ -19358,7 +19524,7 @@ public struct EC2: AWSService { return try await self.importImage(input, logger: logger) } - /// We recommend that you use the ImportImage API. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide. Creates an import instance task using metadata from the specified disk image. This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file. This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. For information about the import manifest referenced by this API action, see VM Import Manifest. + /// We recommend that you use the ImportImage API instead. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide. Creates an import instance task using metadata from the specified disk image. This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. For information about the import manifest referenced by this API action, see VM Import Manifest. This API action is not supported by the Command Line Interface (CLI). @Sendable @inlinable public func importInstance(_ input: ImportInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ImportInstanceResult { @@ -19371,7 +19537,7 @@ public struct EC2: AWSService { logger: logger ) } - /// We recommend that you use the ImportImage API. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide. Creates an import instance task using metadata from the specified disk image. This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file. This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. For information about the import manifest referenced by this API action, see VM Import Manifest. + /// We recommend that you use the ImportImage API instead. For more information, see Importing a VM as an image using VM Import/Export in the VM Import/Export User Guide. Creates an import instance task using metadata from the specified disk image. This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. For information about the import manifest referenced by this API action, see VM Import Manifest. This API action is not supported by the Command Line Interface (CLI). /// /// Parameters: /// - description: A description for the instance being imported. @@ -19492,7 +19658,7 @@ public struct EC2: AWSService { return try await self.importSnapshot(input, logger: logger) } - /// Creates an import volume task using metadata from the specified disk image. This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead. 
This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file. For information about the import manifest referenced by this API action, see VM Import Manifest. + /// This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead. Creates an import volume task using metadata from the specified disk image. For information about the import manifest referenced by this API action, see VM Import Manifest. This API action is not supported by the Command Line Interface (CLI). @Sendable @inlinable public func importVolume(_ input: ImportVolumeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ImportVolumeResult { @@ -19505,7 +19671,7 @@ public struct EC2: AWSService { logger: logger ) } - /// Creates an import volume task using metadata from the specified disk image. This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead. This API action is not supported by the Command Line Interface (CLI). For information about using the Amazon EC2 CLI, which is deprecated, see Importing Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file. For information about the import manifest referenced by this API action, see VM Import Manifest. + /// This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage instead. To import a disk to a snapshot, use ImportSnapshot instead. Creates an import volume task using metadata from the specified disk image. For information about the import manifest referenced by this API action, see VM Import Manifest. This API action is not supported by the Command Line Interface (CLI). /// /// Parameters: /// - availabilityZone: The Availability Zone for the resulting EBS volume. @@ -19911,7 +20077,7 @@ public struct EC2: AWSService { /// /// Parameters: /// - cpuCredits: The credit option for CPU usage of the instance family. Valid Values: standard | unlimited - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceFamily: The instance family. /// - logger: Logger use during operation @inlinable @@ -20257,7 +20423,7 @@ public struct EC2: AWSService { /// - blockDeviceMappings: Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated. You can't modify the DeleteOnTermination attribute for volumes that are attached to Fargate tasks. To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Update the block device mapping when launching an instance in the Amazon EC2 User Guide. 
/// - disableApiStop: Indicates whether an instance is enabled for stop protection. For more information, see Enable stop protection for your instance. /// - disableApiTermination: If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot Instances. - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - ebsOptimized: Specifies whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance. /// - enaSupport: Set to true to enable enhanced networking with ENA for the instance. This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable. /// - groups: Replaces the security groups of the instance with the specified security groups. You must specify the ID of at least one security group, even if it's just the default security group for the VPC. @@ -20353,6 +20519,54 @@ public struct EC2: AWSService { return try await self.modifyInstanceCapacityReservationAttributes(input, logger: logger) } + /// By default, all vCPUs for the instance type are active when you launch an instance. When you + /// configure the number of active vCPUs for the instance, it can help you save on licensing costs and + /// optimize performance. The base cost of the instance remains unchanged. The number of active vCPUs equals the number of threads per CPU core multiplied by the number + /// of cores. The instance must be in a Stopped state before you make changes. Some instance type options do not support this capability. For more information, see + /// Supported CPU + /// options in the Amazon EC2 User Guide. + @Sendable + @inlinable + public func modifyInstanceCpuOptions(_ input: ModifyInstanceCpuOptionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyInstanceCpuOptionsResult { + try await self.client.execute( + operation: "ModifyInstanceCpuOptions", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// By default, all vCPUs for the instance type are active when you launch an instance. When you + /// configure the number of active vCPUs for the instance, it can help you save on licensing costs and + /// optimize performance. The base cost of the instance remains unchanged. The number of active vCPUs equals the number of threads per CPU core multiplied by the number + /// of cores. The instance must be in a Stopped state before you make changes. Some instance type options do not support this capability. For more information, see + /// Supported CPU + /// options in the Amazon EC2 User Guide. + /// + /// Parameters: + /// - coreCount: The number of CPU cores to activate for the specified instance. 
+ /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - instanceId: The ID of the instance to update. + /// - threadsPerCore: The number of threads to run for each CPU core. + /// - logger: Logger use during operation + @inlinable + public func modifyInstanceCpuOptions( + coreCount: Int? = nil, + dryRun: Bool? = nil, + instanceId: String? = nil, + threadsPerCore: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ModifyInstanceCpuOptionsResult { + let input = ModifyInstanceCpuOptionsRequest( + coreCount: coreCount, + dryRun: dryRun, + instanceId: instanceId, + threadsPerCore: threadsPerCore + ) + return try await self.modifyInstanceCpuOptions(input, logger: logger) + } + /// Modifies the credit option for CPU usage on a running or stopped burstable performance instance. The credit options are standard and unlimited. For more information, see Burstable performance instances in the Amazon EC2 User Guide. @Sendable @inlinable @@ -20370,7 +20584,7 @@ public struct EC2: AWSService { /// /// Parameters: /// - clientToken: A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceCreditSpecifications: Information about the credit option for CPU usage. /// - logger: Logger use during operation @inlinable @@ -20404,7 +20618,7 @@ public struct EC2: AWSService { /// Modifies the start time for a scheduled Amazon EC2 instance event. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceEventId: The ID of the event whose date and time you are modifying. /// - instanceId: The ID of the instance with the scheduled event. /// - notBefore: The new date and time when the event will take place. @@ -20518,7 +20732,7 @@ public struct EC2: AWSService { /// Modifies the default instance metadata service (IMDS) settings at the account level in the specified Amazon Web Services
 Region. To remove a parameter's account-level default setting, specify no-preference. If an account-level setting is cleared with no-preference, then the instance launch considers the other instance metadata settings. For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - httpEndpoint: Enables or disables the IMDS endpoint on an instance. When disabled, the instance metadata can't be accessed. /// - httpPutResponseHopLimit: The maximum number of hops that the metadata token can travel. To indicate no preference, specify -1. Possible values: Integers from 1 to 64, and -1 to indicate no preference /// - httpTokens: Indicates whether IMDSv2 is required. optional – IMDSv2 is optional, which means that you can use either IMDSv2 or IMDSv1. required – IMDSv2 is required, which means that IMDSv1 is disabled, and you must use IMDSv2. @@ -22536,7 +22750,7 @@ public struct EC2: AWSService { /// Enables detailed monitoring for a running instance. Otherwise, basic monitoring is enabled. For more information, see Monitor your instances using CloudWatch in the Amazon EC2 User Guide. To disable detailed monitoring, see UnmonitorInstances. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceIds: The IDs of the instances. /// - logger: Logger use during operation @inlinable @@ -23024,7 +23238,7 @@ public struct EC2: AWSService { /// Requests a reboot of the specified instances. This operation is asynchronous; it only queues a request to reboot the specified instances. The operation succeeds if the instances are valid and belong to you. Requests to reboot terminated instances are ignored. If an instance does not cleanly shut down within a few minutes, Amazon EC2 performs a hard reboot. For more information about troubleshooting, see Troubleshoot an unreachable instance in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceIds: The instance IDs. 
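To illustrate the MonitorInstances overload documented above, a minimal sketch follows; the instanceMonitorings result field is assumed from the generated model and the instance IDs are placeholders.

    import SotoEC2

    // Rough sketch: enable detailed monitoring for two instances, then report
    // which instances the call touched.
    func enableDetailedMonitoring(ec2: EC2) async throws {
        let result = try await ec2.monitorInstances(
            instanceIds: ["i-0123456789abcdef0", "i-0fedcba9876543210"]
        )
        for entry in result.instanceMonitorings ?? [] {
            print("monitoring updated for \(entry.instanceId ?? "unknown")")
        }
    }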
/// - logger: Logger use during operation @inlinable @@ -23228,6 +23442,42 @@ public struct EC2: AWSService { return try await self.registerTransitGatewayMulticastGroupSources(input, logger: logger) } + /// Rejects a request to assign billing of the available capacity of a shared Capacity Reservation + /// to your account. For more information, see + /// Billing assignment for shared Amazon EC2 Capacity Reservations. + @Sendable + @inlinable + public func rejectCapacityReservationBillingOwnership(_ input: RejectCapacityReservationBillingOwnershipRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RejectCapacityReservationBillingOwnershipResult { + try await self.client.execute( + operation: "RejectCapacityReservationBillingOwnership", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Rejects a request to assign billing of the available capacity of a shared Capacity Reservation + /// to your account. For more information, see + /// Billing assignment for shared Amazon EC2 Capacity Reservations. + /// + /// Parameters: + /// - capacityReservationId: The ID of the Capacity Reservation for which to reject the request. + /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - logger: Logger use during operation + @inlinable + public func rejectCapacityReservationBillingOwnership( + capacityReservationId: String? = nil, + dryRun: Bool? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> RejectCapacityReservationBillingOwnershipResult { + let input = RejectCapacityReservationBillingOwnershipRequest( + capacityReservationId: capacityReservationId, + dryRun: dryRun + ) + return try await self.rejectCapacityReservationBillingOwnership(input, logger: logger) + } + /// Rejects a request to associate cross-account subnets with a transit gateway multicast domain. @Sendable @inlinable @@ -23850,8 +24100,7 @@ public struct EC2: AWSService { /// Submits feedback about the status of an instance. The instance must be in the running state. If your experience with the instance differs from the instance status returned by DescribeInstanceStatus, use ReportInstanceStatus to report your experience with the instance. Amazon EC2 collects this information to improve the accuracy of status checks. Use of this action does not change the value returned by DescribeInstanceStatus. /// /// Parameters: - /// - description: Descriptive text about the health state of your instance. - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - endTime: The time at which the reported instance health state ended. /// - instances: The instances. /// - reasonCodes: The reason codes that describe the health state of your instance. instance-stuck-in-state: My instance is stuck in a state. unresponsive: My instance is unresponsive. 
not-accepting-credentials: My instance is not accepting my credentials. password-not-available: A password is not available for my instance. performance-network: My instance is experiencing performance problems that I believe are network related. performance-instance-store: My instance is experiencing performance problems that I believe are related to the instance stores. performance-ebs-volume: My instance is experiencing performance problems that I believe are related to an EBS volume. performance-other: My instance is experiencing performance problems. other: [explain using the description parameter] @@ -23860,7 +24109,6 @@ public struct EC2: AWSService { /// - logger: Logger use during operation @inlinable public func reportInstanceStatus( - description: String? = nil, dryRun: Bool? = nil, endTime: Date? = nil, instances: [String]? = nil, @@ -23870,7 +24118,6 @@ public struct EC2: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws { let input = ReportInstanceStatusRequest( - description: description, dryRun: dryRun, endTime: endTime, instances: instances, @@ -24131,7 +24378,7 @@ public struct EC2: AWSService { /// /// Parameters: /// - attribute: The attribute to reset. You can only reset the following attributes: kernel | ramdisk | sourceDestCheck. - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the instance. /// - logger: Logger use during operation @inlinable @@ -24571,10 +24818,10 @@ public struct EC2: AWSService { /// - creditSpecification: The credit option for CPU usage of the burstable performance instance. Valid values are standard and unlimited. To change this attribute after launch, use ModifyInstanceCreditSpecification. For more information, see Burstable performance instances in the Amazon EC2 User Guide. Default: standard (T2 instances) or unlimited (T3/T3a/T4g instances) For T3 instances with host tenancy, only standard is supported. /// - disableApiStop: Indicates whether an instance is enabled for stop protection. For more information, see Stop protection. /// - disableApiTermination: If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance. Default: false - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
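For reference, a minimal sketch of calling the new rejectCapacityReservationBillingOwnership convenience overload added in the hunk above. The client setup and the reservation ID are placeholders, not part of this change, and client construction differs between Soto major versions; this assumes a default-initializable AWSClient from soto-core.

    import SotoEC2

    func rejectBillingExample() async throws {
        // Assumed default-initializable client; earlier Soto versions require
        // an explicit httpClientProvider.
        let awsClient = AWSClient()
        let ec2 = EC2(client: awsClient, region: .useast1)

        // Decline a billing-ownership request for a shared Capacity Reservation.
        // The reservation ID below is a placeholder.
        _ = try await ec2.rejectCapacityReservationBillingOwnership(
            capacityReservationId: "cr-0123456789abcdef0",
            dryRun: false
        )
        try await awsClient.shutdown()
    }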
/// - ebsOptimized: Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance. Default: false /// - elasticGpuSpecification: An elastic GPU to associate with the instance. Amazon Elastic Graphics reached end of life on January 8, 2024. - /// - elasticInferenceAccelerators: An elastic inference accelerator to associate with the instance. Amazon Elastic Inference (EI) is no longer available to new customers. For more information, see Amazon Elastic Inference FAQs. + /// - elasticInferenceAccelerators: An elastic inference accelerator to associate with the instance. Amazon Elastic Inference is no longer available. /// - enablePrimaryIpv6: If you’re launching an instance into a dual-stack or IPv6-only subnet, you can enable assigning a primary IPv6 address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if an instance relies on its IPv6 address not changing. When you launch the instance, Amazon Web Services will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address. /// - enclaveOptions: Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide. You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. /// - hibernationOptions: Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide. You can't enable hibernation and Amazon Web Services Nitro Enclaves on the same instance. @@ -24873,7 +25120,7 @@ public struct EC2: AWSService { /// Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger a kernel panic (on Linux instances), or a blue screen/stop error (on Windows instances). For instances based on Intel and AMD processors, the interrupt is received as a non-maskable interrupt (NMI). In general, the operating system crashes and reboots when a kernel panic or stop error is triggered. The operating system can also be configured to perform diagnostic tasks, such as generating a memory dump file, loading a secondary kernel, or obtaining a call trace. Before sending a diagnostic interrupt to your instance, ensure that its operating system is configured to perform the required diagnostic tasks. For more information about configuring your operating system to generate a crash dump when a kernel panic or stop error occurs, see Send a diagnostic interrupt (for advanced users) in the Amazon EC2 User Guide. 
/// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the instance. /// - logger: Logger use during operation @inlinable @@ -24906,7 +25153,7 @@ public struct EC2: AWSService { /// /// Parameters: /// - additionalInfo: Reserved. - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceIds: The IDs of the instances. /// - logger: Logger use during operation @inlinable @@ -25054,7 +25301,7 @@ public struct EC2: AWSService { /// Stops an Amazon EBS-backed instance. For more information, see Stop and start Amazon EC2 instances in the Amazon EC2 User Guide. You can use the Stop action to hibernate an instance if the instance is enabled for hibernation and it meets the hibernation prerequisites. For more information, see Hibernate your Amazon EC2 instance in the Amazon EC2 User Guide. We don't charge usage for a stopped instance, or data transfer fees; however, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage. You can't stop or hibernate instance store-backed instances. You can't use the Stop action to hibernate Spot Instances, but you can specify that Amazon EC2 should hibernate Spot Instances when they are interrupted. For more information, see Hibernating interrupted Spot Instances in the Amazon EC2 User Guide. When you stop or hibernate an instance, we shut it down. You can restart your instance at any time. Before stopping or hibernating an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM, but hibernating an instance does preserve data stored in RAM. If an instance cannot hibernate successfully, a normal shutdown occurs. Stopping and hibernating an instance is different to rebooting or terminating it. For example, when you stop or hibernate an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, the root device and any other devices attached during the instance launch are automatically deleted. For more information about the differences between rebooting, stopping, hibernating, and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide. When you stop an instance, we attempt to shut it down forcibly after a short while. 
If your instance appears stuck in the stopping state after a period of time, there may be an issue with the underlying host computer. For more information, see Troubleshoot stopping your instance in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - force: Forces the instances to stop. The instances do not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances. Default: false /// - hibernate: Hibernates the instance if the instance was enabled for hibernation at launch. If the instance cannot hibernate successfully, a normal shutdown occurs. For more information, see Hibernate your instance in the Amazon EC2 User Guide. Default: false /// - instanceIds: The IDs of the instances. @@ -25130,7 +25377,7 @@ public struct EC2: AWSService { /// Shuts down the specified instances. This operation is idempotent; if you terminate an instance more than once, each call succeeds. If you specify multiple instances and the request fails (for example, because of a single incorrect instance ID), none of the instances are terminated. If you terminate multiple instances across multiple Availability Zones, and one or more of the specified instances are enabled for termination protection, the request fails with the following results: The specified instances that are in the same Availability Zone as the protected instance are not terminated. The specified instances that are in different Availability Zones, where no other specified instances are protected, are successfully terminated. For example, say you have the following instances: Instance A: us-east-1a; Not protected Instance B: us-east-1a; Not protected Instance C: us-east-1b; Protected Instance D: us-east-1b; not protected If you attempt to terminate all of these instances in the same request, the request reports failure with the following results: Instance A and Instance B are successfully terminated because none of the specified instances in us-east-1a are enabled for termination protection. Instance C and Instance D fail to terminate because at least one of the specified instances in us-east-1b (Instance C) is enabled for termination protection. Terminated instances remain visible after termination (for approximately one hour). By default, Amazon EC2 deletes all EBS volumes that were attached when the instance launched. Volumes attached after instance launch continue running. You can stop, start, and terminate EBS-backed instances. You can only terminate instance store-backed instances. What happens to an instance differs if you stop it or terminate it. For example, when you stop an instance, the root device and any other devices attached to the instance persist. When you terminate an instance, any attached EBS volumes with the DeleteOnTermination block device mapping parameter set to true are automatically deleted. 
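As a similarly hedged sketch of the stopInstances convenience overload documented above (reusing the ec2 value from the previous sketch inside an async context; the instance ID is a placeholder and the stoppingInstances property name is assumed from the StopInstancesResult shape):

    // Hibernates the instances if they were launched with hibernation enabled;
    // otherwise a normal stop occurs (see the hibernate parameter above).
    let stopResult = try await ec2.stopInstances(
        hibernate: true,
        instanceIds: ["i-0123456789abcdef0"]
    )
    // stoppingInstances (assumed name) reports the previous and current state of each instance.
    print(stopResult.stoppingInstances ?? [])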
For more information about the differences between stopping and terminating instances, see Instance lifecycle in the Amazon EC2 User Guide. For more information about troubleshooting, see Troubleshooting terminating your instance in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceIds: The IDs of the instances. Constraints: Up to 1000 instance IDs. We recommend breaking up this request into smaller batches. /// - logger: Logger use during operation @inlinable @@ -25302,7 +25549,7 @@ public struct EC2: AWSService { /// Disables detailed monitoring for a running instance. For more information, see Monitoring your instances and volumes in the Amazon EC2 User Guide. /// /// Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceIds: The IDs of the instances. /// - logger: Logger use during operation @inlinable @@ -25670,6 +25917,52 @@ extension EC2 { return self.describeCapacityBlockOfferingsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``describeCapacityReservationBillingRequests(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func describeCapacityReservationBillingRequestsPaginator( + _ input: DescribeCapacityReservationBillingRequestsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeCapacityReservationBillingRequests, + inputKey: \DescribeCapacityReservationBillingRequestsRequest.nextToken, + outputKey: \DescribeCapacityReservationBillingRequestsResult.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``describeCapacityReservationBillingRequests(_:logger:)``. + /// + /// - Parameters: + /// - capacityReservationIds: The ID of the Capacity Reservation. + /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - filters: One or more filters. status - The state of the request (pending | accepted | + /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. 
+ /// - role: Specify one of the following: odcr-owner - If you are the Capacity Reservation owner, specify this + /// - logger: Logger used for logging + @inlinable + public func describeCapacityReservationBillingRequestsPaginator( + capacityReservationIds: [String]? = nil, + dryRun: Bool? = nil, + filters: [Filter]? = nil, + maxResults: Int? = nil, + role: CallerRole? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = DescribeCapacityReservationBillingRequestsRequest( + capacityReservationIds: capacityReservationIds, + dryRun: dryRun, + filters: filters, + maxResults: maxResults, + role: role + ) + return self.describeCapacityReservationBillingRequestsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``describeCapacityReservationFleets(_:logger:)``. /// /// - Parameters: @@ -26818,7 +27111,7 @@ extension EC2 { /// Return PaginatorSequence for operation ``describeInstanceCreditSpecifications(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. instance-id - The ID of the instance. /// - instanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 1000 explicitly specified instance IDs. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. @@ -26904,7 +27197,7 @@ extension EC2 { /// Return PaginatorSequence for operation ``describeInstanceStatus(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. availability-zone - The Availability Zone of the instance. event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop). event.description - A description of the event. event.instance-event-id - The ID of the event whose date and time you are modifying. event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before-deadline - The deadline for starting the event (for example, 2014-09-15T17:15:20.000Z). instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. 
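A rough sketch of driving the new describeCapacityReservationBillingRequestsPaginator added above, again reusing the ec2 value from the first sketch. The .odcrOwner case and the page property name are assumptions about the generated Swift names and are not confirmed by this diff:

    // List billing requests where this account owns the shared Capacity Reservation.
    let pages = ec2.describeCapacityReservationBillingRequestsPaginator(role: .odcrOwner)
    for try await page in pages {
        // Property name assumed from the result shape.
        for request in page.capacityReservationBillingRequests ?? [] {
            print(request)
        }
    }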
The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data). instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data). system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). attached-ebs-status.status - The status of the attached EBS volume for the instance (ok | impaired | initializing | insufficient-data | not-applicable). /// - includeAllInstances: When true, includes the health status for all instances. When false, includes the health status for running instances only. Default: false /// - instanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 100 explicitly specified instance IDs. @@ -26950,7 +27243,7 @@ extension EC2 { /// Return PaginatorSequence for operation ``describeInstanceTopology(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. availability-zone - The name of the Availability Zone (for example, us-west-2a) or Local Zone (for example, us-west-2-lax-1b) that the instance is in. instance-type - The instance type (for example, p4d.24xlarge) or instance family (for example, p4d*). You can use the * wildcard to match zero or more characters, or the ? wildcard to match zero or one character. zone-id - The ID of the Availability Zone (for example, usw2-az2) or Local Zone (for example, usw2-lax1-az1) that the instance is in. /// - groupNames: The name of the placement group that each instance is in. Constraints: Maximum 100 explicitly specified placement group names. /// - instanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 100 explicitly specified instance IDs. @@ -27082,8 +27375,8 @@ extension EC2 { /// Return PaginatorSequence for operation ``describeInstances(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. 
block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). 
launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. 
network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. 
platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. 
tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. 
iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. 
network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. 
network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). 
root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - logger: Logger used for logging @@ -31651,6 +31944,20 @@ extension EC2.DescribeCapacityBlockOfferingsRequest: AWSPaginateToken { } } +extension EC2.DescribeCapacityReservationBillingRequestsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> EC2.DescribeCapacityReservationBillingRequestsRequest { + return .init( + capacityReservationIds: self.capacityReservationIds, + dryRun: self.dryRun, + filters: self.filters, + maxResults: self.maxResults, + nextToken: token, + role: self.role + ) + } +} + extension EC2.DescribeCapacityReservationFleetsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> EC2.DescribeCapacityReservationFleetsRequest { @@ -33877,8 +34184,8 @@ extension EC2 { /// Waiter for operation ``describeInstances(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. 
block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. 
launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. 
network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. 
platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. 
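
As the tag:/tag-key filter descriptions just above explain, tag matching puts the key into the filter name and the value into the filter values. A sketch of how that might look with this package's generated EC2 types (illustrative only, not part of the patch; the paginator method name is assumed from Soto's usual code generation):

    import SotoEC2

    // Find running instances tagged Owner=TeamA, mirroring the tag:Owner / TeamA example above.
    func instancesOwnedByTeamA(ec2: EC2) async throws -> [String] {
        let request = EC2.DescribeInstancesRequest(
            filters: [
                .init(name: "tag:Owner", values: ["TeamA"]),
                .init(name: "instance-state-name", values: ["running"])
            ]
        )
        var ids: [String] = []
        for try await page in ec2.describeInstancesPaginator(request) {
            for reservation in page.reservations ?? [] {
                ids += (reservation.instances ?? []).compactMap { $0.instanceId }
            }
        }
        return ids
    }
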
tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. 
iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. 
network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. 
network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). 
root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -33929,8 +34236,8 @@ extension EC2 { /// Waiter for operation ``describeInstances(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. 
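
The maxResults/nextToken parameters documented above can also be driven by hand when the generated paginator is not convenient: request a page, then repeat the call with the token from the previous response until no token is returned. A minimal sketch under that assumption, not part of this patch:

    import SotoEC2

    // Manual paging: pass the token from each response back in until it is nil.
    func printReservationSizes(ec2: EC2) async throws {
        var nextToken: String? = nil
        repeat {
            let page = try await ec2.describeInstances(.init(maxResults: 50, nextToken: nextToken))
            for reservation in page.reservations ?? [] {
                print("reservation \(reservation.reservationId ?? "-"): \(reservation.instances?.count ?? 0) instance(s)")
            }
            nextToken = page.nextToken
        } while nextToken != nil
    }
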
boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). 
metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. 
network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. 
private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). 
usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. 
The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. 
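
Because instance-state-code is documented above as a 16-bit value whose high byte is for internal use, comparisons should only look at the low byte. A small sketch, not part of the patch; the state?.code members are assumed from the common EC2 instance shape:

    import SotoEC2

    /// True when the low byte of the reported state code is 16 ("running");
    /// the high byte is internal-use only, per the filter notes above.
    func isRunning(_ instance: EC2.Instance) -> Bool {
        guard let rawCode = instance.state?.code else { return false }
        return (rawCode & 0xFF) == 16
    }
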
network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. 
network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. 
subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -33978,7 +34285,7 @@ extension EC2 { /// Waiter for operation ``describeInstanceStatus(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. availability-zone - The Availability Zone of the instance. event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop). event.description - A description of the event. event.instance-event-id - The ID of the event whose date and time you are modifying. event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before-deadline - The deadline for starting the event (for example, 2014-09-15T17:15:20.000Z). instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). 
instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data). instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data). system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). attached-ebs-status.status - The status of the attached EBS volume for the instance (ok | impaired | initializing | insufficient-data | not-applicable). /// - includeAllInstances: When true, includes the health status for all instances. When false, includes the health status for running instances only. Default: false /// - instanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 100 explicitly specified instance IDs. @@ -34031,8 +34338,8 @@ extension EC2 { /// Waiter for operation ``describeInstances(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. 
host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. 
network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. 
network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. 
A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). 
capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). 
metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. 
network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. 
private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). 
vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -34081,8 +34388,8 @@ extension EC2 { /// Waiter for operation ``describeInstances(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. - /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. 
image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. 
network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). 
network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). 
source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - filters: The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. 
client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). 
monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. 
network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. 
product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. /// - instanceIds: The instance IDs. Default: Describes all your instances. /// - maxResults: The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. /// - nextToken: The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request. @@ -34377,7 +34684,7 @@ extension EC2 { /// Waiter for operation ``getPasswordData(_:logger:)``. 
/// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - instanceId: The ID of the Windows instance. /// - logger: Logger used for logging @inlinable @@ -34724,7 +35031,7 @@ extension EC2 { /// Waiter for operation ``describeInstanceStatus(_:logger:)``. /// /// - Parameters: - /// - dryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// - dryRun: Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. /// - filters: The filters. availability-zone - The Availability Zone of the instance. event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop). event.description - A description of the event. event.instance-event-id - The ID of the event whose date and time you are modifying. event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before-deadline - The deadline for starting the event (for example, 2014-09-15T17:15:20.000Z). instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data). instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data). system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). attached-ebs-status.status - The status of the attached EBS volume for the instance (ok | impaired | initializing | insufficient-data | not-applicable). /// - includeAllInstances: When true, includes the health status for all instances. When false, includes the health status for running instances only. Default: false /// - instanceIds: The instance IDs. Default: Describes all your instances. Constraints: Maximum 100 explicitly specified instance IDs. 
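The `describeInstances` / `describeInstanceStatus` parameters documented above map one-to-one onto the generated request shapes. A minimal sketch, assuming only a configured `EC2` client (credential and `AWSClient` setup omitted) and using filter names taken from the documentation above; the helper function name is illustrative:

```swift
import SotoEC2

/// Sketch only: list the IDs of running Windows instances using the
/// `instance-state-name` and `platform` filters documented above.
func listRunningWindowsInstanceIds(ec2: EC2) async throws -> [String] {
    let request = EC2.DescribeInstancesRequest(
        filters: [
            .init(name: "instance-state-name", values: ["running"]),
            .init(name: "platform", values: ["windows"]),
        ],
        maxResults: 50
    )
    let result = try await ec2.describeInstances(request)
    // Reservations group instances launched by the same request;
    // flatten them and keep the instance IDs.
    return (result.reservations ?? [])
        .flatMap { $0.instances ?? [] }
        .compactMap { $0.instanceId }
}
```

For result sets larger than `maxResults`, feed the returned `nextToken` back into a follow-up request, as the parameter documentation above describes.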
diff --git a/Sources/Soto/Services/EC2/EC2_shapes.swift b/Sources/Soto/Services/EC2/EC2_shapes.swift index bd70c34b95..9e663b28ee 100644 --- a/Sources/Soto/Services/EC2/EC2_shapes.swift +++ b/Sources/Soto/Services/EC2/EC2_shapes.swift @@ -311,6 +311,12 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum CallerRole: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case odcrOwner = "odcr-owner" + case unusedReservationBillingOwner = "unused-reservation-billing-owner" + public var description: String { return self.rawValue } + } + public enum CancelBatchErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case fleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" case fleetRequestIdMalformed = "fleetRequestIdMalformed" @@ -328,6 +334,16 @@ extension EC2 { public var description: String { return self.rawValue } } + public enum CapacityReservationBillingRequestStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case accepted = "accepted" + case cancelled = "cancelled" + case expired = "expired" + case pending = "pending" + case rejected = "rejected" + case revoked = "revoked" + public var description: String { return self.rawValue } + } + public enum CapacityReservationFleetState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "active" case cancelled = "cancelled" @@ -774,7 +790,9 @@ extension EC2 { } public enum FleetCapacityReservationUsageStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case none = "none" case useCapacityReservationsFirst = "use-capacity-reservations-first" + case useCapacityReservationsOnly = "use-capacity-reservations-only" public var description: String { return self.rawValue } } @@ -1335,6 +1353,14 @@ extension EC2 { public static var g64Xlarge: Self { .init(rawValue: "g6.4xlarge") } public static var g68Xlarge: Self { .init(rawValue: "g6.8xlarge") } public static var g6Xlarge: Self { .init(rawValue: "g6.xlarge") } + public static var g6e12Xlarge: Self { .init(rawValue: "g6e.12xlarge") } + public static var g6e16Xlarge: Self { .init(rawValue: "g6e.16xlarge") } + public static var g6e24Xlarge: Self { .init(rawValue: "g6e.24xlarge") } + public static var g6e2Xlarge: Self { .init(rawValue: "g6e.2xlarge") } + public static var g6e48Xlarge: Self { .init(rawValue: "g6e.48xlarge") } + public static var g6e4Xlarge: Self { .init(rawValue: "g6e.4xlarge") } + public static var g6e8Xlarge: Self { .init(rawValue: "g6e.8xlarge") } + public static var g6eXlarge: Self { .init(rawValue: "g6e.xlarge") } public static var gr64Xlarge: Self { .init(rawValue: "gr6.4xlarge") } public static var gr68Xlarge: Self { .init(rawValue: "gr6.8xlarge") } public static var h116Xlarge: Self { .init(rawValue: "h1.16xlarge") } @@ -3580,6 +3606,38 @@ extension EC2 { } } + public struct AcceptCapacityReservationBillingOwnershipRequest: AWSEncodableShape { + /// The ID of the Capacity Reservation for which to accept the request. + public let capacityReservationId: String? + /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + + @inlinable + public init(capacityReservationId: String? = nil, dryRun: Bool? 
= nil) { + self.capacityReservationId = capacityReservationId + self.dryRun = dryRun + } + + private enum CodingKeys: String, CodingKey { + case capacityReservationId = "CapacityReservationId" + case dryRun = "DryRun" + } + } + + public struct AcceptCapacityReservationBillingOwnershipResult: AWSDecodableShape { + /// Returns true if the request succeeds; otherwise, it returns an error. + public let `return`: Bool? + + @inlinable + public init(return: Bool? = nil) { + self.`return` = `return` + } + + private enum CodingKeys: String, CodingKey { + case `return` = "return" + } + } + public struct AcceptReservedInstancesExchangeQuoteRequest: AWSEncodableShape { public struct _ReservedInstanceIdsEncoding: ArrayCoderProperties { public static let member = "ReservedInstanceId" } public struct _TargetConfigurationsEncoding: ArrayCoderProperties { public static let member = "TargetConfigurationRequest" } @@ -4232,7 +4290,7 @@ extension EC2 { public let domain: DomainType? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The ID of an IPAM pool. + /// The ID of an IPAM pool which has an Amazon-provided or BYOIP public IPv4 CIDR provisioned to it. For more information, see Allocate sequential Elastic IP addresses from an IPAM pool in the Amazon VPC IPAM User Guide. public let ipamPoolId: String? /// A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. public let networkBorderGroup: String? @@ -5078,6 +5136,48 @@ extension EC2 { } } + public struct AssociateCapacityReservationBillingOwnerRequest: AWSEncodableShape { + /// The ID of the Capacity Reservation. + public let capacityReservationId: String? + /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The ID of the consumer account to which assign billing. + public let unusedReservationBillingOwnerId: String? + + @inlinable + public init(capacityReservationId: String? = nil, dryRun: Bool? = nil, unusedReservationBillingOwnerId: String? = nil) { + self.capacityReservationId = capacityReservationId + self.dryRun = dryRun + self.unusedReservationBillingOwnerId = unusedReservationBillingOwnerId + } + + public func validate(name: String) throws { + try self.validate(self.unusedReservationBillingOwnerId, name: "unusedReservationBillingOwnerId", parent: name, max: 12) + try self.validate(self.unusedReservationBillingOwnerId, name: "unusedReservationBillingOwnerId", parent: name, min: 12) + try self.validate(self.unusedReservationBillingOwnerId, name: "unusedReservationBillingOwnerId", parent: name, pattern: "^[0-9]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case capacityReservationId = "CapacityReservationId" + case dryRun = "DryRun" + case unusedReservationBillingOwnerId = "UnusedReservationBillingOwnerId" + } + } + + public struct AssociateCapacityReservationBillingOwnerResult: AWSDecodableShape { + /// Returns true if the request succeeds; otherwise, it returns an error. 
+ public let `return`: Bool? + + @inlinable + public init(return: Bool? = nil) { + self.`return` = `return` + } + + private enum CodingKeys: String, CodingKey { + case `return` = "return" + } + } + public struct AssociateClientVpnTargetNetworkRequest: AWSEncodableShape { /// Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. /// For more information, see Ensuring idempotency. @@ -7248,9 +7348,12 @@ extension EC2 { public let tenancy: CapacityReservationTenancy? /// The total number of instances for which the Capacity Reservation reserves capacity. public let totalInstanceCount: Int? + /// The ID of the Amazon Web Services account to which billing of the unused capacity + /// of the Capacity Reservation is assigned. + public let unusedReservationBillingOwnerId: String? @inlinable - public init(availabilityZone: String? = nil, availabilityZoneId: String? = nil, availableInstanceCount: Int? = nil, capacityAllocations: [CapacityAllocation]? = nil, capacityReservationArn: String? = nil, capacityReservationFleetId: String? = nil, capacityReservationId: String? = nil, createDate: Date? = nil, ebsOptimized: Bool? = nil, endDate: Date? = nil, endDateType: EndDateType? = nil, ephemeralStorage: Bool? = nil, instanceMatchCriteria: InstanceMatchCriteria? = nil, instancePlatform: CapacityReservationInstancePlatform? = nil, instanceType: String? = nil, outpostArn: String? = nil, ownerId: String? = nil, placementGroupArn: String? = nil, reservationType: CapacityReservationType? = nil, startDate: Date? = nil, state: CapacityReservationState? = nil, tags: [Tag]? = nil, tenancy: CapacityReservationTenancy? = nil, totalInstanceCount: Int? = nil) { + public init(availabilityZone: String? = nil, availabilityZoneId: String? = nil, availableInstanceCount: Int? = nil, capacityAllocations: [CapacityAllocation]? = nil, capacityReservationArn: String? = nil, capacityReservationFleetId: String? = nil, capacityReservationId: String? = nil, createDate: Date? = nil, ebsOptimized: Bool? = nil, endDate: Date? = nil, endDateType: EndDateType? = nil, ephemeralStorage: Bool? = nil, instanceMatchCriteria: InstanceMatchCriteria? = nil, instancePlatform: CapacityReservationInstancePlatform? = nil, instanceType: String? = nil, outpostArn: String? = nil, ownerId: String? = nil, placementGroupArn: String? = nil, reservationType: CapacityReservationType? = nil, startDate: Date? = nil, state: CapacityReservationState? = nil, tags: [Tag]? = nil, tenancy: CapacityReservationTenancy? = nil, totalInstanceCount: Int? = nil, unusedReservationBillingOwnerId: String? = nil) { self.availabilityZone = availabilityZone self.availabilityZoneId = availabilityZoneId self.availableInstanceCount = availableInstanceCount @@ -7275,6 +7378,7 @@ extension EC2 { self.tags = tags self.tenancy = tenancy self.totalInstanceCount = totalInstanceCount + self.unusedReservationBillingOwnerId = unusedReservationBillingOwnerId } private enum CodingKeys: String, CodingKey { @@ -7302,6 +7406,46 @@ extension EC2 { case tags = "tagSet" case tenancy = "tenancy" case totalInstanceCount = "totalInstanceCount" + case unusedReservationBillingOwnerId = "unusedReservationBillingOwnerId" + } + } + + public struct CapacityReservationBillingRequest: AWSDecodableShape { + /// The ID of the Capacity Reservation. + public let capacityReservationId: String? + /// Information about the Capacity Reservation. + public let capacityReservationInfo: CapacityReservationInfo? 
+ /// The date and time, in UTC time format, at which the request was initiated. + public let lastUpdateTime: Date? + /// The ID of the Amazon Web Services account that initiated the request. + public let requestedBy: String? + /// The status of the request. For more information, see + /// View billing assignment requests for a shared Amazon EC2 Capacity Reservation. + public let status: CapacityReservationBillingRequestStatus? + /// Information about the status. + public let statusMessage: String? + /// The ID of the Amazon Web Services account to which the request was sent. + public let unusedReservationBillingOwnerId: String? + + @inlinable + public init(capacityReservationId: String? = nil, capacityReservationInfo: CapacityReservationInfo? = nil, lastUpdateTime: Date? = nil, requestedBy: String? = nil, status: CapacityReservationBillingRequestStatus? = nil, statusMessage: String? = nil, unusedReservationBillingOwnerId: String? = nil) { + self.capacityReservationId = capacityReservationId + self.capacityReservationInfo = capacityReservationInfo + self.lastUpdateTime = lastUpdateTime + self.requestedBy = requestedBy + self.status = status + self.statusMessage = statusMessage + self.unusedReservationBillingOwnerId = unusedReservationBillingOwnerId + } + + private enum CodingKeys: String, CodingKey { + case capacityReservationId = "capacityReservationId" + case capacityReservationInfo = "capacityReservationInfo" + case lastUpdateTime = "lastUpdateTime" + case requestedBy = "requestedBy" + case status = "status" + case statusMessage = "statusMessage" + case unusedReservationBillingOwnerId = "unusedReservationBillingOwnerId" } } @@ -7433,6 +7577,28 @@ extension EC2 { } } + public struct CapacityReservationInfo: AWSDecodableShape { + /// The Availability Zone for the Capacity Reservation. + public let availabilityZone: String? + /// The instance type for the Capacity Reservation. + public let instanceType: String? + /// The tenancy of the Capacity Reservation. + public let tenancy: CapacityReservationTenancy? + + @inlinable + public init(availabilityZone: String? = nil, instanceType: String? = nil, tenancy: CapacityReservationTenancy? = nil) { + self.availabilityZone = availabilityZone + self.instanceType = instanceType + self.tenancy = tenancy + } + + private enum CodingKeys: String, CodingKey { + case availabilityZone = "availabilityZone" + case instanceType = "instanceType" + case tenancy = "tenancy" + } + } + public struct CapacityReservationOptions: AWSDecodableShape { /// Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity. If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price or prioritized) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price or prioritized). If you do not specify a value, the fleet fulfils the On-Demand capacity according to the chosen On-Demand allocation strategy. public let usageStrategy: FleetCapacityReservationUsageStrategy? 
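Taken together, the new shapes above (`AcceptCapacityReservationBillingOwnershipRequest`, `AssociateCapacityReservationBillingOwnerRequest`, `CapacityReservationBillingRequest`, and the `CapacityReservationBillingRequestStatus` enum) model handing billing of a shared Capacity Reservation's unused capacity to a consumer account; the consumer lists and accepts the offer via `DescribeCapacityReservationBillingRequests`, whose request/result shapes appear further down in this diff. A rough sketch of that flow follows; the three client methods are assumed to be the operations generated from these shapes (the API-file side of the change is not shown in this excerpt), and the function and argument names are illustrative:

```swift
import SotoEC2

/// Sketch only: the owner offers billing of unused capacity to a consumer
/// account, and the consumer lists the pending request and accepts it.
func transferUnusedReservationBilling(
    ownerEC2: EC2,             // client authenticated as the Capacity Reservation owner
    consumerEC2: EC2,          // client authenticated as the consumer account
    capacityReservationId: String,
    consumerAccountId: String  // 12-digit account ID (validated as ^[0-9]{12}$ above)
) async throws {
    // Owner: request that the consumer account take over billing.
    _ = try await ownerEC2.associateCapacityReservationBillingOwner(
        .init(
            capacityReservationId: capacityReservationId,
            unusedReservationBillingOwnerId: consumerAccountId
        )
    )

    // Consumer: list billing requests sent to this account that are still
    // pending, using the `status` filter and the caller role documented on
    // DescribeCapacityReservationBillingRequestsRequest further down.
    let response = try await consumerEC2.describeCapacityReservationBillingRequests(
        .init(
            filters: [.init(name: "status", values: ["pending"])],
            role: .unusedReservationBillingOwner
        )
    )

    // Consumer: accept the request for this particular Capacity Reservation.
    let hasPendingRequest = (response.capacityReservationBillingRequests ?? [])
        .contains { $0.capacityReservationId == capacityReservationId && $0.status == .pending }
    if hasPendingRequest {
        _ = try await consumerEC2.acceptCapacityReservationBillingOwnership(
            .init(capacityReservationId: capacityReservationId)
        )
    }
}
```

Once accepted, the consumer account ID should surface in `CapacityReservation.unusedReservationBillingOwnerId`, the field added to the `CapacityReservation` shape above.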
@@ -8368,7 +8534,7 @@ extension EC2 { } public struct ConfirmProductInstanceRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the instance. public let instanceId: String? @@ -9750,7 +9916,7 @@ extension EC2 { public let launchTemplateAndOverrides: LaunchTemplateAndOverridesResponse? /// Indicates if the instance that was launched is a Spot Instance or On-Demand Instance. public let lifecycle: InstanceLifecycle? - /// The value is Windows for Windows instances. Otherwise, the value is blank. + /// The value is windows for Windows instances in an EC2 Fleet. Otherwise, the value is blank. public let platform: PlatformValues? @inlinable @@ -10359,7 +10525,7 @@ extension EC2 { public let dryRun: Bool? /// The ID of the scope in which you would like to create the IPAM pool. public let ipamScopeId: String? - /// The locale for the pool should be one of the following: An Amazon Web Services Region where you want this IPAM pool to be available for allocations. The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope. If you do not choose a locale, resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone. + /// The locale for the pool should be one of the following: An Amazon Web Services Region where you want this IPAM pool to be available for allocations. The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope. If you do not choose a locale, resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone. Default is none and means any locale. public let locale: String? /// The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is byoip. For more information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool if PublicIpSource is amazon. For information on increasing the default limit, see Quotas for your IPAM in the Amazon VPC IPAM User Guide. public let publicIpSource: IpamPoolPublicIpSource? @@ -11482,7 +11648,7 @@ extension EC2 { public struct CreatePlacementGroupRequest: AWSEncodableShape { public struct _TagSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. 
Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// A name for the placement group. Must be unique within the scope of your account for the Region. Constraints: Up to 255 ASCII characters public let groupName: String? @@ -11999,7 +12165,7 @@ extension EC2 { } public struct CreateSpotDatafeedSubscriptionRequest: AWSEncodableShape { - /// The name of the Amazon S3 bucket in which to store the Spot Instance data feed. For more information about bucket names, see Rules for bucket naming in the Amazon S3 Developer Guide. + /// The name of the Amazon S3 bucket in which to store the Spot Instance data feed. For more information about bucket names, see Bucket naming rules in the Amazon S3 User Guide. public let bucket: String? /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @@ -13041,7 +13207,7 @@ extension EC2 { public let dnsSupport: DnsSupportValue? /// Enable or disable IPv6 support. The default is disable. public let ipv6Support: Ipv6SupportValue? - /// This parameter is in preview and may not be available for your account. Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature. If you don't enable or disable SecurityGroupReferencingSupport in the request, the attachment will inherit the security group referencing support setting on the transit gateway. + /// Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management. This option is set to enable by default. However, at the transit gateway level the default is set to disable. For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide. public let securityGroupReferencingSupport: SecurityGroupReferencingSupportValue? @inlinable @@ -15473,7 +15639,7 @@ extension EC2 { } public struct DeletePlacementGroupRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The name of the placement group. public let groupName: String? 
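One behavioral note in the doc update above: security group referencing now defaults to `enable` at the attachment level while remaining `disable` at the transit gateway level. A sketch of explicitly opting an attachment out, assuming the standard generated `createTransitGatewayVpcAttachment` operation and its options shape (neither appears in this excerpt; only the option's doc comment does):

```swift
import SotoEC2

/// Sketch only: create a transit gateway VPC attachment with security group
/// referencing explicitly disabled, rather than relying on the defaults
/// described in the updated documentation.
func attachVpcWithoutSecurityGroupReferencing(
    ec2: EC2,
    transitGatewayId: String,
    vpcId: String,
    subnetIds: [String]
) async throws -> String? {
    let response = try await ec2.createTransitGatewayVpcAttachment(
        .init(
            options: .init(securityGroupReferencingSupport: .disable),
            subnetIds: subnetIds,
            transitGatewayId: transitGatewayId,
            vpcId: vpcId
        )
    )
    // The returned attachment ID can then be polled until the attachment is available.
    return response.transitGatewayVpcAttachment?.transitGatewayAttachmentId
}
```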
@@ -17454,6 +17620,79 @@ extension EC2 { } } + public struct DescribeCapacityReservationBillingRequestsRequest: AWSEncodableShape { + public struct _CapacityReservationIdsEncoding: ArrayCoderProperties { public static let member = "item" } + public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } + + /// The ID of the Capacity Reservation. + @OptionalCustomCoding<ArrayCoder<_CapacityReservationIdsEncoding, String>> + public var capacityReservationIds: [String]? + /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// One or more filters. status - The state of the request (pending | accepted | + /// rejected | cancelled | revoked | expired). requested-by - The account ID of the Capacity Reservation owner that initiated + /// the request. Not supported if you specify requested-by for Role. unused-reservation-billing-owner - The ID of the consumer account to which the + /// request was sent. Not supported if you specify unused-reservation-billing-owner for + /// Role. + @OptionalCustomCoding<ArrayCoder<_FiltersEncoding, Filter>> + public var filters: [Filter]? + /// The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination. + public let maxResults: Int? + /// The token to use to retrieve the next page of results. + public let nextToken: String? + /// Specify one of the following: odcr-owner - If you are the Capacity Reservation owner, specify this + /// value to view requests that you have initiated. Not supported with the requested-by + /// filter. unused-reservation-billing-owner - If you are the consumer account, + /// specify this value to view requests that have been sent to you. Not supported with the + /// unused-reservation-billing-owner filter. + public let role: CallerRole? + + @inlinable + public init(capacityReservationIds: [String]? = nil, dryRun: Bool? = nil, filters: [Filter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, role: CallerRole? = nil) { + self.capacityReservationIds = capacityReservationIds + self.dryRun = dryRun + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + self.role = role + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case capacityReservationIds = "CapacityReservationId" + case dryRun = "DryRun" + case filters = "Filter" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case role = "Role" + } + } + + public struct DescribeCapacityReservationBillingRequestsResult: AWSDecodableShape { + public struct _CapacityReservationBillingRequestsEncoding: ArrayCoderProperties { public static let member = "item" } + + /// Information about the request. + @OptionalCustomCoding<ArrayCoder<_CapacityReservationBillingRequestsEncoding, CapacityReservationBillingRequest>> + public var capacityReservationBillingRequests: [CapacityReservationBillingRequest]? + /// The token to use to retrieve the next page of results. This value is null when there are no more results to return. + public let nextToken: String? + + @inlinable + public init(capacityReservationBillingRequests: [CapacityReservationBillingRequest]? = nil, nextToken: String?
= nil) { + self.capacityReservationBillingRequests = capacityReservationBillingRequests + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case capacityReservationBillingRequests = "capacityReservationBillingRequestSet" + case nextToken = "nextToken" + } + } + public struct DescribeCapacityReservationFleetsRequest: AWSEncodableShape { public struct _CapacityReservationFleetIdsEncoding: ArrayCoderProperties { public static let member = "item" } public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } @@ -18842,7 +19081,7 @@ extension EC2 { public let launchTemplateAndOverrides: LaunchTemplateAndOverridesResponse? /// Indicates if the instance that was launched is a Spot Instance or On-Demand Instance. public let lifecycle: InstanceLifecycle? - /// The value is Windows for Windows instances. Otherwise, the value is blank. + /// The value is windows for Windows instances in an EC2 Fleet. Otherwise, the value is blank. public let platform: PlatformValues? @inlinable @@ -19591,7 +19830,7 @@ extension EC2 { public struct DescribeInstanceAttributeRequest: AWSEncodableShape { /// The instance attribute. Note: The enaSupport attribute is not supported at this time. public let attribute: InstanceAttributeName? - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the instance. public let instanceId: String? @@ -19676,7 +19915,7 @@ extension EC2 { public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } public struct _InstanceIdsEncoding: ArrayCoderProperties { public static let member = "InstanceId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The filters. instance-id - The ID of the instance. @OptionalCustomCoding<ArrayCoder<_FiltersEncoding, Filter>> @@ -19827,7 +20066,7 @@ extension EC2 { public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } public struct _InstanceIdsEncoding: ArrayCoderProperties { public static let member = "InstanceId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The filters.
availability-zone - The Availability Zone of the instance. event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop). event.description - A description of the event. event.instance-event-id - The ID of the event whose date and time you are modifying. event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z). event.not-before-deadline - The deadline for starting the event (for example, 2014-09-15T17:15:20.000Z). instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data). instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data). system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). attached-ebs-status.status - The status of the attached EBS volume for the instance (ok | impaired | initializing | insufficient-data | not-applicable). @OptionalCustomCoding<ArrayCoder<_FiltersEncoding, Filter>> @@ -19887,7 +20126,7 @@ extension EC2 { public struct DescribeInstanceTopologyRequest: AWSEncodableShape { public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The filters. availability-zone - The name of the Availability Zone (for example, us-west-2a) or Local Zone (for example, us-west-2-lax-1b) that the instance is in. instance-type - The instance type (for example, p4d.24xlarge) or instance family (for example, p4d*). You can use the * wildcard to match zero or more characters, or the ? wildcard to match zero or one character. zone-id - The ID of the Availability Zone (for example, usw2-az2) or Local Zone (for example, usw2-lax1-az1) that the instance is in. @OptionalCustomCoding<ArrayCoder<_FiltersEncoding, Filter>> @@ -20076,9 +20315,9 @@ extension EC2 { public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } public struct _InstanceIdsEncoding: ArrayCoderProperties { public static let member = "InstanceId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation.
Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? - /// The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. iam-instance-profile.name - The instance profile associated with the instance. Specified as an name. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). 
instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. 
network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. 
owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. 
For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. + /// The filters. affinity - The affinity setting for an instance running on a Dedicated Host (default | host). architecture - The instance architecture (i386 | x86_64 | arm64). availability-zone - The Availability Zone of the instance. block-device-mapping.attach-time - The attach time for an EBS volume mapped to the instance, for example, 2022-09-15T17:15:20.000Z. block-device-mapping.delete-on-termination - A Boolean that indicates whether the EBS volume is deleted on instance termination. block-device-mapping.device-name - The device name specified in the block device mapping (for example, /dev/sdh or xvdh). block-device-mapping.status - The status for the EBS volume (attaching | attached | detaching | detached). block-device-mapping.volume-id - The volume ID of the EBS volume. boot-mode - The boot mode that was specified by the AMI (legacy-bios | uefi | uefi-preferred). capacity-reservation-id - The ID of the Capacity Reservation into which the instance was launched. capacity-reservation-specification.capacity-reservation-preference - The instance's Capacity Reservation preference (open | none). capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id - The ID of the targeted Capacity Reservation. capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn - The ARN of the targeted Capacity Reservation group. client-token - The idempotency token you provided when you launched the instance. current-instance-boot-mode - The boot mode that is used to launch the instance at launch or start (legacy-bios | uefi). dns-name - The public DNS name of the instance. ebs-optimized - A Boolean that indicates whether the instance is optimized for Amazon EBS I/O. ena-support - A Boolean that indicates whether the instance is enabled for enhanced networking with ENA. enclave-options.enabled - A Boolean that indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. hibernation-options.configured - A Boolean that indicates whether the instance is enabled for hibernation. A value of true means that the instance is enabled for hibernation. host-id - The ID of the Dedicated Host on which the instance is running, if applicable. hypervisor - The hypervisor type of the instance (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. 
iam-instance-profile.arn - The instance profile associated with the instance. Specified as an ARN. iam-instance-profile.id - The instance profile associated with the instance. Specified as an ID. image-id - The ID of the image used to launch the instance. instance-id - The ID of the instance. instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or a Capacity Block (spot | scheduled | capacity-block). instance-state-code - The state of the instance, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped). instance-type - The type of instance (for example, t2.micro). instance.group-id - The ID of the security group for the instance. instance.group-name - The name of the security group for the instance. ip-address - The public IPv4 address of the instance. ipv6-address - The IPv6 address of the instance. kernel-id - The kernel ID. key-name - The name of the key pair used when the instance was launched. launch-index - When launching multiple instances, this is the index for the instance in the launch group (for example, 0, 1, 2, and so on). launch-time - The time when the instance was launched, in the ISO 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example, 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for example, 2021-09-29T*, which matches an entire day. maintenance-options.auto-recovery - The current automatic recovery behavior of the instance (disabled | default). metadata-options.http-endpoint - The status of access to the HTTP metadata endpoint on your instance (enabled | disabled) metadata-options.http-protocol-ipv4 - Indicates whether the IPv4 endpoint is enabled (disabled | enabled). metadata-options.http-protocol-ipv6 - Indicates whether the IPv6 endpoint is enabled (disabled | enabled). metadata-options.http-put-response-hop-limit - The HTTP metadata request put response hop limit (integer, possible values 1 to 64) metadata-options.http-tokens - The metadata request authorization state (optional | required) metadata-options.instance-metadata-tags - The status of access to instance tags from the instance metadata (enabled | disabled) metadata-options.state - The state of the metadata option changes (pending | applied). monitoring-state - Indicates whether detailed monitoring is enabled (disabled | enabled). network-interface.addresses.association.allocation-id - The allocation ID. network-interface.addresses.association.association-id - The association ID. network-interface.addresses.association.carrier-ip - The carrier IP address. network-interface.addresses.association.customer-owned-ip - The customer-owned IP address. network-interface.addresses.association.ip-owner-id - The owner ID of the private IPv4 address associated with the network interface. network-interface.addresses.association.public-dns-name - The public DNS name. network-interface.addresses.association.public-ip - The ID of the association of an Elastic IP address (IPv4) with a network interface. network-interface.addresses.primary - Specifies whether the IPv4 address of the network interface is the primary private IPv4 address. network-interface.addresses.private-dns-name - The private DNS name. 
network-interface.addresses.private-ip-address - The private IPv4 address associated with the network interface. network-interface.association.allocation-id - The allocation ID returned when you allocated the Elastic IP address (IPv4) for your network interface. network-interface.association.association-id - The association ID returned when the network interface was associated with an IPv4 address. network-interface.association.carrier-ip - The customer-owned IP address. network-interface.association.customer-owned-ip - The customer-owned IP address. network-interface.association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated with the network interface. network-interface.association.public-dns-name - The public DNS name. network-interface.association.public-ip - The address of the Elastic IP address (IPv4) bound to the network interface. network-interface.attachment.attach-time - The time that the network interface was attached to an instance. network-interface.attachment.attachment-id - The ID of the interface attachment. network-interface.attachment.delete-on-termination - Specifies whether the attachment is deleted when an instance is terminated. network-interface.attachment.device-index - The device index to which the network interface is attached. network-interface.attachment.instance-id - The ID of the instance to which the network interface is attached. network-interface.attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached. network-interface.attachment.network-card-index - The index of the network card. network-interface.attachment.status - The status of the attachment (attaching | attached | detaching | detached). network-interface.availability-zone - The Availability Zone for the network interface. network-interface.deny-all-igw-traffic - A Boolean that indicates whether a network interface with an IPv6 address is unreachable from the public internet. network-interface.description - The description of the network interface. network-interface.group-id - The ID of a security group associated with the network interface. network-interface.group-name - The name of a security group associated with the network interface. network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface. network-interface.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with the network interface. network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this is the primary IPv6 address. network-interface.ipv6-native - A Boolean that indicates whether this is an IPv6 only network interface. network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface. network-interface.mac-address - The MAC address of the network interface. network-interface.network-interface-id - The ID of the network interface. network-interface.outpost-arn - The ARN of the Outpost. network-interface.owner-id - The ID of the owner of the network interface. network-interface.private-dns-name - The private DNS name of the network interface. network-interface.private-ip-address - The private IPv4 address. network-interface.public-dns-name - The public DNS name. network-interface.requester-id - The requester ID for the network interface. network-interface.requester-managed - Indicates whether the network interface is being managed by Amazon Web Services. 
network-interface.status - The status of the network interface (available) | in-use). network-interface.source-dest-check - Whether the network interface performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. network-interface.subnet-id - The ID of the subnet for the network interface. network-interface.tag-key - The key of a tag assigned to the network interface. network-interface.tag-value - The value of a tag assigned to the network interface. network-interface.vpc-id - The ID of the VPC for the network interface. outpost-arn - The Amazon Resource Name (ARN) of the Outpost. owner-id - The Amazon Web Services account ID of the instance owner. placement-group-name - The name of the placement group for the instance. placement-partition-number - The partition in which the instance is located. platform - The platform. To list only Windows instances, use windows. platform-details - The platform (Linux/UNIX | Red Hat BYOL Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Red Hat Enterprise Linux with SQL Server Standard and HA | Red Hat Enterprise Linux with SQL Server Enterprise and HA | Red Hat Enterprise Linux with SQL Server Standard | Red Hat Enterprise Linux with SQL Server Web | Red Hat Enterprise Linux with SQL Server Enterprise | SQL Server Enterprise | SQL Server Standard | SQL Server Web | SUSE Linux | Ubuntu Pro | Windows | Windows BYOL | Windows with SQL Server Enterprise | Windows with SQL Server Standard | Windows with SQL Server Web). private-dns-name - The private IPv4 DNS name of the instance. private-dns-name-options.enable-resource-name-dns-a-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS A records. private-dns-name-options.enable-resource-name-dns-aaaa-record - A Boolean that indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records. private-dns-name-options.hostname-type - The type of hostname (ip-name | resource-name). private-ip-address - The private IPv4 address of the instance. This can only be used to filter by the primary IP address of the network interface attached to the instance. To filter by additional IP addresses assigned to the network interface, use the filter network-interface.addresses.private-ip-address. product-code - The product code associated with the AMI used to launch the instance. product-code.type - The type of product code (devpay | marketplace). ramdisk-id - The RAM disk ID. reason - The reason for the current state of the instance (for example, shows "User Initiated [date]" when you stop or terminate the instance). Similar to the state-reason-code filter. requester-id - The ID of the entity that launched the instance on your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so on). reservation-id - The ID of the instance's reservation. A reservation ID is created any time you launch an instance. A reservation ID has a one-to-one relationship with an instance launch request, but can be associated with more than one instance if you launch multiple instances using the same launch request. For example, if you launch one instance, you get one reservation ID. If you launch ten instances using the same launch request, you also get one reservation ID. root-device-name - The device name of the root device volume (for example, /dev/sda1). 
root-device-type - The type of the root device volume (ebs | instance-store). source-dest-check - Indicates whether the instance performs source/destination checking. A value of true means that checking is enabled, and false means that checking is disabled. The value must be false for the instance to perform network address translation (NAT) in your VPC. spot-instance-request-id - The ID of the Spot Instance request. state-reason-code - The reason code for the state change. state-reason-message - A message that describes the state change. subnet-id - The ID of the subnet for the instance. tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. tenancy - The tenancy of an instance (dedicated | default | host). tpm-support - Indicates if the instance is configured for NitroTPM support (v2.0). usage-operation - The usage operation value for the instance (RunInstances | RunInstances:00g0 | RunInstances:0010 | RunInstances:1010 | RunInstances:1014 | RunInstances:1110 | RunInstances:0014 | RunInstances:0210 | RunInstances:0110 | RunInstances:0100 | RunInstances:0004 | RunInstances:0200 | RunInstances:000g | RunInstances:0g00 | RunInstances:0002 | RunInstances:0800 | RunInstances:0102 | RunInstances:0006 | RunInstances:0202). usage-operation-update-time - The time that the usage operation was last updated, for example, 2022-09-15T17:15:20.000Z. virtualization-type - The virtualization type of the instance (paravirtual | hvm). vpc-id - The ID of the VPC that the instance is running in. @OptionalCustomCoding> public var filters: [Filter]? /// The instance IDs. Default: Describes all your instances. @@ -22100,7 +22339,7 @@ extension EC2 { public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "Filter" } public struct _GroupIdsEncoding: ArrayCoderProperties { public static let member = "GroupId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The filters. group-name - The name of the placement group. group-arn - The Amazon Resource Name (ARN) of the placement group. spread-level - The spread level for the placement group (host | rack). state - The state of the placement group (pending | available | deleting | deleted). strategy - The strategy of the placement group (cluster | spread | partition). tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. 
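To make the DescribeInstances filter documentation above concrete, here is a minimal Soto sketch (helper name and tag values are illustrative) that lists running instances tagged Owner=TeamA; pagination via nextToken is omitted for brevity.

import SotoEC2

// Illustrative sketch: list running instances tagged Owner=TeamA using the filters documented above.
func findTeamInstances(ec2: EC2) async throws -> [EC2.Instance] {
    let request = EC2.DescribeInstancesRequest(filters: [
        .init(name: "tag:Owner", values: ["TeamA"]),
        .init(name: "instance-state-name", values: ["running"]),
    ])
    let response = try await ec2.describeInstances(request)
    // Each Reservation groups the instances launched by a single launch request.
    return (response.reservations ?? []).flatMap { $0.instances ?? [] }
}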
Use this filter to find all resources that have a tag with a specific key, regardless of the tag value. @OptionalCustomCoding> @@ -26974,6 +27213,48 @@ extension EC2 { } } + public struct DisassociateCapacityReservationBillingOwnerRequest: AWSEncodableShape { + /// The ID of the Capacity Reservation. + public let capacityReservationId: String? + /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The ID of the consumer account to which the request was sent. + public let unusedReservationBillingOwnerId: String? + + @inlinable + public init(capacityReservationId: String? = nil, dryRun: Bool? = nil, unusedReservationBillingOwnerId: String? = nil) { + self.capacityReservationId = capacityReservationId + self.dryRun = dryRun + self.unusedReservationBillingOwnerId = unusedReservationBillingOwnerId + } + + public func validate(name: String) throws { + try self.validate(self.unusedReservationBillingOwnerId, name: "unusedReservationBillingOwnerId", parent: name, max: 12) + try self.validate(self.unusedReservationBillingOwnerId, name: "unusedReservationBillingOwnerId", parent: name, min: 12) + try self.validate(self.unusedReservationBillingOwnerId, name: "unusedReservationBillingOwnerId", parent: name, pattern: "^[0-9]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case capacityReservationId = "CapacityReservationId" + case dryRun = "DryRun" + case unusedReservationBillingOwnerId = "UnusedReservationBillingOwnerId" + } + } + + public struct DisassociateCapacityReservationBillingOwnerResult: AWSDecodableShape { + /// Returns true if the request succeeds; otherwise, it returns an error. + public let `return`: Bool? + + @inlinable + public init(return: Bool? = nil) { + self.`return` = `return` + } + + private enum CodingKeys: String, CodingKey { + case `return` = "return" + } + } + public struct DisassociateClientVpnTargetNetworkRequest: AWSEncodableShape { /// The ID of the target network association. public let associationId: String? @@ -28042,7 +28323,7 @@ extension EC2 { } public struct ElasticGpuSpecificationResponse: AWSDecodableShape { - /// Deprecated. Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances. + /// The elastic GPU type. public let type: String? @inlinable @@ -30930,7 +31211,7 @@ extension EC2 { } public struct GetConsoleOutputRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the instance. public let instanceId: String? @@ -30974,7 +31255,7 @@ extension EC2 { } public struct GetConsoleScreenshotRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
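The new DisassociateCapacityReservationBillingOwner shapes above could be exercised roughly as follows. This is a sketch: the disassociateCapacityReservationBillingOwner client method is assumed to be generated alongside these shapes, and the helper name is illustrative. Note that validate() rejects anything other than a 12-digit account ID.

import SotoEC2

// Illustrative sketch: as the Capacity Reservation owner, stop sharing billing with a consumer account.
func stopBillingShare(ec2: EC2, reservationId: String, consumerAccountId: String) async throws -> Bool {
    let request = EC2.DisassociateCapacityReservationBillingOwnerRequest(
        capacityReservationId: reservationId,
        unusedReservationBillingOwnerId: consumerAccountId  // must be a 12-digit account ID (enforced by validate())
    )
    let result = try await ec2.disassociateCapacityReservationBillingOwner(request)
    return result.`return` ?? false
}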
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the instance. public let instanceId: String? @@ -31014,7 +31295,7 @@ extension EC2 { } public struct GetDefaultCreditSpecificationRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The instance family. public let instanceFamily: UnlimitedSupportedInstanceFamily? @@ -31283,7 +31564,7 @@ extension EC2 { } public struct GetInstanceMetadataDefaultsRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? @inlinable @@ -31430,7 +31711,7 @@ extension EC2 { } public struct GetInstanceUefiDataRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the instance from which to retrieve the UEFI data. public let instanceId: String? @@ -32162,7 +32443,7 @@ extension EC2 { } public struct GetPasswordDataRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the Windows instance. public let instanceId: String? @@ -34817,10 +35098,10 @@ extension EC2 { public let currentInstanceBootMode: InstanceBootModeValues? /// Indicates whether the instance is optimized for Amazon EBS I/O. 
This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance. public let ebsOptimized: Bool? - /// Deprecated. Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances. + /// Deprecated. Amazon Elastic Graphics reached end of life on January 8, 2024. @OptionalCustomCoding> public var elasticGpuAssociations: [ElasticGpuAssociation]? - /// The elastic inference accelerator associated with the instance. + /// Deprecated Amazon Elastic Inference is no longer available. @OptionalCustomCoding> public var elasticInferenceAcceleratorAssociations: [ElasticInferenceAcceleratorAssociation]? /// Specifies whether enhanced networking with ENA is enabled. @@ -36059,7 +36340,7 @@ extension EC2 { public var acceleratorNames: [AcceleratorName]? /// The minimum and maximum amount of total accelerator memory, in MiB. Default: No minimum or maximum limits public let acceleratorTotalMemoryMiB: AcceleratorTotalMemoryMiB? - /// The accelerator types that must be on the instance type. For instance types with GPU accelerators, specify gpu. For instance types with FPGA accelerators, specify fpga. For instance types with inference accelerators, specify inference. Default: Any accelerator type + /// The accelerator types that must be on the instance type. For instance types with GPU accelerators, specify gpu. For instance types with FPGA accelerators, specify fpga. Default: Any accelerator type @OptionalCustomCoding> public var acceleratorTypes: [AcceleratorType]? /// The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*,Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, Amazon EC2 will allow all the M5a instance types, but not the M5n instance types. If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes. Default: All instance types @@ -36197,7 +36478,7 @@ extension EC2 { public var acceleratorNames: [AcceleratorName]? /// The minimum and maximum amount of total accelerator memory, in MiB. Default: No minimum or maximum limits public let acceleratorTotalMemoryMiB: AcceleratorTotalMemoryMiBRequest? - /// The accelerator types that must be on the instance type. To include instance types with GPU hardware, specify gpu. To include instance types with FPGA hardware, specify fpga. To include instance types with inference hardware, specify inference. Default: Any accelerator type + /// The accelerator types that must be on the instance type. To include instance types with GPU hardware, specify gpu. To include instance types with FPGA hardware, specify fpga. Default: Any accelerator type @OptionalCustomCoding> public var acceleratorTypes: [AcceleratorType]? /// The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. 
You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*,Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, Amazon EC2 will allow all the M5a instance types, but not the M5n instance types. If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes. Default: All instance types @@ -37255,11 +37536,13 @@ extension EC2 { public let resourceType: IpamResourceType? /// The last successful resource discovery time. public let sampleTime: Date? + /// The subnet ID. + public let subnetId: String? /// The VPC ID. public let vpcId: String? @inlinable - public init(availabilityZoneId: String? = nil, ipamResourceDiscoveryId: String? = nil, ipSource: IpamResourceCidrIpSource? = nil, ipUsage: Double? = nil, networkInterfaceAttachmentStatus: IpamNetworkInterfaceAttachmentStatus? = nil, resourceCidr: String? = nil, resourceId: String? = nil, resourceOwnerId: String? = nil, resourceRegion: String? = nil, resourceTags: [IpamResourceTag]? = nil, resourceType: IpamResourceType? = nil, sampleTime: Date? = nil, vpcId: String? = nil) { + public init(availabilityZoneId: String? = nil, ipamResourceDiscoveryId: String? = nil, ipSource: IpamResourceCidrIpSource? = nil, ipUsage: Double? = nil, networkInterfaceAttachmentStatus: IpamNetworkInterfaceAttachmentStatus? = nil, resourceCidr: String? = nil, resourceId: String? = nil, resourceOwnerId: String? = nil, resourceRegion: String? = nil, resourceTags: [IpamResourceTag]? = nil, resourceType: IpamResourceType? = nil, sampleTime: Date? = nil, subnetId: String? = nil, vpcId: String? = nil) { self.availabilityZoneId = availabilityZoneId self.ipamResourceDiscoveryId = ipamResourceDiscoveryId self.ipSource = ipSource @@ -37272,6 +37555,7 @@ extension EC2 { self.resourceTags = resourceTags self.resourceType = resourceType self.sampleTime = sampleTime + self.subnetId = subnetId self.vpcId = vpcId } @@ -37288,6 +37572,7 @@ extension EC2 { case resourceTags = "resourceTagSet" case resourceType = "resourceType" case sampleTime = "sampleTime" + case subnetId = "subnetId" case vpcId = "vpcId" } } @@ -40764,7 +41049,7 @@ extension EC2 { public struct ModifyDefaultCreditSpecificationRequest: AWSEncodableShape { /// The credit option for CPU usage of the instance family. Valid Values: standard | unlimited public let cpuCredits: String? - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The instance family. public let instanceFamily: UnlimitedSupportedInstanceFamily? @@ -41147,7 +41432,7 @@ extension EC2 { public let disableApiStop: AttributeBooleanValue? /// If the value is true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot Instances. public let disableApiTermination: AttributeBooleanValue? 
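As a small usage sketch for the ModifyInstanceAttribute request shown above (helper name illustrative), enabling termination protection for an instance looks like this:

import SotoEC2

// Illustrative sketch: turn on termination protection via ModifyInstanceAttribute.
func protectFromTermination(ec2: EC2, instanceId: String) async throws {
    let request = EC2.ModifyInstanceAttributeRequest(
        disableApiTermination: .init(value: true),  // AttributeBooleanValue wrapper
        instanceId: instanceId
    )
    try await ec2.modifyInstanceAttribute(request)
}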
- /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// Specifies whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS Optimized instance. public let ebsOptimized: AttributeBooleanValue? @@ -41253,12 +41538,62 @@ extension EC2 { } } + public struct ModifyInstanceCpuOptionsRequest: AWSEncodableShape { + /// The number of CPU cores to activate for the specified instance. + public let coreCount: Int? + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + /// The ID of the instance to update. + public let instanceId: String? + /// The number of threads to run for each CPU core. + public let threadsPerCore: Int? + + @inlinable + public init(coreCount: Int? = nil, dryRun: Bool? = nil, instanceId: String? = nil, threadsPerCore: Int? = nil) { + self.coreCount = coreCount + self.dryRun = dryRun + self.instanceId = instanceId + self.threadsPerCore = threadsPerCore + } + + private enum CodingKeys: String, CodingKey { + case coreCount = "CoreCount" + case dryRun = "DryRun" + case instanceId = "InstanceId" + case threadsPerCore = "ThreadsPerCore" + } + } + + public struct ModifyInstanceCpuOptionsResult: AWSDecodableShape { + /// The number of CPU cores that are running for the specified instance after the + /// update. + public let coreCount: Int? + /// The ID of the instance that was updated. + public let instanceId: String? + /// The number of threads that are running per CPU core for the specified + /// instance after the update. + public let threadsPerCore: Int? + + @inlinable + public init(coreCount: Int? = nil, instanceId: String? = nil, threadsPerCore: Int? = nil) { + self.coreCount = coreCount + self.instanceId = instanceId + self.threadsPerCore = threadsPerCore + } + + private enum CodingKeys: String, CodingKey { + case coreCount = "coreCount" + case instanceId = "instanceId" + case threadsPerCore = "threadsPerCore" + } + } + public struct ModifyInstanceCreditSpecificationRequest: AWSEncodableShape { public struct _InstanceCreditSpecificationsEncoding: ArrayCoderProperties { public static let member = "item" } /// A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency. public let clientToken: String? - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. 
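A hedged sketch for the new ModifyInstanceCpuOptions shapes above, assuming the matching modifyInstanceCpuOptions client method is generated with them. The core/thread counts are illustrative and the instance must be stopped and support the requested topology.

import SotoEC2

// Illustrative sketch: trim the CPU topology of an instance using the new shapes above.
func setCpuOptions(ec2: EC2, instanceId: String) async throws {
    let request = EC2.ModifyInstanceCpuOptionsRequest(
        coreCount: 2,        // number of CPU cores to activate
        instanceId: instanceId,
        threadsPerCore: 1    // one thread per core (SMT off)
    )
    let result = try await ec2.modifyInstanceCpuOptions(request)
    print("Now running \(result.coreCount ?? 0) cores x \(result.threadsPerCore ?? 0) threads")
}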
+ /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// Information about the credit option for CPU usage. @OptionalCustomCoding> @@ -41302,7 +41637,7 @@ extension EC2 { } public struct ModifyInstanceEventStartTimeRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the event whose date and time you are modifying. public let instanceEventId: String? @@ -41433,7 +41768,7 @@ extension EC2 { } public struct ModifyInstanceMetadataDefaultsRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// Enables or disables the IMDS endpoint on an instance. When disabled, the instance metadata can't be accessed. public let httpEndpoint: DefaultInstanceMetadataEndpointState? @@ -42599,7 +42934,8 @@ extension EC2 { /// Removes CIDR blocks for the transit gateway. @OptionalCustomCoding> public var removeTransitGatewayCidrBlocks: [String]? - /// This parameter is in preview and may not be available for your account. Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature. + /// Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management. + /// This option is disabled by default. For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide. public let securityGroupReferencingSupport: SecurityGroupReferencingSupportValue? /// Enable or disable Equal Cost Multipath Protocol support. public let vpnEcmpSupport: VpnEcmpSupportValue? @@ -42760,7 +43096,8 @@ extension EC2 { public let dnsSupport: DnsSupportValue? /// Enable or disable IPv6 support. The default is enable. public let ipv6Support: Ipv6SupportValue? - /// This parameter is in preview and may not be available for your account. 
Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature. + /// Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management. + /// This option is disabled by default. For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide. public let securityGroupReferencingSupport: SecurityGroupReferencingSupportValue? @inlinable @@ -44024,7 +44361,7 @@ extension EC2 { public struct MonitorInstancesRequest: AWSEncodableShape { public struct _InstanceIdsEncoding: ArrayCoderProperties { public static let member = "InstanceId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The IDs of the instances. @OptionalCustomCoding> @@ -47119,7 +47456,7 @@ extension EC2 { public struct RebootInstancesRequest: AWSEncodableShape { public struct _InstanceIdsEncoding: ArrayCoderProperties { public static let member = "InstanceId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The instance IDs. @OptionalCustomCoding> @@ -47455,6 +47792,38 @@ extension EC2 { } } + public struct RejectCapacityReservationBillingOwnershipRequest: AWSEncodableShape { + /// The ID of the Capacity Reservation for which to reject the request. + public let capacityReservationId: String? + /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + public let dryRun: Bool? + + @inlinable + public init(capacityReservationId: String? = nil, dryRun: Bool? = nil) { + self.capacityReservationId = capacityReservationId + self.dryRun = dryRun + } + + private enum CodingKeys: String, CodingKey { + case capacityReservationId = "CapacityReservationId" + case dryRun = "DryRun" + } + } + + public struct RejectCapacityReservationBillingOwnershipResult: AWSDecodableShape { + /// Returns true if the request succeeds; otherwise, it returns an error. + public let `return`: Bool? 
+ + @inlinable + public init(return: Bool? = nil) { + self.`return` = `return` + } + + private enum CodingKeys: String, CodingKey { + case `return` = "return" + } + } + public struct RejectTransitGatewayMulticastDomainAssociationsRequest: AWSEncodableShape { public struct _SubnetIdsEncoding: ArrayCoderProperties { public static let member = "item" } @@ -48149,7 +48518,7 @@ extension EC2 { /// Descriptive text about the health state of your instance. public let description: String? - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The time at which the reported instance health state ended. public let endTime: Date? @@ -48164,6 +48533,18 @@ extension EC2 { /// The status of all instances listed. public let status: ReportStatusType? + @inlinable + public init(dryRun: Bool? = nil, endTime: Date? = nil, instances: [String]? = nil, reasonCodes: [ReportInstanceReasonCodes]? = nil, startTime: Date? = nil, status: ReportStatusType? = nil) { + self.description = nil + self.dryRun = dryRun + self.endTime = endTime + self.instances = instances + self.reasonCodes = reasonCodes + self.startTime = startTime + self.status = status + } + + @available(*, deprecated, message: "Members description have been deprecated") @inlinable public init(description: String? = nil, dryRun: Bool? = nil, endTime: Date? = nil, instances: [String]? = nil, reasonCodes: [ReportInstanceReasonCodes]? = nil, startTime: Date? = nil, status: ReportStatusType? = nil) { self.description = description @@ -48254,10 +48635,10 @@ extension EC2 { public let disableApiTermination: Bool? /// Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance. public let ebsOptimized: Bool? - /// Deprecated. Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances. + /// Deprecated. Amazon Elastic Graphics reached end of life on January 8, 2024. @OptionalCustomCoding> public var elasticGpuSpecifications: [ElasticGpuSpecification]? - /// An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads. You cannot specify accelerators from different generations in the same request. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. 
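The RejectCapacityReservationBillingOwnership shapes above could be used like this from the consumer account; the client method name is assumed to be generated with the shapes and the helper name is illustrative.

import SotoEC2

// Illustrative sketch: reject a Capacity Reservation billing ownership request sent to this account.
func rejectBillingRequest(ec2: EC2, reservationId: String) async throws -> Bool {
    let request = EC2.RejectCapacityReservationBillingOwnershipRequest(capacityReservationId: reservationId)
    let result = try await ec2.rejectCapacityReservationBillingOwnership(request)
    return result.`return` ?? false
}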
However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Deprecated. Amazon Elastic Inference is no longer available. @OptionalCustomCoding> public var elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAccelerator]? /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, see What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide. You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same instance. @@ -49205,7 +49586,7 @@ extension EC2 { public struct ResetInstanceAttributeRequest: AWSEncodableShape { /// The attribute to reset. You can only reset the following attributes: kernel | ramdisk | sourceDestCheck. public let attribute: InstanceAttributeName? - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the instance. public let instanceId: String? @@ -49357,10 +49738,10 @@ extension EC2 { public let disableApiTermination: Bool? /// Indicates whether the instance is optimized for Amazon EBS I/O. public let ebsOptimized: Bool? - /// Deprecated. Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances. + /// Deprecated. Amazon Elastic Graphics reached end of life on January 8, 2024. @OptionalCustomCoding> public var elasticGpuSpecifications: [ElasticGpuSpecificationResponse]? - /// An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads. You cannot specify accelerators from different generations in the same request. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Deprecated. Amazon Elastic Inference is no longer available. @OptionalCustomCoding> public var elasticInferenceAccelerators: [LaunchTemplateElasticInferenceAcceleratorResponse]? /// Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. @@ -50194,14 +50575,14 @@ extension EC2 { public let disableApiStop: Bool? /// If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use ModifyInstanceAttribute. 
Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance. Default: false public let disableApiTermination: Bool? - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance. Default: false public let ebsOptimized: Bool? /// An elastic GPU to associate with the instance. Amazon Elastic Graphics reached end of life on January 8, 2024. @OptionalCustomCoding> public var elasticGpuSpecification: [ElasticGpuSpecification]? - /// An elastic inference accelerator to associate with the instance. Amazon Elastic Inference (EI) is no longer available to new customers. For more information, see Amazon Elastic Inference FAQs. + /// An elastic inference accelerator to associate with the instance. Amazon Elastic Inference is no longer available. @OptionalCustomCoding> public var elasticInferenceAccelerators: [ElasticInferenceAccelerator]? /// If you’re launching an instance into a dual-stack or IPv6-only subnet, you can enable assigning a primary IPv6 address. A primary IPv6 address is an IPv6 GUA address associated with an ENI that you have enabled to use a primary IPv6 address. Use this option if an instance relies on its IPv6 address not changing. When you launch the instance, Amazon Web Services will automatically assign an IPv6 address associated with the ENI attached to your instance to be the primary IPv6 address. Once you enable an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. If you have multiple IPv6 addresses associated with an ENI attached to your instance and you enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI becomes the primary IPv6 address. @@ -51383,7 +51764,7 @@ extension EC2 { } public struct SendDiagnosticInterruptRequest: AWSEncodableShape { - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The ID of the instance. public let instanceId: String? 
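For the SendDiagnosticInterrupt request just above, a minimal sketch (helper name illustrative):

import SotoEC2

// Illustrative sketch: send a diagnostic interrupt (the equivalent of an NMI) to an unresponsive instance.
func sendDiagnosticInterrupt(ec2: EC2, instanceId: String) async throws {
    try await ec2.sendDiagnosticInterrupt(.init(instanceId: instanceId))
}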
@@ -51871,7 +52252,7 @@ extension EC2 { } public struct SnapshotTaskDetail: AWSDecodableShape { - /// The description of the snapshot. + /// The description of the disk image being imported. public let description: String? /// The size of the disk in the snapshot, in GiB. public let diskImageSize: Double? @@ -52751,7 +53132,7 @@ extension EC2 { /// Reserved. public let additionalInfo: String? - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The IDs of the instances. @OptionalCustomCoding> @@ -52946,7 +53327,7 @@ extension EC2 { public struct StopInstancesRequest: AWSEncodableShape { public struct _InstanceIdsEncoding: ArrayCoderProperties { public static let member = "InstanceId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// Forces the instances to stop. The instances do not have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures. This option is not recommended for Windows instances. Default: false public let force: Bool? @@ -53676,7 +54057,7 @@ extension EC2 { public struct TerminateInstancesRequest: AWSEncodableShape { public struct _InstanceIdsEncoding: ArrayCoderProperties { public static let member = "InstanceId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The IDs of the instances. Constraints: Up to 1000 instance IDs. We recommend breaking up this request into smaller batches. @OptionalCustomCoding> @@ -54655,7 +55036,8 @@ extension EC2 { public let multicastSupport: MulticastSupportValue? /// The ID of the default propagation route table. public let propagationDefaultRouteTableId: String? - /// This parameter is in preview and may not be available for your account. Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. 
You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature. + /// Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management. + /// This option is disabled by default. public let securityGroupReferencingSupport: SecurityGroupReferencingSupportValue? /// The transit gateway CIDR blocks. @OptionalCustomCoding> @@ -54998,7 +55380,8 @@ extension EC2 { public let dnsSupport: DnsSupportValue? /// Indicates whether multicast is enabled on the transit gateway public let multicastSupport: MulticastSupportValue? - /// This parameter is in preview and may not be available for your account. Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature. + /// Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management. + /// This option is disabled by default. For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide. public let securityGroupReferencingSupport: SecurityGroupReferencingSupportValue? /// One or more IPv4 or IPv6 CIDR blocks for the transit gateway. Must be a size /24 CIDR block or larger for IPv4, or a size /64 CIDR block or larger for IPv6. @OptionalCustomCoding> @@ -55341,7 +55724,7 @@ extension EC2 { public let dnsSupport: DnsSupportValue? /// Indicates whether IPv6 support is disabled. public let ipv6Support: Ipv6SupportValue? - /// This parameter is in preview and may not be available for your account. Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature. + /// Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management. This option is enabled by default. For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide. public let securityGroupReferencingSupport: SecurityGroupReferencingSupportValue? @inlinable @@ -55681,7 +56064,7 @@ extension EC2 { public struct UnmonitorInstancesRequest: AWSEncodableShape { public struct _InstanceIdsEncoding: ArrayCoderProperties { public static let member = "InstanceId" } - /// Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
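The securityGroupReferencingSupport option described above is set through the existing transit gateway attachment APIs; a minimal sketch using ModifyTransitGatewayVpcAttachment (attachment ID and helper name illustrative):

import SotoEC2

// Illustrative sketch: enable security group referencing on a transit gateway VPC attachment.
func enableSecurityGroupReferencing(ec2: EC2, attachmentId: String) async throws {
    let request = EC2.ModifyTransitGatewayVpcAttachmentRequest(
        options: .init(securityGroupReferencingSupport: .enable),
        transitGatewayAttachmentId: attachmentId
    )
    _ = try await ec2.modifyTransitGatewayVpcAttachment(request)
}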
If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + /// Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. public let dryRun: Bool? /// The IDs of the instances. @OptionalCustomCoding> diff --git a/Sources/Soto/Services/ECR/ECR_shapes.swift b/Sources/Soto/Services/ECR/ECR_shapes.swift index 8114f70a05..642b767edc 100644 --- a/Sources/Soto/Services/ECR/ECR_shapes.swift +++ b/Sources/Soto/Services/ECR/ECR_shapes.swift @@ -1440,7 +1440,7 @@ extension ECR { } public struct EncryptionConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The encryption type to use. If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. If you use the KMS_DSSE encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the KMS Management Service key stored in KMS. Similar to the KMS encryption type, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you've already created. If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the Amazon Simple Storage Service Console Developer Guide. + /// The encryption type to use. If you use the KMS encryption type, the contents of the repository will be encrypted using server-side encryption with Key Management Service key stored in KMS. When you use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you already created. If you use the KMS_DSSE encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the KMS Management Service key stored in KMS. Similar to the KMS encryption type, you can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS key, which you've already created. If you use the AES256 encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm. For more information, see Amazon ECR encryption at rest in the Amazon Elastic Container Registry User Guide. public let encryptionType: EncryptionType /// If you use the KMS encryption type, specify the KMS key to use for encryption. The alias, key ID, or full ARN of the KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default Amazon Web Services managed KMS key for Amazon ECR will be used. public let kmsKey: String? @@ -1490,10 +1490,14 @@ extension ECR { public let awsAccountId: String? /// The description of the finding. public let description: String? 
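To illustrate the EncryptionConfiguration documentation above, a minimal sketch that creates a KMS-encrypted repository (repository name, KMS key ARN, and helper name are placeholders):

import SotoECR

// Illustrative sketch: create an ECR repository encrypted with a customer managed KMS key.
func createEncryptedRepo(ecr: ECR, name: String, kmsKeyArn: String) async throws {
    let request = ECR.CreateRepositoryRequest(
        encryptionConfiguration: .init(encryptionType: .kms, kmsKey: kmsKeyArn),
        repositoryName: name
    )
    let response = try await ecr.createRepository(request)
    print("Created \(response.repository?.repositoryUri ?? "?")")
}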
+ /// If a finding discovered in your environment has an exploit available. + public let exploitAvailable: String? /// The Amazon Resource Number (ARN) of the finding. public let findingArn: String? /// The date and time that the finding was first observed. public let firstObservedAt: Date? + /// Details on whether a fix is available through a version update. This value can be YES, NO, or PARTIAL. A PARTIAL fix means that some, but not all, of the packages identified in the finding have fixes available through updated versions. + public let fixAvailable: String? /// The date and time that the finding was last observed. public let lastObservedAt: Date? /// An object that contains the details of a package vulnerability finding. @@ -1518,11 +1522,13 @@ extension ECR { public let updatedAt: Date? @inlinable - public init(awsAccountId: String? = nil, description: String? = nil, findingArn: String? = nil, firstObservedAt: Date? = nil, lastObservedAt: Date? = nil, packageVulnerabilityDetails: PackageVulnerabilityDetails? = nil, remediation: Remediation? = nil, resources: [Resource]? = nil, score: Double? = nil, scoreDetails: ScoreDetails? = nil, severity: String? = nil, status: String? = nil, title: String? = nil, type: String? = nil, updatedAt: Date? = nil) { + public init(awsAccountId: String? = nil, description: String? = nil, exploitAvailable: String? = nil, findingArn: String? = nil, firstObservedAt: Date? = nil, fixAvailable: String? = nil, lastObservedAt: Date? = nil, packageVulnerabilityDetails: PackageVulnerabilityDetails? = nil, remediation: Remediation? = nil, resources: [Resource]? = nil, score: Double? = nil, scoreDetails: ScoreDetails? = nil, severity: String? = nil, status: String? = nil, title: String? = nil, type: String? = nil, updatedAt: Date? = nil) { self.awsAccountId = awsAccountId self.description = description + self.exploitAvailable = exploitAvailable self.findingArn = findingArn self.firstObservedAt = firstObservedAt + self.fixAvailable = fixAvailable self.lastObservedAt = lastObservedAt self.packageVulnerabilityDetails = packageVulnerabilityDetails self.remediation = remediation @@ -1539,8 +1545,10 @@ extension ECR { private enum CodingKeys: String, CodingKey { case awsAccountId = "awsAccountId" case description = "description" + case exploitAvailable = "exploitAvailable" case findingArn = "findingArn" case firstObservedAt = "firstObservedAt" + case fixAvailable = "fixAvailable" case lastObservedAt = "lastObservedAt" case packageVulnerabilityDetails = "packageVulnerabilityDetails" case remediation = "remediation" @@ -3789,6 +3797,8 @@ extension ECR { public let epoch: Int? /// The file path of the vulnerable package. public let filePath: String? + /// The version of the package that contains the vulnerability fix. + public let fixedInVersion: String? /// The name of the vulnerable package. public let name: String? /// The package manager of the vulnerable package. @@ -3801,10 +3811,11 @@ extension ECR { public let version: String? @inlinable - public init(arch: String? = nil, epoch: Int? = nil, filePath: String? = nil, name: String? = nil, packageManager: String? = nil, release: String? = nil, sourceLayerHash: String? = nil, version: String? = nil) { + public init(arch: String? = nil, epoch: Int? = nil, filePath: String? = nil, fixedInVersion: String? = nil, name: String? = nil, packageManager: String? = nil, release: String? = nil, sourceLayerHash: String? = nil, version: String? 
= nil) { self.arch = arch self.epoch = epoch self.filePath = filePath + self.fixedInVersion = fixedInVersion self.name = name self.packageManager = packageManager self.release = release @@ -3816,6 +3827,7 @@ extension ECR { case arch = "arch" case epoch = "epoch" case filePath = "filePath" + case fixedInVersion = "fixedInVersion" case name = "name" case packageManager = "packageManager" case release = "release" diff --git a/Sources/Soto/Services/ECS/ECS_api.swift b/Sources/Soto/Services/ECS/ECS_api.swift index 1f1ae08c73..963b658e7c 100644 --- a/Sources/Soto/Services/ECS/ECS_api.swift +++ b/Sources/Soto/Services/ECS/ECS_api.swift @@ -211,7 +211,7 @@ public struct ECS: AWSService { /// Runs and maintains your desired number of tasks from a specified task definition. If /// the number of tasks running in a service drops below the desiredCount, /// Amazon ECS runs another copy of the task in the specified cluster. To update an existing - /// service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. In addition to maintaining the desired count of tasks in your service, you can + /// service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can /// optionally run your service behind one or more load balancers. The load balancers /// distribute traffic across the tasks that are associated with the service. For more /// information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or @@ -267,7 +267,7 @@ public struct ECS: AWSService { /// can specify only parameters that aren't controlled at the task set level. The only /// required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For /// information about task placement and task placement strategies, see Amazon ECS - /// task placement in the Amazon Elastic Container Service Developer Guide Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// task placement in the Amazon Elastic Container Service Developer Guide @Sendable @inlinable public func createService(_ input: CreateServiceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateServiceResponse { @@ -283,7 +283,7 @@ public struct ECS: AWSService { /// Runs and maintains your desired number of tasks from a specified task definition. 
If /// the number of tasks running in a service drops below the desiredCount, /// Amazon ECS runs another copy of the task in the specified cluster. To update an existing - /// service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. In addition to maintaining the desired count of tasks in your service, you can + /// service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can /// optionally run your service behind one or more load balancers. The load balancers /// distribute traffic across the tasks that are associated with the service. For more /// information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or @@ -339,7 +339,7 @@ public struct ECS: AWSService { /// can specify only parameters that aren't controlled at the task set level. The only /// required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For /// information about task placement and task placement strategies, see Amazon ECS - /// task placement in the Amazon Elastic Container Service Developer Guide Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// task placement in the Amazon Elastic Container Service Developer Guide /// /// Parameters: /// - capacityProviderStrategy: The capacity provider strategy to use for the service. If a capacityProviderStrategy is specified, the launchType @@ -2059,10 +2059,10 @@ public struct ECS: AWSService { return try await self.registerTaskDefinition(input, logger: logger) } - /// Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places + /// Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. 
You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places /// tasks using placement constraints and placement strategies. For more information, see /// Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or - /// place tasks manually on specific container instances. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or + /// place tasks manually on specific container instances. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or /// updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the /// distributed nature of the system supporting the API. This means that the result of an /// API command you run that affects your Amazon ECS resources might not be immediately visible @@ -2088,10 +2088,10 @@ public struct ECS: AWSService { logger: logger ) } - /// Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places + /// Starts a new task using the specified task definition. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places /// tasks using placement constraints and placement strategies. For more information, see /// Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. Alternatively, you can use StartTask to use your own scheduler or - /// place tasks manually on specific container instances. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or + /// place tasks manually on specific container instances. 
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or /// updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. The Amazon ECS API follows an eventual consistency model. This is because of the /// distributed nature of the system supporting the API. This means that the result of an /// API command you run that affects your Amazon ECS resources might not be immediately visible @@ -2175,7 +2175,7 @@ public struct ECS: AWSService { } /// Starts a new task from the specified task definition on the specified container - /// instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. Alternatively, you can useRunTask to place tasks for you. For more + /// instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. Alternatively, you can useRunTask to place tasks for you. For more /// information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or /// updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. @Sendable @@ -2191,7 +2191,7 @@ public struct ECS: AWSService { ) } /// Starts a new task from the specified task definition on the specified container - /// instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. Alternatively, you can useRunTask to place tasks for you. For more + /// instance or instances. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. 
Alternatively, you can useRunTask to place tasks for you. For more /// information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or /// updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. /// diff --git a/Sources/Soto/Services/ECS/ECS_shapes.swift b/Sources/Soto/Services/ECS/ECS_shapes.swift index 171c6768e9..0d697635f5 100644 --- a/Sources/Soto/Services/ECS/ECS_shapes.swift +++ b/Sources/Soto/Services/ECS/ECS_shapes.swift @@ -1003,13 +1003,13 @@ extension ECS { public struct ContainerDefinition: AWSEncodableShape & AWSDecodableShape { /// The command that's passed to the container. This parameter maps to Cmd in - /// the docker conainer create command and the + /// the docker container create command and the /// COMMAND parameter to docker /// run. If there are multiple arguments, each /// argument is a separated string in the array. public let command: [String]? /// The number of cpu units reserved for the container. This parameter maps - /// to CpuShares in the docker conainer create commandand the --cpu-shares option to docker run. This field is optional for tasks using the Fargate launch type, and the + /// to CpuShares in the docker container create commandand the --cpu-shares option to docker run. This field is optional for tasks using the Fargate launch type, and the /// only requirement is that the total amount of CPU reserved for all containers within a /// task be lower than the task-level cpu value. You can determine the number of CPU units that are available per EC2 instance type /// by multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page @@ -1072,16 +1072,16 @@ extension ECS { /// the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. public let dependsOn: [ContainerDependency]? /// When this parameter is true, networking is off within the container. This parameter - /// maps to NetworkDisabled in the docker conainer create command. This parameter is not supported for Windows containers. + /// maps to NetworkDisabled in the docker container create command. This parameter is not supported for Windows containers. public let disableNetworking: Bool? /// A list of DNS search domains that are presented to the container. This parameter maps - /// to DnsSearch in the docker conainer create command and the --dns-search option to docker run. This parameter is not supported for Windows containers. + /// to DnsSearch in the docker container create command and the --dns-search option to docker run. This parameter is not supported for Windows containers. public let dnsSearchDomains: [String]? /// A list of DNS servers that are presented to the container. This parameter maps to - /// Dns in the the docker conainer create command and the --dns option to docker run. This parameter is not supported for Windows containers. + /// Dns in the docker container create command and the --dns option to docker run. This parameter is not supported for Windows containers. public let dnsServers: [String]? /// A key/value map of labels to add to the container. This parameter maps to - /// Labels in the docker conainer create command and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' + /// Labels in the docker container create command and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' public let dockerLabels: [String: String]? /// A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks /// using the Fargate launch type. For Linux tasks on EC2, this parameter can be used to reference custom @@ -1089,7 +1089,7 @@ extension ECS { /// credential spec file that configures a container for Active Directory authentication. /// For more information, see Using gMSAs for Windows /// Containers and Using gMSAs for Linux - /// Containers in the Amazon Elastic Container Service Developer Guide. This parameter maps to SecurityOpt in the docker conainer create command and the + /// Containers in the Amazon Elastic Container Service Developer Guide. This parameter maps to SecurityOpt in the docker container create command and the /// --security-opt option to docker /// run. The Amazon ECS container agent running on a container instance must register with the /// ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true @@ -1102,10 +1102,10 @@ extension ECS { /// entryPoint parameters. If you have problems using /// entryPoint, update your container agent or enter your commands and /// arguments as command array items instead. The entry point that's passed to the container. This parameter maps to - /// Entrypoint in tthe docker conainer create command and the --entrypoint option to docker run. + /// Entrypoint in the docker container create command and the --entrypoint option to docker run. public let entryPoint: [String]? /// The environment variables to pass to a container. This parameter maps to - /// Env in the docker conainer create command and the --env option to docker run. We don't recommend that you use plaintext environment variables for sensitive + /// Env in the docker container create command and the --env option to docker run. We don't recommend that you use plaintext environment variables for sensitive /// information, such as credential data. public let environment: [KeyValuePair]? /// A list of files containing the environment variables to pass to a container. This @@ -1130,7 +1130,7 @@ extension ECS { /// Architecture in the Amazon Elastic Container Service Developer Guide. public let essential: Bool? /// A list of hostnames and IP address mappings to append to the /etc/hosts - /// file on the container. This parameter maps to ExtraHosts in the docker conainer create command and the + /// file on the container. This parameter maps to ExtraHosts in the docker container create command and the /// --add-host option to docker /// run. This parameter isn't supported for Windows containers or tasks that use the /// awsvpc network mode. @@ -1140,19 +1140,19 @@ extension ECS { /// in the Amazon Elastic Container Service Developer Guide. public let firelensConfiguration: FirelensConfiguration? /// The container health check command and associated configuration parameters for the - /// container. 
This parameter maps to HealthCheck in the docker conainer create command and the + /// container. This parameter maps to HealthCheck in the docker container create command and the /// HEALTHCHECK parameter of docker /// run. public let healthCheck: HealthCheck? /// The hostname to use for your container. This parameter maps to Hostname - /// in thethe docker conainer create command and the + /// in the docker container create command and the /// --hostname option to docker /// run. The hostname parameter is not supported if you're using the /// awsvpc network mode. public let hostname: String? /// The image used to start a container. This string is passed directly to the Docker /// daemon. By default, images in the Docker Hub registry are available. Other repositories - /// are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker conainer create command and the + /// are specified with either repository-url/image:tag or repository-url/image@digest . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker container create command and the /// IMAGE parameter of docker /// run. When a new task starts, the Amazon ECS container agent pulls the latest version of /// the specified image and tag for the container to use. However, subsequent @@ -1169,13 +1169,13 @@ extension ECS { public let image: String? /// When this parameter is true, you can deploy containerized applications /// that require stdin or a tty to be allocated. This parameter - /// maps to OpenStdin in the docker conainer create command and the --interactive option to docker run. + /// maps to OpenStdin in the docker container create command and the --interactive option to docker run. public let interactive: Bool? /// The links parameter allows containers to communicate with each other /// without the need for port mappings. This parameter is only supported if the network mode /// of a task definition is bridge. The name:internalName /// construct is analogous to name:alias in Docker links. - /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to Links in the docker conainer create command and the + /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to Links in the docker container create command and the /// --link option to docker /// run. This parameter is not supported for Windows containers. Containers that are collocated on a single container instance may be able to /// communicate with each other without requiring links or host port mappings. Network @@ -1185,7 +1185,7 @@ extension ECS { /// Linux-specific modifications that are applied to the container, such as Linux kernel /// capabilities. For more information see KernelCapabilities. This parameter is not supported for Windows containers. public let linuxParameters: LinuxParameters? - /// The log configuration specification for the container. This parameter maps to LogConfig in the docker conainer create command and the + /// The log configuration specification for the container. This parameter maps to LogConfig in the docker container create command and the /// --log-driver option to docker /// run. 
By default, containers use the same logging driver that the Docker /// daemon uses. However the container can use a different logging driver than the Docker @@ -1205,7 +1205,7 @@ extension ECS { /// to exceed the memory specified here, the container is killed. The total amount of memory /// reserved for all containers within a task must be lower than the task /// memory value, if one is specified. This parameter maps to - /// Memory in thethe docker conainer create command and the --memory option to docker run. If using the Fargate launch type, this parameter is optional. If using the EC2 launch type, you must specify either a task-level + /// Memory in the docker container create command and the --memory option to docker run. If using the Fargate launch type, this parameter is optional. If using the EC2 launch type, you must specify either a task-level /// memory value or a container-level memory value. If you specify both a container-level /// memory and memoryReservation value, memory /// must be greater than memoryReservation. If you specify @@ -1220,7 +1220,7 @@ extension ECS { /// However, your container can consume more memory when it needs to, up to either the hard /// limit specified with the memory parameter (if applicable), or all of the /// available memory on the container instance, whichever comes first. This parameter maps - /// to MemoryReservation in the the docker conainer create command and the --memory-reservation option to docker run. If a task-level memory value is not specified, you must specify a non-zero integer for + /// to MemoryReservation in the docker container create command and the --memory-reservation option to docker run. If a task-level memory value is not specified, you must specify a non-zero integer for /// one or both of memory or memoryReservation in a container /// definition. If you specify both, memory must be greater than /// memoryReservation. If you specify memoryReservation, then @@ -1235,14 +1235,14 @@ extension ECS { /// container. So, don't specify less than 6 MiB of memory for your containers. The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a /// container. So, don't specify less than 4 MiB of memory for your containers. public let memoryReservation: Int? - /// The mount points for data volumes in your container. This parameter maps to Volumes in the the docker conainer create command and the --volume option to docker run. Windows containers can mount whole directories on the same drive as + /// The mount points for data volumes in your container. This parameter maps to Volumes in the docker container create command and the --volume option to docker run. Windows containers can mount whole directories on the same drive as /// $env:ProgramData. Windows containers can't mount directories on a /// different drive, and mount point can't be across drives. public let mountPoints: [MountPoint]? /// The name of a container. If you're linking multiple containers together in a task /// definition, the name of one container can be entered in the /// links of another container to connect the containers. - /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in tthe docker conainer create command and the + /// Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the docker container create command and the /// --name option to docker /// run. public let name: String? 
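The ContainerDefinition hunks above only reword doc comments (fixing "docker conainer create" to "docker container create"); the Swift API surface is unchanged. As a rough, hedged illustration of how these documented parameters are supplied through Soto's generated memberwise initializer, the sketch below builds a container definition; the image, labels, environment values, port, and timeout are placeholders and are not part of this change.

import SotoECS

// Hedged sketch, not part of this diff: a container definition using several of the
// parameters whose doc comments are reworded above. All concrete values are placeholders.
let webContainer = ECS.ContainerDefinition(
    command: ["nginx", "-g", "daemon off;"],                         // maps to Cmd in docker container create
    dockerLabels: ["team": "platform"],                              // maps to Labels
    environment: [ECS.KeyValuePair(name: "STAGE", value: "test")],   // maps to Env
    essential: true,
    image: "public.ecr.aws/nginx/nginx:latest",                      // maps to Image
    memory: 512,                                                     // maps to Memory (MiB)
    name: "web",                                                     // maps to name
    portMappings: [ECS.PortMapping(containerPort: 80)],
    stopTimeout: 30                                                  // seconds; valid range on Fargate is 2-120
)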
@@ -1252,7 +1252,7 @@ extension ECS { /// be the same value as the containerPort. Port mappings on Windows use the NetNAT gateway address rather than /// localhost. There's no loopback for port mappings on Windows, so you /// can't access a container's mapped port from the host itself. This parameter maps to PortBindings in the - /// the docker conainer create command and the + /// the docker container create command and the /// --publish option to docker /// run. If the network mode of a task definition is set to none, /// then you can't specify port mappings. If the network mode of a task definition is set to @@ -1266,13 +1266,13 @@ extension ECS { public let portMappings: [PortMapping]? /// When this parameter is true, the container is given elevated privileges on the host /// container instance (similar to the root user). This parameter maps to - /// Privileged in the the docker conainer create command and the --privileged option to docker run This parameter is not supported for Windows containers or tasks run on Fargate. + /// Privileged in the docker container create command and the --privileged option to docker run This parameter is not supported for Windows containers or tasks run on Fargate. public let privileged: Bool? /// When this parameter is true, a TTY is allocated. This parameter maps to - /// Tty in tthe docker conainer create command and the --tty option to docker run. + /// Tty in the docker container create command and the --tty option to docker run. public let pseudoTerminal: Bool? /// When this parameter is true, the container is given read-only access to its root file - /// system. This parameter maps to ReadonlyRootfs in the docker conainer create command and the + /// system. This parameter maps to ReadonlyRootfs in the docker container create command and the /// --read-only option to docker /// run. This parameter is not supported for Windows containers. public let readonlyRootFilesystem: Bool? @@ -1309,7 +1309,7 @@ extension ECS { public let startTimeout: Int? /// Time duration (in seconds) to wait before the container is forcefully killed if it /// doesn't exit normally on its own. For tasks using the Fargate launch type, the task or service requires - /// the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. The max stop timeout value is 120 seconds and if the parameter is not specified, the + /// the following platforms: Linux platform version 1.3.0 or later. Windows platform version 1.0.0 or later. For tasks that use the Fargate launch type, the max stop timeout value is 120 seconds and if the parameter is not specified, the /// default value of 30 seconds is used. For tasks that use the EC2 launch type, if the stopTimeout /// parameter isn't specified, the value set for the Amazon ECS container agent configuration /// variable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the @@ -1322,16 +1322,16 @@ extension ECS { /// an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the /// ecs-init package. If your container instances are launched from version /// 20190301 or later, then they contain the required versions of the - /// container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. The valid values are 2-120 seconds. + /// container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide. 
The valid values for Fargate are 2-120 seconds. public let stopTimeout: Int? /// A list of namespaced kernel parameters to set in the container. This parameter maps to - /// Sysctls in tthe docker conainer create command and the --sysctl option to docker run. For example, you can configure + /// Sysctls in the docker container create command and the --sysctl option to docker run. For example, you can configure /// net.ipv4.tcp_keepalive_time setting to maintain longer lived /// connections. public let systemControls: [SystemControl]? /// A list of ulimits to set in the container. If a ulimit value /// is specified in a task definition, it overrides the default values set by Docker. This - /// parameter maps to Ulimits in tthe docker conainer create command and the --ulimit option to docker run. Valid naming values are displayed + /// parameter maps to Ulimits in the docker container create command and the --ulimit option to docker run. Valid naming values are displayed /// in the Ulimit data type. Amazon ECS tasks hosted on Fargate use the default /// resource limit values set by the operating system with the exception of /// the nofile resource limit parameter which Fargate @@ -1340,7 +1340,7 @@ extension ECS { /// nofile soft limit is 65535 and the default hard limit /// is 65535. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' This parameter is not supported for Windows containers. public let ulimits: [Ulimit]? - /// The user to use inside the container. This parameter maps to User in the docker conainer create command and the + /// The user to use inside the container. This parameter maps to User in the docker container create command and the /// --user option to docker /// run. When running tasks using the host network mode, don't run containers /// using the root user (UID 0). We recommend using a non-root user for better @@ -1348,10 +1348,10 @@ extension ECS { /// or GID, you must specify it as a positive integer. user user:group uid uid:gid user:gid uid:group This parameter is not supported for Windows containers. public let user: String? /// Data volumes to mount from another container. This parameter maps to - /// VolumesFrom in tthe docker conainer create command and the --volumes-from option to docker run. + /// VolumesFrom in the docker container create command and the --volumes-from option to docker run. public let volumesFrom: [VolumeFrom]? /// The working directory to run commands inside the container in. This parameter maps to - /// WorkingDir in the docker conainer create command and the --workdir option to docker run. + /// WorkingDir in the docker container create command and the --workdir option to docker run. public let workingDirectory: String? @inlinable @@ -2702,11 +2702,12 @@ extension ECS { /// tasks before stopping the four older tasks (provided that the cluster resources required /// to do this are available). The default maximumPercent value for a service /// using the REPLICA service scheduler is 200%. 
If a service is using either the blue/green (CODE_DEPLOY) or - /// EXTERNAL deployment types and tasks that use the EC2 + /// EXTERNAL deployment types, and tasks in the service use the EC2 /// launch type, the maximum percent value is set to the - /// default value and is used to define the upper limit on the number of the tasks in the + /// default value. The maximum percent value is used to define the upper limit on the number of the tasks in the /// service that remain in the RUNNING state while the container instances are - /// in the DRAINING state. If the tasks in the service use the + /// in the DRAINING state. You can't specify a custom maximumPercent value for a service that uses either the blue/green (CODE_DEPLOY) or + /// EXTERNAL deployment types and has tasks that use the EC2 launch type. If the tasks in the service use the /// Fargate launch type, the maximum percent value is not used, although it is /// returned when describing your service. public let maximumPercent: Int? @@ -2742,9 +2743,10 @@ extension ECS { /// rounded up to the nearest integer value. If a service is using either the blue/green (CODE_DEPLOY) or /// EXTERNAL deployment types and is running tasks that use the /// EC2 launch type, the minimum healthy - /// percent value is set to the default value and is used to define the lower + /// percent value is set to the default value. The minimum healthy percent value is used to define the lower /// limit on the number of the tasks in the service that remain in the RUNNING - /// state while the container instances are in the DRAINING state. If a service + /// state while the container instances are in the DRAINING state. You can't specify a custom minimumHealthyPercent value for a service that uses either the blue/green (CODE_DEPLOY) or + /// EXTERNAL deployment types and has tasks that use the EC2 launch type. If a service /// is using either the blue/green (CODE_DEPLOY) or EXTERNAL /// deployment types and is running tasks that use the Fargate launch type, /// the minimum healthy percent value is not used, although it is returned when describing @@ -3292,7 +3294,7 @@ extension ECS { /// by Docker because it is used for task placement. If the driver was installed using the /// Docker plugin CLI, use docker plugin ls to retrieve the driver name from /// your container instance. If the driver was installed using another method, use Docker - /// plugin discovery to retrieve the driver name. This parameter maps to Driver in the docker conainer create command and the + /// plugin discovery to retrieve the driver name. This parameter maps to Driver in the docker container create command and the /// xxdriver option to docker /// volume create. public let driver: String? @@ -3301,7 +3303,7 @@ extension ECS { /// volume create. public let driverOpts: [String: String]? /// Custom metadata to add to your Docker volume. This parameter maps to - /// Labels in the docker conainer create command and the xxlabel option to docker + /// Labels in the docker container create command and the xxlabel option to docker /// volume create. public let labels: [String: String]? /// The scope for the Docker volume that determines its lifecycle. Docker volumes that are @@ -3736,7 +3738,7 @@ extension ECS { /// directly, or CMD-SHELL to run the command with the container's default /// shell. When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list /// of commands in double quotes and brackets. 
[ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ] You don't include the double quotes and brackets when you use the Amazon Web Services Management Console. CMD-SHELL, curl -f http://localhost/ || exit 1 An exit code of 0 indicates success, and non-zero exit code indicates failure. For - /// more information, see HealthCheck in tthe docker conainer create command + /// more information, see HealthCheck in the docker container create command public let command: [String] /// The time period in seconds between each health check execution. You may specify /// between 5 and 300 seconds. The default value is 30 seconds. @@ -3880,7 +3882,7 @@ extension ECS { public struct KernelCapabilities: AWSEncodableShape & AWSDecodableShape { /// The Linux capabilities for the container that have been added to the default - /// configuration provided by Docker. This parameter maps to CapAdd in the docker conainer create command and the + /// configuration provided by Docker. This parameter maps to CapAdd in the docker container create command and the /// --cap-add option to docker /// run. Tasks launched on Fargate only support adding the SYS_PTRACE kernel /// capability. Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | @@ -3893,7 +3895,7 @@ extension ECS { /// "WAKE_ALARM" public let add: [String]? /// The Linux capabilities for the container that have been removed from the default - /// configuration provided by Docker. This parameter maps to CapDrop in the docker conainer create command and the + /// configuration provided by Docker. This parameter maps to CapDrop in the docker container create command and the /// --cap-drop option to docker /// run. Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | /// "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" | @@ -3945,7 +3947,7 @@ extension ECS { /// later. public let capabilities: KernelCapabilities? /// Any host devices to expose to the container. This parameter maps to - /// Devices in tthe docker conainer create command and the --device option to docker run. If you're using tasks that use the Fargate launch type, the + /// Devices in the docker container create command and the --device option to docker run. If you're using tasks that use the Fargate launch type, the /// devices parameter isn't supported. public let devices: [Device]? /// Run an init process inside the container that forwards signals and reaps @@ -4731,7 +4733,129 @@ extension ECS { /// submit pull requests for changes that you would like to have included. However, we /// don't currently provide support for running modified copies of this software. public let logDriver: LogDriver - /// The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' + /// The configuration options to send to the log driver. The options you can specify depend on the log driver. Some + /// of the options you can specify when you use the awslogs log driver to route logs to + /// Amazon CloudWatch include the following: awslogs-create-group Required: No Specify whether you want the log group to be + /// created automatically. If this option isn't + /// specified, it defaults to + /// false. 
Your IAM policy must include the + /// logs:CreateLogGroup permission before + /// you attempt to use + /// awslogs-create-group. awslogs-region Required: Yes Specify the Amazon Web Services Region that the + /// awslogs log driver is to send your + /// Docker logs to. You can choose to send all of your + /// logs from clusters in different Regions to a + /// single region in CloudWatch Logs. This is so that they're + /// all visible in one location. Otherwise, you can + /// separate them by Region for more granularity. Make + /// sure that the specified log group exists in the + /// Region that you specify with this option. awslogs-group Required: Yes Make sure to specify a log group that the + /// awslogs log driver sends its log + /// streams to. awslogs-stream-prefix Required: Yes, when + /// using the Fargate launch + /// type.Optional for + /// the EC2 launch type, required for + /// the Fargate launch + /// type. Use the awslogs-stream-prefix + /// option to associate a log stream with the + /// specified prefix, the container name, and the ID + /// of the Amazon ECS task that the container belongs to. + /// If you specify a prefix with this option, then the + /// log stream takes the format prefix-name/container-name/ecs-task-id. If you don't specify a prefix + /// with this option, then the log stream is named + /// after the container ID that's assigned by the + /// Docker daemon on the container instance. Because + /// it's difficult to trace logs back to the container + /// that sent them with just the Docker container ID + /// (which is only available on the container + /// instance), we recommend that you specify a prefix + /// with this option. For Amazon ECS services, you can use the service + /// name as the prefix. Doing so, you can trace log + /// streams to the service that the container belongs + /// to, the name of the container that sent them, and + /// the ID of the task that the container belongs + /// to. You must specify a + /// stream-prefix for your logs to have your logs + /// appear in the Log pane when using the Amazon ECS + /// console. awslogs-datetime-format Required: No This option defines a multiline start pattern + /// in Python strftime format. A log + /// message consists of a line that matches the + /// pattern and any following lines that don’t match + /// the pattern. The matched line is the delimiter + /// between log messages. One example of a use case for using this + /// format is for parsing output such as a stack dump, + /// which might otherwise be logged in multiple + /// entries. The correct pattern allows it to be + /// captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the + /// awslogs-datetime-format and + /// awslogs-multiline-pattern + /// options. Multiline logging performs regular + /// expression parsing and matching of all log + /// messages. This might have a negative impact on + /// logging performance. awslogs-multiline-pattern Required: No This option defines a multiline start pattern + /// that uses a regular expression. A log message + /// consists of a line that matches the pattern and + /// any following lines that don’t match the pattern. + /// The matched line is the delimiter between log + /// messages. For more information, see awslogs-multiline-pattern. This option is ignored if + /// awslogs-datetime-format is also + /// configured. You cannot configure both the + /// awslogs-datetime-format and + /// awslogs-multiline-pattern + /// options. 
Multiline logging performs regular + /// expression parsing and matching of all log + /// messages. This might have a negative impact on + /// logging performance. mode Required: No Valid values: non-blocking | + /// blocking This option defines the delivery mode of log + /// messages from the container to CloudWatch Logs. The delivery + /// mode you choose affects application availability + /// when the flow of logs from container to CloudWatch is + /// interrupted. If you use the blocking + /// mode and the flow of logs to CloudWatch is interrupted, + /// calls from container code to write to the + /// stdout and stderr + /// streams will block. The logging thread of the + /// application will block as a result. This may cause + /// the application to become unresponsive and lead to + /// container healthcheck failure. If you use the non-blocking mode, + /// the container's logs are instead stored in an + /// in-memory intermediate buffer configured with the + /// max-buffer-size option. This prevents + /// the application from becoming unresponsive when + /// logs cannot be sent to CloudWatch. We recommend using this mode if you want to + /// ensure service availability and are okay with some + /// log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver. max-buffer-size Required: No Default value: 1m When non-blocking mode is used, + /// the max-buffer-size log option + /// controls the size of the buffer that's used for + /// intermediate message storage. Make sure to specify + /// an adequate buffer size based on your application. + /// When the buffer fills up, further logs cannot be + /// stored. Logs that cannot be stored are lost. + /// To route logs using the splunk log router, you need to specify a + /// splunk-token and a + /// splunk-url. When you use the awsfirelens log router to route logs to an Amazon Web Services Service or + /// Amazon Web Services Partner Network destination for log storage and analytics, you can + /// set the log-driver-buffer-limit option to limit + /// the number of events that are buffered in memory, before + /// being sent to the log router container. It can help to + /// resolve potential log loss issue because high throughput + /// might result in memory running out for the buffer inside of + /// Docker. Other options you can specify when using awsfirelens to route + /// logs depend on the destination. When you export logs to + /// Amazon Data Firehose, you can specify the Amazon Web Services Region with + /// region and a name for the log stream with + /// delivery_stream. When you export logs to + /// Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with + /// region and a data stream name with + /// stream. When you export logs to Amazon OpenSearch Service, + /// you can specify options like Name, + /// Host (OpenSearch Service endpoint without protocol), Port, + /// Index, Type, + /// Aws_auth, Aws_region, Suppress_Type_Name, and + /// tls. When you export logs to Amazon S3, you can + /// specify the bucket using the bucket option. You can also specify region, + /// total_file_size, upload_timeout, + /// and use_put_object as options. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}' public let options: [String: String]? 
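The expanded options documentation above describes the awslogs, splunk, and awsfirelens routing options in prose. As a hedged sketch of how those options are passed through Soto (assuming the generated LogConfiguration initializer and the awslogs case of LogDriver), the example below wires up the awslogs driver with the documented keys; the log group, Region, and stream prefix are placeholders, not values from this change.

import SotoECS

// Hedged sketch, not part of this diff: awslogs log routing configured with the
// options documented above. All concrete values are placeholders.
let logConfiguration = ECS.LogConfiguration(
    logDriver: .awslogs,
    options: [
        "awslogs-create-group": "true",        // needs logs:CreateLogGroup in the task IAM policy
        "awslogs-group": "/ecs/sample-app",    // placeholder log group; must exist unless created above
        "awslogs-region": "us-east-1",         // placeholder Region
        "awslogs-stream-prefix": "sample-app", // required when using the Fargate launch type
        "mode": "non-blocking",                // buffer logs instead of blocking stdout/stderr writes
        "max-buffer-size": "4m"                // in-memory buffer used with non-blocking mode
    ]
)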
/// The secrets to pass to the log configuration. For more information, see Specifying /// sensitive data in the Amazon Elastic Container Service Developer Guide. @@ -5325,8 +5449,7 @@ extension ECS { /// mode, your tasks using the awsvpc network mode can have an IPv6 /// address assigned. For more information on using IPv6 with tasks launched on /// Amazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6 - /// with tasks launched on Fargate, see Using a VPC in dual-stack mode. fargateFIPSMode - If you specify fargateFIPSMode, - /// Fargate FIPS 140 compliance is affected. fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a + /// with tasks launched on Fargate, see Using a VPC in dual-stack mode. fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a /// security or infrastructure update is needed for an Amazon ECS task hosted on /// Fargate, the tasks need to be stopped and new tasks launched to replace them. /// Use fargateTaskRetirementWaitPeriod to configure the wait time to @@ -7225,8 +7348,8 @@ extension ECS { } public struct TaskDefinition: AWSDecodableShape { - /// The task launch types the task definition validated against during task definition - /// registration. For more information, see Amazon ECS launch types + /// Amazon ECS validates the task definition parameters with those supported by the launch type. For + /// more information, see Amazon ECS launch types /// in the Amazon Elastic Container Service Developer Guide. public let compatibilities: [Compatibility]? /// A list of container definitions in JSON format that describe the different containers @@ -7798,11 +7921,11 @@ extension ECS { } public struct Ulimit: AWSEncodableShape & AWSDecodableShape { - /// The hard limit for the ulimit type. + /// The hard limit for the ulimit type. The value can be specified in bytes, seconds, or as a count, depending on the type of the ulimit. public let hardLimit: Int /// The type of the ulimit. public let name: UlimitName - /// The soft limit for the ulimit type. + /// The soft limit for the ulimit type. The value can be specified in bytes, seconds, or as a count, depending on the type of the ulimit. public let softLimit: Int @inlinable diff --git a/Sources/Soto/Services/EFS/EFS_api.swift b/Sources/Soto/Services/EFS/EFS_api.swift index f6f13bf8e2..8965df77fe 100644 --- a/Sources/Soto/Services/EFS/EFS_api.swift +++ b/Sources/Soto/Services/EFS/EFS_api.swift @@ -91,6 +91,7 @@ public struct EFS: AWSService { "ap-southeast-2": "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", "ap-southeast-3": "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", "ap-southeast-4": "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + "ap-southeast-5": "elasticfilesystem-fips.ap-southeast-5.amazonaws.com", "ca-central-1": "elasticfilesystem-fips.ca-central-1.amazonaws.com", "ca-west-1": "elasticfilesystem-fips.ca-west-1.amazonaws.com", "cn-north-1": "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", diff --git a/Sources/Soto/Services/EMR/EMR_shapes.swift b/Sources/Soto/Services/EMR/EMR_shapes.swift index 7f444d497c..ef3d45ce93 100644 --- a/Sources/Soto/Services/EMR/EMR_shapes.swift +++ b/Sources/Soto/Services/EMR/EMR_shapes.swift @@ -2255,6 +2255,8 @@ extension EMR { } public struct InstanceFleet: AWSDecodableShape { + /// Reserved. + public let context: String? /// The unique identifier of the instance fleet. public let id: String? /// The node type that the instance fleet hosts. 
Valid values are MASTER, CORE, or TASK. @@ -2279,7 +2281,8 @@ extension EMR { public let targetSpotCapacity: Int? @inlinable - public init(id: String? = nil, instanceFleetType: InstanceFleetType? = nil, instanceTypeSpecifications: [InstanceTypeSpecification]? = nil, launchSpecifications: InstanceFleetProvisioningSpecifications? = nil, name: String? = nil, provisionedOnDemandCapacity: Int? = nil, provisionedSpotCapacity: Int? = nil, resizeSpecifications: InstanceFleetResizingSpecifications? = nil, status: InstanceFleetStatus? = nil, targetOnDemandCapacity: Int? = nil, targetSpotCapacity: Int? = nil) { + public init(context: String? = nil, id: String? = nil, instanceFleetType: InstanceFleetType? = nil, instanceTypeSpecifications: [InstanceTypeSpecification]? = nil, launchSpecifications: InstanceFleetProvisioningSpecifications? = nil, name: String? = nil, provisionedOnDemandCapacity: Int? = nil, provisionedSpotCapacity: Int? = nil, resizeSpecifications: InstanceFleetResizingSpecifications? = nil, status: InstanceFleetStatus? = nil, targetOnDemandCapacity: Int? = nil, targetSpotCapacity: Int? = nil) { + self.context = context self.id = id self.instanceFleetType = instanceFleetType self.instanceTypeSpecifications = instanceTypeSpecifications @@ -2294,6 +2297,7 @@ extension EMR { } private enum CodingKeys: String, CodingKey { + case context = "Context" case id = "Id" case instanceFleetType = "InstanceFleetType" case instanceTypeSpecifications = "InstanceTypeSpecifications" @@ -2309,6 +2313,8 @@ extension EMR { } public struct InstanceFleetConfig: AWSEncodableShape { + /// Reserved. + public let context: String? /// The node type that the instance fleet hosts. Valid values are MASTER, CORE, and TASK. public let instanceFleetType: InstanceFleetType? /// The instance type configurations that define the Amazon EC2 instances in the instance fleet. @@ -2325,7 +2331,8 @@ extension EMR { public let targetSpotCapacity: Int? @inlinable - public init(instanceFleetType: InstanceFleetType? = nil, instanceTypeConfigs: [InstanceTypeConfig]? = nil, launchSpecifications: InstanceFleetProvisioningSpecifications? = nil, name: String? = nil, resizeSpecifications: InstanceFleetResizingSpecifications? = nil, targetOnDemandCapacity: Int? = nil, targetSpotCapacity: Int? = nil) { + public init(context: String? = nil, instanceFleetType: InstanceFleetType? = nil, instanceTypeConfigs: [InstanceTypeConfig]? = nil, launchSpecifications: InstanceFleetProvisioningSpecifications? = nil, name: String? = nil, resizeSpecifications: InstanceFleetResizingSpecifications? = nil, targetOnDemandCapacity: Int? = nil, targetSpotCapacity: Int? 
= nil) { + self.context = context self.instanceFleetType = instanceFleetType self.instanceTypeConfigs = instanceTypeConfigs self.launchSpecifications = launchSpecifications @@ -2336,6 +2343,8 @@ extension EMR { } public func validate(name: String) throws { + try self.validate(self.context, name: "context", parent: name, max: 256) + try self.validate(self.context, name: "context", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") try self.instanceTypeConfigs?.forEach { try $0.validate(name: "\(name).instanceTypeConfigs[]") } @@ -2348,6 +2357,7 @@ extension EMR { } private enum CodingKeys: String, CodingKey { + case context = "Context" case instanceFleetType = "InstanceFleetType" case instanceTypeConfigs = "InstanceTypeConfigs" case launchSpecifications = "LaunchSpecifications" @@ -2359,6 +2369,8 @@ extension EMR { } public struct InstanceFleetModifyConfig: AWSEncodableShape { + /// Reserved. + public let context: String? /// A unique identifier for the instance fleet. public let instanceFleetId: String? /// An array of InstanceTypeConfig objects that specify how Amazon EMR provisions Amazon EC2 instances when it fulfills On-Demand and Spot capacities. For more information, see InstanceTypeConfig. @@ -2371,7 +2383,8 @@ extension EMR { public let targetSpotCapacity: Int? @inlinable - public init(instanceFleetId: String? = nil, instanceTypeConfigs: [InstanceTypeConfig]? = nil, resizeSpecifications: InstanceFleetResizingSpecifications? = nil, targetOnDemandCapacity: Int? = nil, targetSpotCapacity: Int? = nil) { + public init(context: String? = nil, instanceFleetId: String? = nil, instanceTypeConfigs: [InstanceTypeConfig]? = nil, resizeSpecifications: InstanceFleetResizingSpecifications? = nil, targetOnDemandCapacity: Int? = nil, targetSpotCapacity: Int? = nil) { + self.context = context self.instanceFleetId = instanceFleetId self.instanceTypeConfigs = instanceTypeConfigs self.resizeSpecifications = resizeSpecifications @@ -2380,6 +2393,8 @@ extension EMR { } public func validate(name: String) throws { + try self.validate(self.context, name: "context", parent: name, max: 256) + try self.validate(self.context, name: "context", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$") try self.instanceTypeConfigs?.forEach { try $0.validate(name: "\(name).instanceTypeConfigs[]") } @@ -2389,6 +2404,7 @@ extension EMR { } private enum CodingKeys: String, CodingKey { + case context = "Context" case instanceFleetId = "InstanceFleetId" case instanceTypeConfigs = "InstanceTypeConfigs" case resizeSpecifications = "ResizeSpecifications" diff --git a/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift b/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift index 26ee74f0c9..ec9800a870 100644 --- a/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift +++ b/Sources/Soto/Services/EMRServerless/EMRServerless_api.swift @@ -153,6 +153,7 @@ public struct EMRServerless: AWSService { /// - networkConfiguration: The network configuration for customer VPC connectivity. /// - releaseLabel: The Amazon EMR release associated with the application. /// - runtimeConfiguration: The Configuration specifications to use when creating an application. Each configuration consists of a classification and properties. This configuration is applied to all the job runs submitted under the application. + /// - schedulerConfiguration: The scheduler configuration for batch and streaming jobs running on this application. 
Supported with release labels emr-7.0.0 and above. /// - tags: The tags assigned to the application. /// - type: The type of application you want to start, such as Spark or Hive. /// - workerTypeSpecifications: The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. Valid worker types include Driver and Executor for Spark applications and HiveDriver and TezTask for Hive applications. You can either set image details in this parameter for each worker type, or in imageConfiguration for all worker types. @@ -172,6 +173,7 @@ public struct EMRServerless: AWSService { networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, + schedulerConfiguration: SchedulerConfiguration? = nil, tags: [String: String]? = nil, type: String, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil, @@ -191,6 +193,7 @@ public struct EMRServerless: AWSService { networkConfiguration: networkConfiguration, releaseLabel: releaseLabel, runtimeConfiguration: runtimeConfiguration, + schedulerConfiguration: schedulerConfiguration, tags: tags, type: type, workerTypeSpecifications: workerTypeSpecifications @@ -682,6 +685,7 @@ public struct EMRServerless: AWSService { /// - networkConfiguration: /// - releaseLabel: The Amazon EMR release label for the application. You can change the release label to use a different release of Amazon EMR. /// - runtimeConfiguration: The Configuration specifications to use when updating an application. Each configuration consists of a classification and properties. This configuration is applied across all the job runs submitted under the application. + /// - schedulerConfiguration: The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above. /// - workerTypeSpecifications: The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. Valid worker types include Driver and Executor for Spark applications and HiveDriver and TezTask for Hive applications. You can either set image details in this parameter for each worker type, or in imageConfiguration for all worker types. /// - logger: Logger use during operation @inlinable @@ -699,6 +703,7 @@ public struct EMRServerless: AWSService { networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String? = nil, runtimeConfiguration: [Configuration]? = nil, + schedulerConfiguration: SchedulerConfiguration? = nil, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateApplicationResponse { @@ -716,6 +721,7 @@ public struct EMRServerless: AWSService { networkConfiguration: networkConfiguration, releaseLabel: releaseLabel, runtimeConfiguration: runtimeConfiguration, + schedulerConfiguration: schedulerConfiguration, workerTypeSpecifications: workerTypeSpecifications ) return try await self.updateApplication(input, logger: logger) diff --git a/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift b/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift index b1ea620921..9d43383cf2 100644 --- a/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift +++ b/Sources/Soto/Services/EMRServerless/EMRServerless_shapes.swift @@ -54,6 +54,7 @@ extension EMRServerless { case cancelling = "CANCELLING" case failed = "FAILED" case pending = "PENDING" + case queued = "QUEUED" case running = "RUNNING" case scheduled = "SCHEDULED" case submitted = "SUBMITTED" @@ -143,6 +144,8 @@ extension EMRServerless { public let releaseLabel: String /// The Configuration specifications of an application. Each configuration consists of a classification and properties. You use this parameter when creating or updating an application. To see the runtimeConfiguration object of an application, run the GetApplication API operation. public let runtimeConfiguration: [Configuration]? + /// The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above. + public let schedulerConfiguration: SchedulerConfiguration? /// The state of the application. public let state: ApplicationState /// The state details of the application. @@ -157,7 +160,7 @@ extension EMRServerless { public let workerTypeSpecifications: [String: WorkerTypeSpecification]? @inlinable - public init(applicationId: String, architecture: Architecture? = nil, arn: String, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, createdAt: Date, imageConfiguration: ImageConfiguration? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, state: ApplicationState, stateDetails: String? = nil, tags: [String: String]? = nil, type: String, updatedAt: Date, workerTypeSpecifications: [String: WorkerTypeSpecification]? = nil) { + public init(applicationId: String, architecture: Architecture? = nil, arn: String, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, createdAt: Date, imageConfiguration: ImageConfiguration? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, schedulerConfiguration: SchedulerConfiguration? = nil, state: ApplicationState, stateDetails: String? = nil, tags: [String: String]? = nil, type: String, updatedAt: Date, workerTypeSpecifications: [String: WorkerTypeSpecification]? 
= nil) { self.applicationId = applicationId self.architecture = architecture self.arn = arn @@ -173,6 +176,7 @@ extension EMRServerless { self.networkConfiguration = networkConfiguration self.releaseLabel = releaseLabel self.runtimeConfiguration = runtimeConfiguration + self.schedulerConfiguration = schedulerConfiguration self.state = state self.stateDetails = stateDetails self.tags = tags @@ -197,6 +201,7 @@ extension EMRServerless { case networkConfiguration = "networkConfiguration" case releaseLabel = "releaseLabel" case runtimeConfiguration = "runtimeConfiguration" + case schedulerConfiguration = "schedulerConfiguration" case state = "state" case stateDetails = "stateDetails" case tags = "tags" @@ -361,7 +366,7 @@ extension EMRServerless { public func validate(name: String) throws { try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, max: 2048) try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, min: 20) - try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, pattern: "^arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:(\\d{12})?:key\\/[a-zA-Z0-9-]+$") + try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, pattern: "^arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:([0-9]{12}):key\\/[a-zA-Z0-9-]+$") try self.validate(self.logGroupName, name: "logGroupName", parent: name, max: 512) try self.validate(self.logGroupName, name: "logGroupName", parent: name, min: 1) try self.validate(self.logGroupName, name: "logGroupName", parent: name, pattern: "^[\\.\\-_/#A-Za-z0-9]+$") @@ -481,6 +486,8 @@ extension EMRServerless { public let releaseLabel: String /// The Configuration specifications to use when creating an application. Each configuration consists of a classification and properties. This configuration is applied to all the job runs submitted under the application. public let runtimeConfiguration: [Configuration]? + /// The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above. + public let schedulerConfiguration: SchedulerConfiguration? /// The tags assigned to the application. public let tags: [String: String]? /// The type of application you want to start, such as Spark or Hive. @@ -489,7 +496,7 @@ extension EMRServerless { public let workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? @inlinable - public init(architecture: Architecture? = nil, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, clientToken: String = CreateApplicationRequest.idempotencyToken(), imageConfiguration: ImageConfigurationInput? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, tags: [String: String]? = nil, type: String, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil) { + public init(architecture: Architecture? = nil, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, clientToken: String = CreateApplicationRequest.idempotencyToken(), imageConfiguration: ImageConfigurationInput? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? 
= nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, runtimeConfiguration: [Configuration]? = nil, schedulerConfiguration: SchedulerConfiguration? = nil, tags: [String: String]? = nil, type: String, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil) { self.architecture = architecture self.autoStartConfiguration = autoStartConfiguration self.autoStopConfiguration = autoStopConfiguration @@ -503,6 +510,7 @@ extension EMRServerless { self.networkConfiguration = networkConfiguration self.releaseLabel = releaseLabel self.runtimeConfiguration = runtimeConfiguration + self.schedulerConfiguration = schedulerConfiguration self.tags = tags self.type = type self.workerTypeSpecifications = workerTypeSpecifications @@ -565,6 +573,7 @@ extension EMRServerless { case networkConfiguration = "networkConfiguration" case releaseLabel = "releaseLabel" case runtimeConfiguration = "runtimeConfiguration" + case schedulerConfiguration = "schedulerConfiguration" case tags = "tags" case type = "type" case workerTypeSpecifications = "workerTypeSpecifications" @@ -892,6 +901,8 @@ extension EMRServerless { public let createdAt: Date /// The user who created the job run. public let createdBy: String + /// The date and time when the job was terminated. + public let endedAt: Date? /// The execution role ARN of the job run. public let executionRole: String /// Returns the job run timeout value from the StartJobRun call. If no timeout was specified, then it returns the default timeout of 720 minutes. @@ -905,10 +916,14 @@ extension EMRServerless { /// The optional job run name. This doesn't have to be unique. public let name: String? public let networkConfiguration: NetworkConfiguration? + /// The total time for a job in the QUEUED state in milliseconds. + public let queuedDurationMilliseconds: Int64? /// The Amazon EMR release associated with the application your job is running on. public let releaseLabel: String /// The retry policy of the job run. public let retryPolicy: RetryPolicy? + /// The date and time when the job moved to the RUNNING state. + public let startedAt: Date? /// The state of the job run. public let state: JobRunState /// The state details of the job run. @@ -923,7 +938,7 @@ extension EMRServerless { public let updatedAt: Date @inlinable - public init(applicationId: String, arn: String, attempt: Int? = nil, attemptCreatedAt: Date? = nil, attemptUpdatedAt: Date? = nil, billedResourceUtilization: ResourceUtilization? = nil, configurationOverrides: ConfigurationOverrides? = nil, createdAt: Date, createdBy: String, executionRole: String, executionTimeoutMinutes: Int64? = nil, jobDriver: JobDriver, jobRunId: String, mode: JobRunMode? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String, retryPolicy: RetryPolicy? = nil, state: JobRunState, stateDetails: String, tags: [String: String]? = nil, totalExecutionDurationSeconds: Int? = nil, totalResourceUtilization: TotalResourceUtilization? = nil, updatedAt: Date) { + public init(applicationId: String, arn: String, attempt: Int? = nil, attemptCreatedAt: Date? = nil, attemptUpdatedAt: Date? = nil, billedResourceUtilization: ResourceUtilization? = nil, configurationOverrides: ConfigurationOverrides? = nil, createdAt: Date, createdBy: String, endedAt: Date? = nil, executionRole: String, executionTimeoutMinutes: Int64? 
= nil, jobDriver: JobDriver, jobRunId: String, mode: JobRunMode? = nil, name: String? = nil, networkConfiguration: NetworkConfiguration? = nil, queuedDurationMilliseconds: Int64? = nil, releaseLabel: String, retryPolicy: RetryPolicy? = nil, startedAt: Date? = nil, state: JobRunState, stateDetails: String, tags: [String: String]? = nil, totalExecutionDurationSeconds: Int? = nil, totalResourceUtilization: TotalResourceUtilization? = nil, updatedAt: Date) { self.applicationId = applicationId self.arn = arn self.attempt = attempt @@ -933,6 +948,7 @@ extension EMRServerless { self.configurationOverrides = configurationOverrides self.createdAt = createdAt self.createdBy = createdBy + self.endedAt = endedAt self.executionRole = executionRole self.executionTimeoutMinutes = executionTimeoutMinutes self.jobDriver = jobDriver @@ -940,8 +956,10 @@ extension EMRServerless { self.mode = mode self.name = name self.networkConfiguration = networkConfiguration + self.queuedDurationMilliseconds = queuedDurationMilliseconds self.releaseLabel = releaseLabel self.retryPolicy = retryPolicy + self.startedAt = startedAt self.state = state self.stateDetails = stateDetails self.tags = tags @@ -960,6 +978,7 @@ extension EMRServerless { case configurationOverrides = "configurationOverrides" case createdAt = "createdAt" case createdBy = "createdBy" + case endedAt = "endedAt" case executionRole = "executionRole" case executionTimeoutMinutes = "executionTimeoutMinutes" case jobDriver = "jobDriver" @@ -967,8 +986,10 @@ extension EMRServerless { case mode = "mode" case name = "name" case networkConfiguration = "networkConfiguration" + case queuedDurationMilliseconds = "queuedDurationMilliseconds" case releaseLabel = "releaseLabel" case retryPolicy = "retryPolicy" + case startedAt = "startedAt" case state = "state" case stateDetails = "stateDetails" case tags = "tags" @@ -1357,7 +1378,7 @@ extension EMRServerless { public func validate(name: String) throws { try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, max: 2048) try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, min: 20) - try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, pattern: "^arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:(\\d{12})?:key\\/[a-zA-Z0-9-]+$") + try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, pattern: "^arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:([0-9]{12}):key\\/[a-zA-Z0-9-]+$") } private enum CodingKeys: String, CodingKey { @@ -1545,7 +1566,7 @@ extension EMRServerless { public func validate(name: String) throws { try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, max: 2048) try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, min: 20) - try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, pattern: "^arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:(\\d{12})?:key\\/[a-zA-Z0-9-]+$") + try self.validate(self.encryptionKeyArn, name: "encryptionKeyArn", parent: name, pattern: "^arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:([0-9]{12}):key\\/[a-zA-Z0-9-]+$") try self.validate(self.logUri, name: "logUri", parent: name, max: 10280) try self.validate(self.logUri, name: "logUri", parent: name, min: 1) try self.validate(self.logUri, name: "logUri", parent: name, pattern: "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\r\\n\\t]*") @@ -1557,6 +1578,24 @@ extension EMRServerless { } } + public struct SchedulerConfiguration: AWSEncodableShape & 
AWSDecodableShape { + /// The maximum concurrent job runs on this application. If scheduler configuration is enabled on your application, the default value is 15. The valid range is 1 to 1000. + public let maxConcurrentRuns: Int? + /// The maximum duration in minutes for the job in QUEUED state. If scheduler configuration is enabled on your application, the default value is 360 minutes (6 hours). The valid range is from 15 to 720. + public let queueTimeoutMinutes: Int? + + @inlinable + public init(maxConcurrentRuns: Int? = nil, queueTimeoutMinutes: Int? = nil) { + self.maxConcurrentRuns = maxConcurrentRuns + self.queueTimeoutMinutes = queueTimeoutMinutes + } + + private enum CodingKeys: String, CodingKey { + case maxConcurrentRuns = "maxConcurrentRuns" + case queueTimeoutMinutes = "queueTimeoutMinutes" + } + } + public struct SparkSubmit: AWSEncodableShape & AWSDecodableShape { /// The entry point for the Spark submit job run. public let entryPoint: String @@ -1682,7 +1721,7 @@ extension EMRServerless { try self.configurationOverrides?.validate(name: "\(name).configurationOverrides") try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, max: 2048) try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, min: 20) - try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:(aws[a-zA-Z0-9-]*):iam::(\\d{12})?:(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)$") + try self.validate(self.executionRoleArn, name: "executionRoleArn", parent: name, pattern: "^arn:(aws[a-zA-Z0-9-]*):iam::([0-9]{12}):(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)$") try self.validate(self.executionTimeoutMinutes, name: "executionTimeoutMinutes", parent: name, max: 1000000) try self.validate(self.executionTimeoutMinutes, name: "executionTimeoutMinutes", parent: name, min: 0) try self.jobDriver?.validate(name: "\(name).jobDriver") @@ -1892,11 +1931,13 @@ extension EMRServerless { public let releaseLabel: String? /// The Configuration specifications to use when updating an application. Each configuration consists of a classification and properties. This configuration is applied across all the job runs submitted under the application. public let runtimeConfiguration: [Configuration]? + /// The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above. + public let schedulerConfiguration: SchedulerConfiguration? /// The key-value pairs that specify worker type to WorkerTypeSpecificationInput. This parameter must contain all valid worker types for a Spark or Hive application. Valid worker types include Driver and Executor for Spark applications and HiveDriver and TezTask for Hive applications. You can either set image details in this parameter for each worker type, or in imageConfiguration for all worker types. public let workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? @inlinable - public init(applicationId: String, architecture: Architecture? = nil, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, clientToken: String = UpdateApplicationRequest.idempotencyToken(), imageConfiguration: ImageConfigurationInput? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? 
= nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String? = nil, runtimeConfiguration: [Configuration]? = nil, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil) { + public init(applicationId: String, architecture: Architecture? = nil, autoStartConfiguration: AutoStartConfig? = nil, autoStopConfiguration: AutoStopConfig? = nil, clientToken: String = UpdateApplicationRequest.idempotencyToken(), imageConfiguration: ImageConfigurationInput? = nil, initialCapacity: [String: InitialCapacityConfig]? = nil, interactiveConfiguration: InteractiveConfiguration? = nil, maximumCapacity: MaximumAllowedResources? = nil, monitoringConfiguration: MonitoringConfiguration? = nil, networkConfiguration: NetworkConfiguration? = nil, releaseLabel: String? = nil, runtimeConfiguration: [Configuration]? = nil, schedulerConfiguration: SchedulerConfiguration? = nil, workerTypeSpecifications: [String: WorkerTypeSpecificationInput]? = nil) { self.applicationId = applicationId self.architecture = architecture self.autoStartConfiguration = autoStartConfiguration @@ -1910,6 +1951,7 @@ extension EMRServerless { self.networkConfiguration = networkConfiguration self.releaseLabel = releaseLabel self.runtimeConfiguration = runtimeConfiguration + self.schedulerConfiguration = schedulerConfiguration self.workerTypeSpecifications = workerTypeSpecifications } @@ -1929,6 +1971,7 @@ extension EMRServerless { try container.encodeIfPresent(self.networkConfiguration, forKey: .networkConfiguration) try container.encodeIfPresent(self.releaseLabel, forKey: .releaseLabel) try container.encodeIfPresent(self.runtimeConfiguration, forKey: .runtimeConfiguration) + try container.encodeIfPresent(self.schedulerConfiguration, forKey: .schedulerConfiguration) try container.encodeIfPresent(self.workerTypeSpecifications, forKey: .workerTypeSpecifications) } @@ -1978,6 +2021,7 @@ extension EMRServerless { case networkConfiguration = "networkConfiguration" case releaseLabel = "releaseLabel" case runtimeConfiguration = "runtimeConfiguration" + case schedulerConfiguration = "schedulerConfiguration" case workerTypeSpecifications = "workerTypeSpecifications" } } diff --git a/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift b/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift index 917f101799..9b95f29aac 100644 --- a/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift +++ b/Sources/Soto/Services/ElastiCache/ElastiCache_api.swift @@ -261,7 +261,7 @@ public struct ElastiCache: AWSService { return try await self.completeMigration(input, logger: logger) } - /// Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and Serverless Memcached only. + /// Creates a copy of an existing serverless cache’s snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. @Sendable @inlinable public func copyServerlessCacheSnapshot(_ input: CopyServerlessCacheSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CopyServerlessCacheSnapshotResponse { @@ -274,13 +274,13 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and Serverless Memcached only. + /// Creates a copy of an existing serverless cache’s snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. /// /// Parameters: - /// - kmsKeyId: The identifier of the KMS key used to encrypt the target snapshot. Available for Redis OSS and Serverless Memcached only. 
- /// - sourceServerlessCacheSnapshotName: The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only. - /// - tags: A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL - /// - targetServerlessCacheSnapshotName: The identifier for the snapshot to be created. Available for Redis OSS and Serverless Memcached only. + /// - kmsKeyId: The identifier of the KMS key used to encrypt the target snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - sourceServerlessCacheSnapshotName: The identifier of the existing serverless cache’s snapshot to be copied. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - tags: A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Valkey, Redis OSS and Serverless Memcached only. Default: NULL + /// - targetServerlessCacheSnapshotName: The identifier for the snapshot to be created. Available for Valkey, Redis OSS and Serverless Memcached only. /// - logger: Logger use during operation @inlinable public func copyServerlessCacheSnapshot( @@ -299,7 +299,7 @@ public struct ElastiCache: AWSService { return try await self.copyServerlessCacheSnapshot(input, logger: logger) } - /// Makes a copy of an existing snapshot. This operation is valid for Redis OSS only. Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control. You could receive the following error messages. Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The authenticated user does not have sufficient permissions to perform the desired activity. Solution: Contact your system administrator to get the needed permissions. Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. 
For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. + /// Makes a copy of an existing snapshot. This operation is valid for Valkey or Redis OSS only. Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control. You could receive the following error messages. Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The authenticated user does not have sufficient permissions to perform the desired activity. Solution: Contact your system administrator to get the needed permissions. Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. @Sendable @inlinable public func copySnapshot(_ input: CopySnapshotMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CopySnapshotResult { @@ -312,7 +312,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Makes a copy of an existing snapshot. This operation is valid for Redis OSS only. Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control. 
You could receive the following error messages. Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The authenticated user does not have sufficient permissions to perform the desired activity. Solution: Contact your system administrator to get the needed permissions. Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. + /// Makes a copy of an existing snapshot. This operation is valid for Valkey or Redis OSS only. Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control. You could receive the following error messages. Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The authenticated user does not have sufficient permissions to perform the desired activity. Solution: Contact your system administrator to get the needed permissions. Error Message: The S3 bucket %s already contains an object with key %s. Solution: Give the TargetSnapshotName a new and unique value. 
If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide. /// /// Parameters: /// - kmsKeyId: The ID of the KMS key used to encrypt the target snapshot. @@ -340,7 +340,7 @@ public struct ElastiCache: AWSService { return try await self.copySnapshot(input, logger: logger) } - /// Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis OSS. This operation is not supported for Redis OSS (cluster mode enabled) clusters. + /// Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached, Valkey or Redis OSS. This operation is not supported for Valkey or Redis OSS (cluster mode enabled) clusters. @Sendable @inlinable public func createCacheCluster(_ input: CreateCacheClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateCacheClusterResult { @@ -353,11 +353,11 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis OSS. This operation is not supported for Redis OSS (cluster mode enabled) clusters. + /// Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached, Valkey or Redis OSS. This operation is not supported for Valkey or Redis OSS (cluster mode enabled) clusters. /// /// Parameters: /// - authToken: Reserved parameter. The password used to access a password protected server. Password constraints: Must be only printable ASCII characters. Must be at least 16 characters and no more than 128 characters in length. The only permitted printable special characters are !, &, #, $, ^, , and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. - /// - autoMinorVersionUpgrade:  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + /// - autoMinorVersionUpgrade:  If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and above, set this parameter to yes to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. /// - azMode: Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. This parameter is only supported for Memcached clusters. 
If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode. /// - cacheClusterId: The node group (shard) identifier. This parameter is stored as a lowercase string. Constraints: A name must contain from 1 to 50 alphanumeric characters or hyphens. The first character must be a letter. A name cannot end with a hyphen or contain two consecutive hyphens. /// - cacheNodeType: The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): @@ -366,11 +366,11 @@ public struct ElastiCache: AWSService { /// - cacheSubnetGroupName: The name of the subnet group to be used for the cluster. Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups. /// - engine: The name of the cache engine to be used for this cluster. Valid values for this parameter are: memcached | redis /// - engineVersion: The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. - /// - ipDiscovery: The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// - ipDiscovery: The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. /// - logDeliveryConfigurations: Specifies the destination, format and type of the logs. - /// - networkType: Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// - networkType: Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. /// - notificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. - /// - numCacheNodes: The initial number of cache nodes that the cluster has. For clusters running Redis OSS, this value must be 1. 
For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/. + /// - numCacheNodes: The initial number of cache nodes that the cluster has. For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/. /// - outpostMode: Specifies whether the nodes in the cluster are created in a single outpost or across multiple outposts. /// - port: The port number on which each of the cache nodes accepts connections. /// - preferredAvailabilityZone: The EC2 Availability Zone in which the cluster is created. All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones. Default: System chosen Availability Zone. @@ -380,8 +380,8 @@ public struct ElastiCache: AWSService { /// - preferredOutpostArns: The outpost ARNs in which the cache cluster is created. /// - replicationGroupId: The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group. If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones. This parameter is only valid if the Engine parameter is redis. /// - securityGroupIds: One or more VPC security groups associated with the cluster. Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). - /// - snapshotArns: A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb - /// - snapshotName: The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. + /// - snapshotArns: A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + /// - snapshotName: The name of a Valkey or Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. 
/// - snapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted. This parameter is only valid if the Engine parameter is redis. Default: 0 (i.e., automatic backups are disabled for this cache cluster). /// - snapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). Example: 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. This parameter is only valid if the Engine parameter is redis. /// - tags: A list of tags to be added to this resource. @@ -567,7 +567,7 @@ public struct ElastiCache: AWSService { return try await self.createCacheSubnetGroup(input, logger: logger) } - /// Global Datastore for Redis OSS offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis OSS, you can create cross-region read replica clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster. + /// Global Datastore offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore with Valkey or Redis OSS, you can create cross-region read replica clusters for ElastiCache to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster. @Sendable @inlinable public func createGlobalReplicationGroup(_ input: CreateGlobalReplicationGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateGlobalReplicationGroupResult { @@ -580,7 +580,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Global Datastore for Redis OSS offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis OSS, you can create cross-region read replica clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster. + /// Global Datastore offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore with Valkey or Redis OSS, you can create cross-region read replica clusters for ElastiCache to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster. 
/// /// Parameters: /// - globalReplicationGroupDescription: Provides details of the Global datastore @@ -602,7 +602,7 @@ public struct ElastiCache: AWSService { return try await self.createGlobalReplicationGroup(input, logger: logger) } - /// Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas. A Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed. The node or shard limit can be increased to a maximum of 500 per cluster if the Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type. When a Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use ElastiCache (Redis OSS) scaling. For more information, see Scaling ElastiCache (Redis OSS) Clusters in the ElastiCache User Guide. This operation is valid for Redis OSS only. + /// Creates a Valkey or Redis OSS (cluster mode disabled) or a Valkey or Redis OSS (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Valkey or Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas. A Valkey or Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed. The node or shard limit can be increased to a maximum of 500 per cluster if the Valkey or Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. 
Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type. When a Valkey or Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use scaling. For more information, see Scaling self-designed clusters in the ElastiCache User Guide. This operation is valid for Valkey and Redis OSS only. @Sendable @inlinable public func createReplicationGroup(_ input: CreateReplicationGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateReplicationGroupResult { @@ -615,31 +615,31 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas. A Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed. The node or shard limit can be increased to a maximum of 500 per cluster if the Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type. When a Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use ElastiCache (Redis OSS) scaling. For more information, see Scaling ElastiCache (Redis OSS) Clusters in the ElastiCache User Guide. This operation is valid for Redis OSS only. + /// Creates a Valkey or Redis OSS (cluster mode disabled) or a Valkey or Redis OSS (cluster mode enabled) replication group. This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore. A Valkey or Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas. 
A Valkey or Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number or replicas allowed. The node or shard limit can be increased to a maximum of 500 per cluster if the Valkey or Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type. When a Valkey or Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use scaling. For more information, see Scaling self-designed clusters in the ElastiCache User Guide. This operation is valid for Valkey and Redis OSS only. /// /// Parameters: /// - atRestEncryptionEnabled: A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false /// - authToken: Reserved parameter. The password used to access a password protected server. AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true. For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. Password constraints: Must be only printable ASCII characters. Must be at least 16 characters and no more than 128 characters in length. The only permitted printable special characters are !, &, #, $, ^, , and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. - /// - automaticFailoverEnabled: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis OSS (cluster mode enabled) replication groups. Default: false - /// - autoMinorVersionUpgrade:  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + /// - automaticFailoverEnabled: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups. 
Default: false + /// - autoMinorVersionUpgrade:  If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and above, set this parameter to yes to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. /// - cacheNodeType: The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): - /// - cacheParameterGroupName: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. To create a Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + /// - cacheParameterGroupName: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Valkey or Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Valkey or Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. To create a Valkey or Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. /// - cacheSecurityGroupNames: A list of cache security group names to associate with this replication group. /// - cacheSubnetGroupName: The name of the cache subnet group to be used for the replication group. If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups. - /// - clusterMode: Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// - clusterMode: Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. /// - dataTieringEnabled: Enables data tiering. 
Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering. /// - engine: The name of the cache engine to be used for the clusters in this replication group. The value must be set to Redis. /// - engineVersion: The version number of the cache engine to be used for the clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version) in the ElastiCache User Guide, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. /// - globalReplicationGroupId: The name of the Global datastore - /// - ipDiscovery: The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// - ipDiscovery: The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. /// - kmsKeyId: The ID of the KMS key used to encrypt the disk in the cluster. /// - logDeliveryConfigurations: Specifies the destination, format and type of the logs. /// - multiAZEnabled: A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ. - /// - networkType: Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. - /// - nodeGroupConfiguration: A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group. + /// - networkType: Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. + /// - nodeGroupConfiguration: A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey or Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Valkey or Redis OSS (cluster mode enabled) cluster from a S3 rdb file. 
You must configure each node group (shard) using this parameter because you must specify the slots for each node group. /// - notificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. /// - numCacheClusters: The number of clusters this replication group initially has. This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead. If AutomaticFailoverEnabled is true, the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6. The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas). - /// - numNodeGroups: An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1. Default: 1 + /// - numNodeGroups: An optional parameter that specifies the number of node groups (shards) for this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey or Redis OSS (cluster mode disabled) either omit this parameter or set it to 1. Default: 1 /// - port: The port number on which each member of the replication group accepts connections. /// - preferredCacheClusterAZs: A list of EC2 Availability Zones in which the replication group's clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list. This parameter is not used if there is more than one node group (shard). You should use NodeGroupConfiguration instead. If you are creating your replication group in an Amazon VPC (recommended), you can only locate clusters in Availability Zones associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of NumCacheClusters. Default: system chosen Availability Zones. /// - preferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 @@ -648,14 +648,14 @@ public struct ElastiCache: AWSService { /// - replicationGroupDescription: A user-created description for the replication group. /// - replicationGroupId: The replication group identifier. This parameter is stored as a lowercase string. Constraints: A name must contain from 1 to 40 alphanumeric characters or hyphens. The first character must be a letter. A name cannot end with a hyphen or contain two consecutive hyphens. /// - securityGroupIds: One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC). - /// - serverlessCacheSnapshotName: The name of the snapshot used to create a replication group. Available for Redis OSS only. - /// - snapshotArns: A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. 
The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + /// - serverlessCacheSnapshotName: The name of the snapshot used to create a replication group. Available for Valkey, Redis OSS only. + /// - snapshotArns: A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb /// - snapshotName: The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to restoring while the new replication group is being created. /// - snapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. Default: 0 (i.e., automatic backups are disabled for this cluster). /// - snapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard). Example: 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. /// - tags: A list of tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue. Tags on replication groups will be replicated to all nodes. /// - transitEncryptionEnabled: A flag that enables in-transit encryption when set to true. This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. - /// - transitEncryptionMode: A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. This process will not trigger the replacement of the replication group. 
+ /// - transitEncryptionMode: A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Valkey or Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. This process will not trigger the replacement of the replication group. /// - userGroupIds: The user group to associate with the replication group. /// - logger: Logger use during operation @inlinable @@ -762,18 +762,18 @@ public struct ElastiCache: AWSService { /// /// Parameters: /// - cacheUsageLimits: Sets the cache usage limits for storage and ElastiCache Processing Units for the cache. - /// - dailySnapshotTime: The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only. + /// - dailySnapshotTime: The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Valkey, Redis OSS and Serverless Memcached only. /// - description: User-provided description for the serverless cache. The default is NULL, i.e. if no description is provided then an empty string will be returned. The maximum length is 255 characters. /// - engine: The name of the cache engine to be used for creating the serverless cache. /// - kmsKeyId: ARN of the customer managed key for encrypting the data at rest. If no KMS key is provided, a default service key is used. /// - majorEngineVersion: The version of the cache engine that will be used to create the serverless cache. /// - securityGroupIds: A list of the one or more VPC security groups to be associated with the serverless cache. The security group will authorize traffic access for the VPC end-point (private-link). If no other information is given this will be the VPC’s Default Security Group that is associated with the cluster VPC end-point. /// - serverlessCacheName: User-provided identifier for the serverless cache. This parameter is stored as a lowercase string. - /// - snapshotArnsToRestore: The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis OSS and Serverless Memcached only. - /// - snapshotRetentionLimit: The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless Memcached only. + /// - snapshotArnsToRestore: The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - snapshotRetentionLimit: The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Valkey, Redis OSS and Serverless Memcached only. 
/// - subnetIds: A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. All the subnetIds must belong to the same VPC. /// - tags: The list of tags (key, value) pairs to be added to the serverless cache resource. Default is NULL. - /// - userGroupId: The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL. + /// - userGroupId: The identifier of the UserGroup to be associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL. /// - logger: Logger use during operation @inlinable public func createServerlessCache( @@ -810,7 +810,7 @@ public struct ElastiCache: AWSService { return try await self.createServerlessCache(input, logger: logger) } - /// This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis OSS and Serverless Memcached only. + /// This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Valkey, Redis OSS and Serverless Memcached only. @Sendable @inlinable public func createServerlessCacheSnapshot(_ input: CreateServerlessCacheSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateServerlessCacheSnapshotResponse { @@ -823,13 +823,13 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis OSS and Serverless Memcached only. + /// This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Valkey, Redis OSS and Serverless Memcached only. /// /// Parameters: - /// - kmsKeyId: The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS and Serverless Memcached only. Default: NULL - /// - serverlessCacheName: The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis OSS and Serverless Memcached only. - /// - serverlessCacheSnapshotName: The name for the snapshot being created. Must be unique for the customer account. Available for Redis OSS and Serverless Memcached only. Must be between 1 and 255 characters. - /// - tags: A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. + /// - kmsKeyId: The ID of the KMS key used to encrypt the snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. Default: NULL + /// - serverlessCacheName: The name of an existing serverless cache. The snapshot is created from this cache. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - serverlessCacheSnapshotName: The name for the snapshot being created. Must be unique for the customer account. Available for Valkey, Redis OSS and Serverless Memcached only. Must be between 1 and 255 characters. + /// - tags: A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Valkey, Redis OSS and Serverless Memcached only. /// - logger: Logger use during operation @inlinable public func createServerlessCacheSnapshot( @@ -848,7 +848,7 @@ public struct ElastiCache: AWSService { return try await self.createServerlessCacheSnapshot(input, logger: logger) } - /// Creates a copy of an entire cluster or replication group at a specific moment in time. This operation is valid for Redis OSS only. + /// Creates a copy of an entire cluster or replication group at a specific moment in time. 
This operation is valid for Valkey or Redis OSS only. @Sendable @inlinable public func createSnapshot(_ input: CreateSnapshotMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSnapshotResult { @@ -861,7 +861,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Creates a copy of an entire cluster or replication group at a specific moment in time. This operation is valid for Redis OSS only. + /// Creates a copy of an entire cluster or replication group at a specific moment in time. This operation is valid for Valkey or Redis OSS only. /// /// Parameters: /// - cacheClusterId: The identifier of an existing cluster. The snapshot is created from this cluster. @@ -889,7 +889,7 @@ public struct ElastiCache: AWSService { return try await self.createSnapshot(input, logger: logger) } - /// For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, see Using Role Based Access Control (RBAC). + /// For Valkey engine version 7.2 onwards and Redis OSS 6.0 and onwards: Creates a user. For more information, see Using Role Based Access Control (RBAC). @Sendable @inlinable public func createUser(_ input: CreateUserMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> User { @@ -902,7 +902,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, see Using Role Based Access Control (RBAC). + /// For Valkey engine version 7.2 onwards and Redis OSS 6.0 and onwards: Creates a user. For more information, see Using Role Based Access Control (RBAC). /// /// Parameters: /// - accessString: Access permissions string used for this user. @@ -939,7 +939,7 @@ public struct ElastiCache: AWSService { return try await self.createUser(input, logger: logger) } - /// For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more information, see Using Role Based Access Control (RBAC) + /// For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Creates a user group. For more information, see Using Role Based Access Control (RBAC) @Sendable @inlinable public func createUserGroup(_ input: CreateUserGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> UserGroup { @@ -952,11 +952,11 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more information, see Using Role Based Access Control (RBAC) + /// For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Creates a user group. For more information, see Using Role Based Access Control (RBAC) /// /// Parameters: /// - engine: The current supported value is Redis user. - /// - tags: A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Redis OSS only. + /// - tags: A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Valkey and Redis OSS only. /// - userGroupId: The ID of the user group. /// - userIds: The list of user IDs that belong to the user group. /// - logger: Logger use during operation @@ -994,8 +994,8 @@ public struct ElastiCache: AWSService { /// /// Parameters: /// - applyImmediately: Indicates that the shard reconfiguration process begins immediately. At present, the only permitted value for this parameter is true. 
- /// - globalNodeGroupsToRemove: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster. - /// - globalNodeGroupsToRetain: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache (Redis OSS) will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster. + /// - globalNodeGroupsToRemove: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster. + /// - globalNodeGroupsToRetain: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster. /// - globalReplicationGroupId: The name of the Global datastore /// - nodeGroupCount: The number of node groups (shards) that results from the modification of the shard configuration /// - logger: Logger use during operation @@ -1018,7 +1018,7 @@ public struct ElastiCache: AWSService { return try await self.decreaseNodeGroupsInGlobalReplicationGroup(input, logger: logger) } - /// Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. + /// Dynamically decreases the number of replicas in a Valkey or Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Valkey or Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. @Sendable @inlinable public func decreaseReplicaCount(_ input: DecreaseReplicaCountMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DecreaseReplicaCountResult { @@ -1031,12 +1031,12 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. + /// Dynamically decreases the number of replicas in a Valkey or Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Valkey or Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. /// /// Parameters: /// - applyImmediately: If True, the number of replica nodes is decreased immediately. ApplyImmediately=False is not currently supported. 
- /// - newReplicaCount: The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. The minimum number of replicas in a shard or replication group is: Redis OSS (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) - /// - replicaConfiguration: A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. + /// - newReplicaCount: The number of read replica nodes you want at the completion of this operation. For Valkey or Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Valkey or Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. The minimum number of replicas in a shard or replication group is: Valkey or Redis OSS (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Valkey or Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) + /// - replicaConfiguration: A list of ConfigureShard objects that can be used to configure each shard in a Valkey or Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. /// - replicasToRemove: A list of the node ids to remove from the replication group or node group (shard). /// - replicationGroupId: The id of the replication group from which you want to remove replica nodes. /// - logger: Logger use during operation @@ -1059,7 +1059,7 @@ public struct ElastiCache: AWSService { return try await self.decreaseReplicaCount(input, logger: logger) } - /// Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation. This operation is not valid for: Redis OSS (cluster mode enabled) clusters Redis OSS (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis OSS (cluster mode enabled) replication group A cluster that is not in the available state + /// Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation. 
This operation is not valid for: Valkey or Redis OSS (cluster mode enabled) clusters Valkey or Redis OSS (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication group A node group (shard) that has Multi-AZ mode enabled A cluster from a Valkey or Redis OSS (cluster mode enabled) replication group A cluster that is not in the available state @Sendable @inlinable public func deleteCacheCluster(_ input: DeleteCacheClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteCacheClusterResult { @@ -1072,7 +1072,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation. This operation is not valid for: Redis OSS (cluster mode enabled) clusters Redis OSS (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis OSS (cluster mode enabled) replication group A cluster that is not in the available state + /// Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation. This operation is not valid for: Valkey or Redis OSS (cluster mode enabled) clusters Valkey or Redis OSS (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication group A node group (shard) that has Multi-AZ mode enabled A cluster from a Valkey or Redis OSS (cluster mode enabled) replication group A cluster that is not in the available state /// /// Parameters: /// - cacheClusterId: The cluster identifier for the cluster to be deleted. This parameter is not case sensitive. @@ -1261,7 +1261,7 @@ public struct ElastiCache: AWSService { /// Deletes a specified existing serverless cache. CreateServerlessCacheSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception. /// /// Parameters: - /// - finalSnapshotName: Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis OSS and Serverless Memcached only. Default: NULL, i.e. a final snapshot is not taken. + /// - finalSnapshotName: Name of the final snapshot to be taken before the serverless cache is deleted. Available for Valkey, Redis OSS and Serverless Memcached only. Default: NULL, i.e. a final snapshot is not taken. /// - serverlessCacheName: The identifier of the serverless cache to be deleted. /// - logger: Logger use during operation @inlinable @@ -1277,7 +1277,7 @@ public struct ElastiCache: AWSService { return try await self.deleteServerlessCache(input, logger: logger) } - /// Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. + /// Deletes an existing serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. 
@Sendable @inlinable public func deleteServerlessCacheSnapshot(_ input: DeleteServerlessCacheSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteServerlessCacheSnapshotResponse { @@ -1290,10 +1290,10 @@ logger: logger ) } - /// Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. + /// Deletes an existing serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. /// /// Parameters: - /// - serverlessCacheSnapshotName: Idenfitier of the snapshot to be deleted. Available for Redis OSS and Serverless Memcached only. + /// - serverlessCacheSnapshotName: Identifier of the snapshot to be deleted. Available for Valkey, Redis OSS and Serverless Memcached only. /// - logger: Logger use during operation @inlinable public func deleteServerlessCacheSnapshot( @@ -1306,7 +1306,7 @@ public struct ElastiCache: AWSService { return try await self.deleteServerlessCacheSnapshot(input, logger: logger) } - /// Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation. This operation is valid for Redis OSS only. + /// Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation. This operation is valid for Valkey or Redis OSS only. @Sendable @inlinable public func deleteSnapshot(_ input: DeleteSnapshotMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteSnapshotResult { @@ -1319,7 +1319,7 @@ logger: logger ) } - /// Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation. This operation is valid for Redis OSS only. + /// Deletes an existing snapshot. When you receive a successful response from this operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this operation. This operation is valid for Valkey or Redis OSS only. /// /// Parameters: /// - snapshotName: The name of the snapshot to be deleted. @@ -1335,7 +1335,7 @@ return try await self.deleteSnapshot(input, logger: logger) } - /// For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC). + /// For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user.
The user will be removed from all user groups and in turn removed from all replication groups. For more information, see Using Role Based Access Control (RBAC). /// /// Parameters: /// - userId: The ID of the user. @@ -1364,7 +1364,7 @@ public struct ElastiCache: AWSService { return try await self.deleteUser(input, logger: logger) } - /// For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC). + /// For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC). @Sendable @inlinable public func deleteUserGroup(_ input: DeleteUserGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> UserGroup { @@ -1377,7 +1377,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC). + /// For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user group. The user group must first be disassociated from the replication group before it can be deleted. For more information, see Using Role Based Access Control (RBAC). /// /// Parameters: /// - userGroupId: The ID of the user group. @@ -1412,7 +1412,7 @@ public struct ElastiCache: AWSService { /// - cacheClusterId: The user-supplied cluster identifier. If this parameter is specified, only information about that specific cluster is returned. This parameter isn't case sensitive. /// - marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. - /// - showCacheClustersNotInReplicationGroups: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis OSS clusters. + /// - showCacheClustersNotInReplicationGroups: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Valkey or Redis OSS clusters. /// - showCacheNodeInfo: An optional flag that can be included in the DescribeCacheCluster request to retrieve information about the individual cache nodes. /// - logger: Logger use during operation @inlinable @@ -1450,7 +1450,7 @@ public struct ElastiCache: AWSService { /// Returns a list of the available cache engines and their versions. /// /// Parameters: - /// - cacheParameterGroupFamily: The name of a specific cache parameter group family to return details for. 
Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens + /// - cacheParameterGroupFamily: The name of a specific cache parameter group family to return details for. Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 | valkey7 Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens /// - defaultOnly: If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. /// - engine: The cache engine to return. Valid values: memcached | redis /// - engineVersion: The cache engine version to return. Example: 1.4.14 @@ -1741,7 +1741,7 @@ public struct ElastiCache: AWSService { return try await self.describeGlobalReplicationGroups(input, logger: logger) } - /// Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Redis OSS only. + /// Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Valkey or Redis OSS only. @Sendable @inlinable public func describeReplicationGroups(_ input: DescribeReplicationGroupsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> ReplicationGroupMessage { @@ -1754,7 +1754,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Redis OSS only. + /// Returns information about a particular replication group. If no identifier is specified, DescribeReplicationGroups returns information about all replication groups. This operation is valid for Valkey or Redis OSS only. /// /// Parameters: /// - marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. @@ -1873,7 +1873,7 @@ public struct ElastiCache: AWSService { return try await self.describeReservedCacheNodesOfferings(input, logger: logger) } - /// Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis OSS and Serverless Memcached only. + /// Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only. 
@Sendable @inlinable public func describeServerlessCacheSnapshots(_ input: DescribeServerlessCacheSnapshotsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeServerlessCacheSnapshotsResponse { @@ -1886,14 +1886,14 @@ logger: logger ) } - /// Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Redis OSS and Serverless Memcached only. + /// Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only. /// /// Parameters: - /// - maxResults: The maximum number of records to include in the response. If more records exist than the specified max-results value, a market is included in the response so that remaining results can be retrieved. Available for Redis OSS and Serverless Memcached only.The default is 50. The Validation Constraints are a maximum of 50. - /// - nextToken: An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only. - /// - serverlessCacheName: The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only. - /// - serverlessCacheSnapshotName: The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only. - /// - snapshotType: The type of snapshot that is being described. Available for Redis OSS and Serverless Memcached only. + /// - maxResults: The maximum number of records to include in the response. If more records exist than the specified max-results value, a marker is included in the response so that remaining results can be retrieved. Available for Valkey, Redis OSS and Serverless Memcached only. The default is 50. The Validation Constraints are a maximum of 50. + /// - nextToken: An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - serverlessCacheName: The identifier of the serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - serverlessCacheSnapshotName: The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - snapshotType: The type of snapshot that is being described. Available for Valkey, Redis OSS and Serverless Memcached only.
/// - logger: Logger use during operation @inlinable public func describeServerlessCacheSnapshots( @@ -1987,7 +1987,7 @@ public struct ElastiCache: AWSService { return try await self.describeServiceUpdates(input, logger: logger) } - /// Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Redis OSS only. + /// Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Valkey or Redis OSS only. @Sendable @inlinable public func describeSnapshots(_ input: DescribeSnapshotsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSnapshotsListMessage { @@ -2000,7 +2000,7 @@ logger: logger ) } - /// Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Redis OSS only. + /// Returns information about cluster or replication group snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, or just the snapshots associated with a particular cache cluster. This operation is valid for Valkey or Redis OSS only. /// /// Parameters: /// - cacheClusterId: A user-supplied cluster identifier. If this parameter is specified, only snapshots associated with that specific cluster are described. @@ -2051,7 +2051,7 @@ /// /// Parameters: /// - cacheClusterIds: The cache cluster IDs - /// - engine: The Elasticache engine to which the update applies. Either Redis OSS or Memcached. + /// - engine: The ElastiCache engine to which the update applies. Either Valkey, Redis OSS or Memcached. /// - marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. /// - maxRecords: The maximum number of records to include in the response /// - replicationGroupIds: The replication group IDs @@ -2141,7 +2141,7 @@ /// Returns a list of users. /// /// Parameters: - /// - engine: The Redis OSS engine. + /// - engine: The engine. /// - filters: Filter to determine the list of User IDs to return. /// - marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. @@ -2201,7 +2201,7 @@ return try await self.disassociateGlobalReplicationGroup(input, logger: logger) } - /// Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis OSS only.
+ /// Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Valkey and Redis OSS only. @Sendable @inlinable public func exportServerlessCacheSnapshot(_ input: ExportServerlessCacheSnapshotRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ExportServerlessCacheSnapshotResponse { @@ -2214,11 +2214,11 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis OSS only. + /// Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Valkey and Redis OSS only. /// /// Parameters: - /// - s3BucketName: Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region as the snapshot. Available for Redis OSS only. - /// - serverlessCacheSnapshotName: The identifier of the serverless cache snapshot to be exported to S3. Available for Redis OSS only. + /// - s3BucketName: Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region as the snapshot. Available for Valkey and Redis OSS only. + /// - serverlessCacheSnapshotName: The identifier of the serverless cache snapshot to be exported to S3. Available for Valkey and Redis OSS only. /// - logger: Logger use during operation @inlinable public func exportServerlessCacheSnapshot( @@ -2306,7 +2306,7 @@ public struct ElastiCache: AWSService { return try await self.increaseNodeGroupsInGlobalReplicationGroup(input, logger: logger) } - /// Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. + /// Dynamically increases the number of replicas in a Valkey or Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Valkey or Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. @Sendable @inlinable public func increaseReplicaCount(_ input: IncreaseReplicaCountMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> IncreaseReplicaCountResult { @@ -2319,12 +2319,12 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. + /// Dynamically increases the number of replicas in a Valkey or Redis OSS (cluster mode disabled) replication group or the number of replica nodes in one or more node groups (shards) of a Valkey or Redis OSS (cluster mode enabled) replication group. This operation is performed with no cluster down time. /// /// Parameters: /// - applyImmediately: If True, the number of replica nodes is increased immediately. ApplyImmediately=False is not currently supported. - /// - newReplicaCount: The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. 
For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. - /// - replicaConfiguration: A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. + /// - newReplicaCount: The number of read replica nodes you want at the completion of this operation. For Valkey or Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Valkey or Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. + /// - replicaConfiguration: A list of ConfigureShard objects that can be used to configure each shard in a Valkey or Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. /// - replicationGroupId: The id of the replication group to which you want to add replica nodes. /// - logger: Logger use during operation @inlinable @@ -2344,7 +2344,7 @@ public struct ElastiCache: AWSService { return try await self.increaseReplicaCount(input, logger: logger) } - /// Lists all available node types that you can scale your Redis OSS cluster's or replication group's current node type. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation. + /// Lists all available node types that you can scale your cluster's or replication group's current node type to. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation. @Sendable @inlinable public func listAllowedNodeTypeModifications(_ input: ListAllowedNodeTypeModificationsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> AllowedNodeTypeModificationsMessage { @@ -2357,7 +2357,7 @@ logger: logger ) } - /// Lists all available node types that you can scale your Redis OSS cluster's or replication group's current node type. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation. + /// Lists all available node types that you can scale your cluster's or replication group's current node type to. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation. /// /// Parameters: /// - cacheClusterId: The name of the cluster you want to scale up to a larger node instanced type. ElastiCache uses the cluster id to identify the current node type of this cluster and from that to create a list of node types you can scale up to. You must provide a value for either the CacheClusterId or the ReplicationGroupId.
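The operation documented above is normally the first half of a scaling workflow: ListAllowedNodeTypeModifications reports the node types the current cluster or replication group can move to, and one of those values is then supplied as CacheNodeType to ModifyCacheCluster or ModifyReplicationGroup. A minimal illustrative sketch of that first step, not part of the generated diff, assuming an already-configured ElastiCache client; the replication group ID is a hypothetical placeholder, and the initializer label and the scaleUpModifications member are assumed to mirror the parameter and response field names documented above:

import SotoElastiCache

// Returns the node types this replication group can be scaled up to.
func allowedScaleUpTargets(elastiCache: ElastiCache) async throws -> [String] {
    // Either CacheClusterId or ReplicationGroupId must be provided.
    let input = ListAllowedNodeTypeModificationsMessage(replicationGroupId: "example-replication-group")
    let response = try await elastiCache.listAllowedNodeTypeModifications(input)
    // Any of these values is a valid CacheNodeType for a subsequent modify call.
    return response.scaleUpModifications ?? []
}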
@@ -2423,21 +2423,22 @@ public struct ElastiCache: AWSService { /// Parameters: /// - applyImmediately: If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the cluster. If false, changes to the cluster are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first. If you perform a ModifyCacheCluster before a pending modification is applied, the pending modification is replaced by the newer modification. Valid values: true | false Default: false /// - authToken: Reserved parameter. The password used to access a password protected server. This parameter must be specified with the auth-token-update parameter. Password constraints: Must be only printable ASCII characters Must be at least 16 characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '"', or '@', '%' For more information, see AUTH password at AUTH. - /// - authTokenUpdateStrategy: Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis OSS AUTH - /// - autoMinorVersionUpgrade:  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + /// - authTokenUpdateStrategy: Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with AUTH + /// - autoMinorVersionUpgrade:  If you are running Valkey 7.2 or Redis OSS engine version 6.0 or later, set this parameter to yes to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. /// - azMode: Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones. Valid values: single-az | cross-az. This option is only supported for Memcached clusters. You cannot specify single-az if the Memcached cluster already has cache nodes in different Availability Zones. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes are located in different Availability Zones. /// - cacheClusterId: The cluster identifier. This value is stored as a lowercase string. /// - cacheNodeIdsToRemove: A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less than the existing number of cache nodes. The number of cache node IDs supplied in this parameter must match the difference between the existing number of cache nodes in the cluster or pending cache nodes, whichever is greater, and the value of NumCacheNodes in the request. For example: If you have 3 active cache nodes, 7 pending cache nodes, and the number of cache nodes in this ModifyCacheCluster call is 5, you must list 2 (7 - 5) cache node IDs to remove. 
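Reviewer note: the ListAllowedNodeTypeModifications operation shown above is the natural precursor to the ModifyCacheCluster/ModifyReplicationGroup calls that follow, since CacheNodeType must come from its result. A hedged sketch, assuming an `elastiCache` service object configured as in the earlier example and a placeholder cluster id:

```swift
import SotoElastiCache

// Hypothetical helper; "my-cache-cluster" is a placeholder identifier.
func printScalingTargets(_ elastiCache: ElastiCache) async throws {
    let allowed = try await elastiCache.listAllowedNodeTypeModifications(
        cacheClusterId: "my-cache-cluster"
    )
    // Any CacheNodeType passed to ModifyCacheCluster or ModifyReplicationGroup
    // must be one of the node types returned here.
    print("scale up:   \(allowed.scaleUpModifications ?? [])")
    print("scale down: \(allowed.scaleDownModifications ?? [])")
}
```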
/// - cacheNodeType: A valid cache node type that you want to scale this cluster up to. /// - cacheParameterGroupName: The name of the cache parameter group to apply to this cluster. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request. /// - cacheSecurityGroupNames: A list of cache security group names to authorize on this cluster. This change is asynchronously applied as soon as possible. You can use this parameter only with clusters that are created outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be "Default". + /// - engine: Modifies the engine listed in a cluster message. The options are redis, memcached or valkey. /// - engineVersion: The upgraded version of the cache engine to be run on the cache nodes. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. - /// - ipDiscovery: The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// - ipDiscovery: The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. /// - logDeliveryConfigurations: Specifies the destination, format and type of the logs. /// - newAvailabilityZones: This option is only supported on Memcached clusters. The list of Availability Zones where the new Memcached cache nodes are created. This parameter is only valid when NumCacheNodes in the request is greater than the sum of the number of active cache nodes and the number of cache nodes pending creation (which may be zero). The number of Availability Zones supplied in this list must match the cache nodes being added in this request. Scenarios: Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for the two new nodes. Scenario 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone for the new node. Scenario 3: You want to cancel all pending operations. Specify NumCacheNodes=3 to cancel all pending operations. The Availability Zone placement of nodes pending creation cannot be modified. If you wish to cancel any nodes pending creation, add 0 nodes by setting NumCacheNodes to the number of current nodes. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes can be located in different Availability Zones. For guidance on how to move existing Memcached nodes to different Availability Zones, see the Availability Zone Considerations section of Cache Node Considerations for Memcached. Impact of new add/remove requests upon pending requests Scenario-1 Pending Action: Delete New Request: Delete Result: The new delete, pending or immediate, replaces the pending delete. 
Scenario-2 Pending Action: Delete New Request: Create Result: The new create, pending or immediate, replaces the pending delete. Scenario-3 Pending Action: Create New Request: Delete Result: The new delete, pending or immediate, replaces the pending create. Scenario-4 Pending Action: Create New Request: Create Result: The new create is added to the pending create. Important: If the new create request is Apply Immediately - Yes, all creates are performed immediately. If the new create request is Apply Immediately - No, all creates are pending. /// - notificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent. The Amazon SNS topic owner must be same as the cluster owner. /// - notificationTopicStatus: The status of the Amazon SNS notification topic. Notifications are sent only if the status is active. Valid values: active | inactive - /// - numCacheNodes: The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster. + /// - numCacheNodes: The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove. For clusters running Valkey or Redis OSS, this value must be 1. 
For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster. /// - preferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 /// - securityGroupIds: Specifies the VPC Security Groups associated with the cluster. This parameter can be used only with clusters that are created in an Amazon Virtual Private Cloud (Amazon VPC). /// - snapshotRetentionLimit: The number of days for which ElastiCache retains automatic cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. @@ -2455,6 +2456,7 @@ public struct ElastiCache: AWSService { cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, cacheSecurityGroupNames: [String]? = nil, + engine: String? = nil, engineVersion: String? = nil, ipDiscovery: IpDiscovery? = nil, logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? = nil, @@ -2479,6 +2481,7 @@ public struct ElastiCache: AWSService { cacheNodeType: cacheNodeType, cacheParameterGroupName: cacheParameterGroupName, cacheSecurityGroupNames: cacheSecurityGroupNames, + engine: engine, engineVersion: engineVersion, ipDiscovery: ipDiscovery, logDeliveryConfigurations: logDeliveryConfigurations, @@ -2581,6 +2584,7 @@ public struct ElastiCache: AWSService { /// - automaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. /// - cacheNodeType: A valid cache node type that you want to scale this Global datastore to. /// - cacheParameterGroupName: The name of the cache parameter group to use with the Global datastore. It must be compatible with the major engine version used by the Global datastore. + /// - engine: Modifies the engine listed in a global replication group message. The options are redis, memcached or valkey. 
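Reviewer note: the new `engine` parameter added to `modifyCacheCluster` above is the user-visible piece of this change. A minimal sketch of switching a cluster's engine with it; the cluster id and target version are placeholders, and supported upgrade paths should be checked against the ElastiCache documentation rather than assumed from this example.

```swift
import SotoElastiCache

// Sketch only: assumes an `elastiCache` service object as in the first example.
func switchClusterEngine(_ elastiCache: ElastiCache) async throws {
    let result = try await elastiCache.modifyCacheCluster(
        applyImmediately: true,
        cacheClusterId: "my-cache-cluster",   // placeholder id
        engine: "valkey",                     // new parameter: redis, memcached or valkey
        engineVersion: "7.2"
    )
    print(result.cacheCluster?.engine ?? "unknown")
}
```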
/// - engineVersion: The upgraded version of the cache engine to be run on the clusters in the Global datastore. /// - globalReplicationGroupDescription: A description of the Global datastore /// - globalReplicationGroupId: The name of the Global datastore @@ -2591,6 +2595,7 @@ public struct ElastiCache: AWSService { automaticFailoverEnabled: Bool? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, + engine: String? = nil, engineVersion: String? = nil, globalReplicationGroupDescription: String? = nil, globalReplicationGroupId: String? = nil, @@ -2601,6 +2606,7 @@ public struct ElastiCache: AWSService { automaticFailoverEnabled: automaticFailoverEnabled, cacheNodeType: cacheNodeType, cacheParameterGroupName: cacheParameterGroupName, + engine: engine, engineVersion: engineVersion, globalReplicationGroupDescription: globalReplicationGroupDescription, globalReplicationGroupId: globalReplicationGroupId @@ -2608,7 +2614,7 @@ public struct ElastiCache: AWSService { return try await self.modifyGlobalReplicationGroup(input, logger: logger) } - /// Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer. Scaling for Amazon ElastiCache (Redis OSS) (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation is valid for Redis OSS only. + /// Modifies the settings for a replication group. This is limited to Valkey and Redis OSS 7 and above. Scaling for Valkey or Redis OSS (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation is valid for Valkey or Redis OSS only. @Sendable @inlinable public func modifyReplicationGroup(_ input: ModifyReplicationGroupMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> ModifyReplicationGroupResult { @@ -2621,20 +2627,21 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer. Scaling for Amazon ElastiCache (Redis OSS) (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation is valid for Redis OSS only. + /// Modifies the settings for a replication group. This is limited to Valkey and Redis OSS 7 and above. Scaling for Valkey or Redis OSS (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This operation is valid for Valkey or Redis OSS only. /// /// Parameters: /// - applyImmediately: If true, this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group. If false, changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first. Valid values: true | false Default: false /// - authToken: Reserved parameter. The password used to access a password protected server. This parameter must be specified with the auth-token-update-strategy parameter. Password constraints: Must be only printable ASCII characters Must be at least 16 characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '"', or '@', '%' For more information, see AUTH password at AUTH. 
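Reviewer note: ModifyGlobalReplicationGroup gains the same `engine` parameter. A hedged sketch of applying it to a Global datastore; the datastore name and version are placeholders.

```swift
import SotoElastiCache

// Sketch only: assumes an `elastiCache` service object as in the first example.
func switchGlobalDatastoreEngine(_ elastiCache: ElastiCache) async throws {
    _ = try await elastiCache.modifyGlobalReplicationGroup(
        applyImmediately: true,
        engine: "valkey",                               // new parameter
        engineVersion: "7.2",
        globalReplicationGroupId: "my-global-datastore" // placeholder name
    )
}
```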
- /// - authTokenUpdateStrategy: Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis OSS AUTH + /// - authTokenUpdateStrategy: Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with AUTH /// - automaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. Valid values: true | false - /// - autoMinorVersionUpgrade:  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + /// - autoMinorVersionUpgrade:  If you are running Valkey or Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. /// - cacheNodeType: A valid cache node type that you want to scale this replication group to. /// - cacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request. /// - cacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible. This parameter can be used only with replication group containing clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default. - /// - clusterMode: Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// - clusterMode: Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// - engine: Modifies the engine listed in a replication group message. The options are redis, memcached or valkey. /// - engineVersion: The upgraded version of the cache engine to be run on the clusters in the replication group. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. 
If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version. - /// - ipDiscovery: The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// - ipDiscovery: The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. /// - logDeliveryConfigurations: Specifies the destination, format and type of the logs. /// - multiAZEnabled: A flag to indicate MultiAZ is enabled. /// - notificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent. The Amazon SNS topic owner must be same as the replication group owner. @@ -2646,10 +2653,10 @@ public struct ElastiCache: AWSService { /// - replicationGroupId: The identifier of the replication group to modify. /// - securityGroupIds: Specifies the VPC Security Groups associated with the clusters in the replication group. This parameter can be used only with replication group containing clusters running in an Amazon Virtual Private Cloud (Amazon VPC). /// - snapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. - /// - snapshottingClusterId: The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups. + /// - snapshottingClusterId: The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Valkey or Redis OSS (cluster mode enabled) replication groups. /// - snapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId. Example: 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. /// - transitEncryptionEnabled: A flag that enables in-transit encryption when set to true. If you are enabling in-transit encryption for an existing cluster, you must also set TransitEncryptionMode to preferred. - /// - transitEncryptionMode: A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can set the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. + /// - transitEncryptionMode: A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. 
You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Valkey or Redis OSS clients to use encrypted connections you can set the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. /// - userGroupIdsToAdd: The ID of the user group you are associating with the replication group. /// - userGroupIdsToRemove: The ID of the user group to disassociate from the replication group, meaning the users in the group no longer can access the replication group. /// - logger: Logger use during operation @@ -2664,6 +2671,7 @@ public struct ElastiCache: AWSService { cacheParameterGroupName: String? = nil, cacheSecurityGroupNames: [String]? = nil, clusterMode: ClusterMode? = nil, + engine: String? = nil, engineVersion: String? = nil, ipDiscovery: IpDiscovery? = nil, logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? = nil, @@ -2695,6 +2703,7 @@ public struct ElastiCache: AWSService { cacheParameterGroupName: cacheParameterGroupName, cacheSecurityGroupNames: cacheSecurityGroupNames, clusterMode: clusterMode, + engine: engine, engineVersion: engineVersion, ipDiscovery: ipDiscovery, logDeliveryConfigurations: logDeliveryConfigurations, @@ -2736,9 +2745,9 @@ public struct ElastiCache: AWSService { /// Parameters: /// - applyImmediately: Indicates that the shard reconfiguration process begins immediately. At present, the only permitted value for this parameter is true. Value: true /// - nodeGroupCount: The number of node groups (shards) that results from the modification of the shard configuration. - /// - nodeGroupsToRemove: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. - /// - nodeGroupsToRetain: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster. - /// - replicationGroupId: The name of the Redis OSS (cluster mode enabled) cluster (replication group) on which the shards are to be configured. + /// - nodeGroupsToRemove: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. + /// - nodeGroupsToRetain: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster. 
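Reviewer note: ModifyReplicationGroup picks up the `engine` parameter as well, as shown in the signature change above. A minimal sketch with placeholder identifiers; all other settings keep their defaults.

```swift
import SotoElastiCache

// Sketch only: assumes an `elastiCache` service object as in the first example.
func switchReplicationGroupEngine(_ elastiCache: ElastiCache) async throws {
    let result = try await elastiCache.modifyReplicationGroup(
        applyImmediately: true,
        engine: "valkey",                           // new parameter: redis, memcached or valkey
        engineVersion: "7.2",
        replicationGroupId: "my-replication-group"  // placeholder id
    )
    print(result.replicationGroup?.status ?? "unknown")
}
```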
+ /// - replicationGroupId: The name of the Valkey or Redis OSS (cluster mode enabled) cluster (replication group) on which the shards are to be configured. /// - reshardingConfiguration: Specifies the preferred availability zones for each node group in the cluster. If the value of NodeGroupCount is greater than the current number of node groups (shards), you can use this parameter to specify the preferred availability zones of the cluster's shards. If you omit this parameter ElastiCache selects availability zones for you. You can specify this parameter only if the value of NodeGroupCount is greater than the current number of node groups (shards). /// - logger: Logger use during operation @inlinable @@ -2779,19 +2788,23 @@ public struct ElastiCache: AWSService { /// /// Parameters: /// - cacheUsageLimits: Modify the cache usage limit for the serverless cache. - /// - dailySnapshotTime: The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed. + /// - dailySnapshotTime: The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed. /// - description: User provided description for the serverless cache. Default = NULL, i.e. the existing description is not removed/modified. The description has a maximum length of 255 characters. - /// - removeUserGroup: The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL. + /// - engine: Modifies the engine listed in a serverless cache request. The options are redis, memcached or valkey. + /// - majorEngineVersion: Modifies the engine vesion listed in a serverless cache request. + /// - removeUserGroup: The identifier of the UserGroup to be removed from association with the Valkey and Redis OSS serverless cache. Available for Valkey and Redis OSS only. Default is NULL. /// - securityGroupIds: The new list of VPC security groups to be associated with the serverless cache. Populating this list means the current VPC security groups will be removed. This security group is used to authorize traffic access for the VPC end-point (private-link). Default = NULL - the existing list of VPC security groups is not removed. /// - serverlessCacheName: User-provided identifier for the serverless cache to be modified. - /// - snapshotRetentionLimit: The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Redis OSS and Serverless Memcached only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days. - /// - userGroupId: The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL - the existing UserGroup is not removed. + /// - snapshotRetentionLimit: The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Valkey, Redis OSS and Serverless Memcached only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days. + /// - userGroupId: The identifier of the UserGroup to be associated with the serverless cache. 
Available for Valkey and Redis OSS only. Default is NULL - the existing UserGroup is not removed. /// - logger: Logger use during operation @inlinable public func modifyServerlessCache( cacheUsageLimits: CacheUsageLimits? = nil, dailySnapshotTime: String? = nil, description: String? = nil, + engine: String? = nil, + majorEngineVersion: String? = nil, removeUserGroup: Bool? = nil, securityGroupIds: [String]? = nil, serverlessCacheName: String? = nil, @@ -2803,6 +2816,8 @@ public struct ElastiCache: AWSService { cacheUsageLimits: cacheUsageLimits, dailySnapshotTime: dailySnapshotTime, description: description, + engine: engine, + majorEngineVersion: majorEngineVersion, removeUserGroup: removeUserGroup, securityGroupIds: securityGroupIds, serverlessCacheName: serverlessCacheName, @@ -2891,7 +2906,7 @@ public struct ElastiCache: AWSService { return try await self.modifyUserGroup(input, logger: logger) } - /// Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis OSS or Managing Costs with Reserved Nodes for Memcached. + /// Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes. @Sendable @inlinable public func purchaseReservedCacheNodesOffering(_ input: PurchaseReservedCacheNodesOfferingMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> PurchaseReservedCacheNodesOfferingResult { @@ -2904,7 +2919,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis OSS or Managing Costs with Reserved Nodes for Memcached. + /// Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes. /// /// Parameters: /// - cacheNodeCount: The number of cache node instances to reserve. Default: 1 @@ -2961,7 +2976,7 @@ public struct ElastiCache: AWSService { return try await self.rebalanceSlotsInGlobalReplicationGroup(input, logger: logger) } - /// Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster event is created. Rebooting a cluster is currently supported on Memcached and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode enabled) clusters. If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process. + /// Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. 
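Reviewer note: the serverless path gains both `engine` and `majorEngineVersion` on ModifyServerlessCache, as added above. A hedged sketch; the cache name and target major version are placeholders.

```swift
import SotoElastiCache

// Sketch only: assumes an `elastiCache` service object as in the first example.
func switchServerlessCacheEngine(_ elastiCache: ElastiCache) async throws {
    let result = try await elastiCache.modifyServerlessCache(
        engine: "valkey",                           // new parameter
        majorEngineVersion: "7",                    // new parameter; placeholder target version
        serverlessCacheName: "my-serverless-cache"  // placeholder name
    )
    print(result.serverlessCache?.engine ?? "unknown")
}
```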
The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster event is created. Rebooting a cluster is currently supported on Memcached, Valkey and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Valkey or Redis OSS (cluster mode enabled) clusters. If you make changes to parameters that require a Valkey or Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process. @Sendable @inlinable public func rebootCacheCluster(_ input: RebootCacheClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> RebootCacheClusterResult { @@ -2974,7 +2989,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster event is created. Rebooting a cluster is currently supported on Memcached and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode enabled) clusters. If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process. + /// Reboots some, or all, of the cache nodes within a provisioned cluster. This operation applies any modified cache parameter groups to the cluster. The reboot operation takes place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster event is created. Rebooting a cluster is currently supported on Memcached, Valkey and Redis OSS (cluster mode disabled) clusters. Rebooting is not supported on Valkey or Redis OSS (cluster mode enabled) clusters. If you make changes to parameters that require a Valkey or Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process. /// /// Parameters: /// - cacheClusterId: The cluster identifier. This parameter is stored as a lowercase string. @@ -3111,7 +3126,7 @@ public struct ElastiCache: AWSService { /// Start the migration of data. /// /// Parameters: - /// - customerNodeEndpointList: List of endpoints from which data should be migrated. For Redis OSS (cluster mode disabled), list should have only one element. + /// - customerNodeEndpointList: List of endpoints from which data should be migrated. For Valkey or Redis OSS (cluster mode disabled), the list should have only one element. /// - replicationGroupId: The ID of the replication group to which data should be migrated. /// - logger: Logger use during operation @inlinable @@ -3127,7 +3142,7 @@ public struct ElastiCache: AWSService { return try await self.startMigration(input, logger: logger) } - /// Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). 
This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide. + /// Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Valkey or Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide. 
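Reviewer note: a minimal sketch of driving the TestFailover operation documented above; the node group "0001" and replication group id are placeholders, and the 15-shards-per-rolling-24-hours limit quoted in the doc comment still applies.

```swift
import SotoElastiCache

// Sketch only: assumes an `elastiCache` service object as in the first example.
func testShardFailover(_ elastiCache: ElastiCache) async throws {
    _ = try await elastiCache.testFailover(
        nodeGroupId: "0001",                        // shard id; always 0001 for cluster mode disabled
        replicationGroupId: "my-replication-group"  // placeholder id
    )
    // Track progress through the failover-related events described above
    // (DescribeEvents, the console, or the CLI).
}
```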
@Sendable @inlinable public func testFailover(_ input: TestFailoverMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> TestFailoverResult { @@ -3140,7 +3155,7 @@ public struct ElastiCache: AWSService { logger: logger ) } - /// Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide. + /// Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console). This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API. Note the following A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this operation multiple times on different shards in the same Valkey or Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. 
Look for the following automatic failover related events, listed here in order of occurrance: Replication group message: Test Failover API called for node group Cache cluster message: Failover from primary node to replica node completed Replication group message: Failover from primary node to replica node completed Cache cluster message: Recovering cache nodes Cache cluster message: Finished recovery for cache nodes For more information see: Viewing ElastiCache Events in the ElastiCache User Guide DescribeEvents in the ElastiCache API Reference Also see, Testing Multi-AZ in the ElastiCache User Guide. /// /// Parameters: /// - nodeGroupId: The name of the node group (called shard in the console) in this replication group on which automatic failover is to be tested. You may test automatic failover on up to 15 node groups in any rolling 24-hour period. @@ -3228,7 +3243,7 @@ extension ElastiCache { /// - Parameters: /// - cacheClusterId: The user-supplied cluster identifier. If this parameter is specified, only information about that specific cluster is returned. This parameter isn't case sensitive. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. - /// - showCacheClustersNotInReplicationGroups: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis OSS clusters. + /// - showCacheClustersNotInReplicationGroups: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Valkey or Redis OSS clusters. /// - showCacheNodeInfo: An optional flag that can be included in the DescribeCacheCluster request to retrieve information about the individual cache nodes. /// - logger: Logger used for logging @inlinable @@ -3269,7 +3284,7 @@ extension ElastiCache { /// Return PaginatorSequence for operation ``describeCacheEngineVersions(_:logger:)``. /// /// - Parameters: - /// - cacheParameterGroupFamily: The name of a specific cache parameter group family to return details for. Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens + /// - cacheParameterGroupFamily: The name of a specific cache parameter group family to return details for. Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 | valkey7 Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens /// - defaultOnly: If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. /// - engine: The cache engine to return. Valid values: memcached | redis /// - engineVersion: The cache engine version to return. 
Example: 1.4.14 @@ -3730,10 +3745,10 @@ extension ElastiCache { /// Return PaginatorSequence for operation ``describeServerlessCacheSnapshots(_:logger:)``. /// /// - Parameters: - /// - maxResults: The maximum number of records to include in the response. If more records exist than the specified max-results value, a market is included in the response so that remaining results can be retrieved. Available for Redis OSS and Serverless Memcached only.The default is 50. The Validation Constraints are a maximum of 50. - /// - serverlessCacheName: The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only. - /// - serverlessCacheSnapshotName: The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only. - /// - snapshotType: The type of snapshot that is being described. Available for Redis OSS and Serverless Memcached only. + /// - maxResults: The maximum number of records to include in the response. If more records exist than the specified max-results value, a market is included in the response so that remaining results can be retrieved. Available for Valkey, Redis OSS and Serverless Memcached only.The default is 50. The Validation Constraints are a maximum of 50. + /// - serverlessCacheName: The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - serverlessCacheSnapshotName: The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Valkey, Redis OSS and Serverless Memcached only. + /// - snapshotType: The type of snapshot that is being described. Available for Valkey, Redis OSS and Serverless Memcached only. /// - logger: Logger used for logging @inlinable public func describeServerlessCacheSnapshotsPaginator( @@ -3900,7 +3915,7 @@ extension ElastiCache { /// /// - Parameters: /// - cacheClusterIds: The cache cluster IDs - /// - engine: The Elasticache engine to which the update applies. Either Redis OSS or Memcached. + /// - engine: The Elasticache engine to which the update applies. Either Valkey, Redis OSS or Memcached. /// - maxRecords: The maximum number of records to include in the response /// - replicationGroupIds: The replication group IDs /// - serviceUpdateName: The unique ID of the service update @@ -3994,7 +4009,7 @@ extension ElastiCache { /// Return PaginatorSequence for operation ``describeUsers(_:logger:)``. /// /// - Parameters: - /// - engine: The Redis OSS engine. + /// - engine: The engine. /// - filters: Filter to determine the list of User IDs to return. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. /// - userId: The ID of the user. @@ -4296,7 +4311,7 @@ extension ElastiCache { /// - cacheClusterId: The user-supplied cluster identifier. If this parameter is specified, only information about that specific cluster is returned. This parameter isn't case sensitive. /// - marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. 
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. - /// - showCacheClustersNotInReplicationGroups: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis OSS clusters. + /// - showCacheClustersNotInReplicationGroups: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Valkey or Redis OSS clusters. /// - showCacheNodeInfo: An optional flag that can be included in the DescribeCacheCluster request to retrieve information about the individual cache nodes. /// - logger: Logger used for logging @inlinable @@ -4351,7 +4366,7 @@ extension ElastiCache { /// - cacheClusterId: The user-supplied cluster identifier. If this parameter is specified, only information about that specific cluster is returned. This parameter isn't case sensitive. /// - marker: An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. /// - maxRecords: The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. - /// - showCacheClustersNotInReplicationGroups: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis OSS clusters. + /// - showCacheClustersNotInReplicationGroups: An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Valkey or Redis OSS clusters. /// - showCacheNodeInfo: An optional flag that can be included in the DescribeCacheCluster request to retrieve information about the individual cache nodes. /// - logger: Logger used for logging @inlinable diff --git a/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift b/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift index d18066fda7..ed637332c3 100644 --- a/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift +++ b/Sources/Soto/Services/ElastiCache/ElastiCache_shapes.swift @@ -248,10 +248,10 @@ extension ElastiCache { } public struct AllowedNodeTypeModificationsMessage: AWSDecodableShape { - /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling down a Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. + /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. 
When scaling down a Valkey or Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. @OptionalCustomCoding> public var scaleDownModifications: [String]? - /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling up a Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. + /// A string list, each element of which specifies a cache node type which you can use to scale your cluster or replication group. When scaling up a Valkey or Redis OSS cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from this list for the CacheNodeType parameter. @OptionalCustomCoding> public var scaleUpModifications: [String]? @@ -424,11 +424,11 @@ extension ElastiCache { public let arn: String? /// A flag that enables encryption at-rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false public let atRestEncryptionEnabled: Bool? - /// A flag that enables using an AuthToken (password) when issuing Redis OSS commands. Default: false + /// A flag that enables using an AuthToken (password) when issuing Valkey or Redis OSS commands. Default: false public let authTokenEnabled: Bool? /// The date the auth token was last modified public let authTokenLastModifiedDate: Date? - ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Valkey or Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// The date and time when the cluster was created. public let cacheClusterCreateTime: Date? @@ -467,7 +467,7 @@ extension ElastiCache { /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. 
+ /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// Status of the cache parameter group. public let cacheParameterGroup: CacheParameterGroupStatus? @@ -484,16 +484,16 @@ extension ElastiCache { public let engine: String? /// The version of the cache engine that is used in this cluster. public let engineVersion: String? - /// The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// Returns the destination, format and type of the logs. @OptionalCustomCoding> public var logDeliveryConfigurations: [LogDeliveryConfiguration]? - /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let networkType: NetworkType? /// Describes a notification topic and its status. Notification topics are used for publishing ElastiCache events to subscribers using Amazon Simple Notification Service (SNS). public let notificationConfiguration: NotificationConfiguration? - /// The number of cache nodes in the cluster. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. + /// The number of cache nodes in the cluster. For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. public let numCacheNodes: Int? public let pendingModifiedValues: PendingModifiedValues? /// The name of the Availability Zone in which the cluster is located or "Multiple" if the cache nodes are located in different Availability Zones. @@ -1016,7 +1016,7 @@ extension ElastiCache { /// A list of subnets associated with the cache subnet group. @OptionalCustomCoding> public var subnets: [Subnet]? - /// Either ipv4 | ipv6 | dual_stack. 
IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. @OptionalCustomCoding> public var supportedNetworkTypes: [NetworkType]? /// The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet group. @@ -1129,11 +1129,11 @@ extension ElastiCache { public struct _PreferredAvailabilityZonesEncoding: ArrayCoderProperties { public static let member = "PreferredAvailabilityZone" } public struct _PreferredOutpostArnsEncoding: ArrayCoderProperties { public static let member = "PreferredOutpostArn" } - /// The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis OSS replication group you are working with. The minimum number of replicas in a shard or replication group is: Redis OSS (cluster mode disabled) If Multi-AZ: 1 If Multi-AZ: 0 Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) + /// The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Valkey or Redis OSS replication group you are working with. The minimum number of replicas in a shard or replication group is: Valkey or Redis OSS (cluster mode disabled) If Multi-AZ: 1 If Multi-AZ: 0 Valkey or Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) public let newReplicaCount: Int? - /// The 4-digit id for the node group you are configuring. For Redis OSS (cluster mode disabled) replication groups, the node group id is always 0001. To find a Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id. + /// The 4-digit id for the node group you are configuring. For Valkey or Redis OSS (cluster mode disabled) replication groups, the node group id is always 0001. To find a Valkey or Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id. public let nodeGroupId: String? - /// A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The nummber of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache (Redis OSS) selects the availability zone for each of the replicas. + /// A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The number of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache selects the availability zone for each of the replicas. @OptionalCustomCoding> public var preferredAvailabilityZones: [String]? /// The outpost ARNs in which the cache cluster is created.
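For orientation, a minimal Soto sketch of the ConfigureShard shape documented above. The node group id and zone names are placeholders, and the generated memberwise initializer is assumed to take the members in the order they are declared in the diff.

import SotoElastiCache

// One ConfigureShard entry per node group (shard) being resized.
// preferredAvailabilityZones must list NewReplicaCount + 1 zones, because the
// primary node's zone is counted alongside the replicas.
let shardConfig = ElastiCache.ConfigureShard(
    newReplicaCount: 2,        // replicas wanted when the operation completes
    nodeGroupId: "0001",       // always "0001" for cluster mode disabled groups
    preferredAvailabilityZones: ["us-east-1a", "us-east-1b", "us-east-1c"]
)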
@@ -1165,14 +1165,14 @@ extension ElastiCache { public struct CopyServerlessCacheSnapshotRequest: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// The identifier of the KMS key used to encrypt the target snapshot. Available for Redis OSS and Serverless Memcached only. + /// The identifier of the KMS key used to encrypt the target snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. public let kmsKeyId: String? - /// The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only. + /// The identifier of the existing serverless cache’s snapshot to be copied. Available for Valkey, Redis OSS and Serverless Memcached only. public let sourceServerlessCacheSnapshotName: String? - /// A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL + /// A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Valkey, Redis OSS and Serverless Memcached only. Default: NULL @OptionalCustomCoding> public var tags: [Tag]? - /// The identifier for the snapshot to be created. Available for Redis OSS and Serverless Memcached only. + /// The identifier for the snapshot to be created. Available for Valkey, Redis OSS and Serverless Memcached only. public let targetServerlessCacheSnapshotName: String? @inlinable @@ -1192,7 +1192,7 @@ extension ElastiCache { } public struct CopyServerlessCacheSnapshotResponse: AWSDecodableShape { - /// The response for the attempt to copy the serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. + /// The response for the attempt to copy the serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshot: ServerlessCacheSnapshot? @inlinable @@ -1262,7 +1262,7 @@ extension ElastiCache { /// Reserved parameter. The password used to access a password protected server. Password constraints: Must be only printable ASCII characters. Must be at least 16 characters and no more than 128 characters in length. The only permitted printable special characters are !, &, #, $, ^, , and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. public let authToken: String? - ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and above, set this parameter to yes to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. This parameter is only supported for Memcached clusters. If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode. public let azMode: AZMode? 
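As a rough usage sketch for the CopyServerlessCacheSnapshotRequest and response covered above (not part of this diff): the snapshot names are placeholders, and the default AWSClient initializer plus the generated copyServerlessCacheSnapshot method from SotoCore/SotoElastiCache are assumed.

import SotoCore
import SotoElastiCache

let client = AWSClient()  // assumes the default credential chain; configure as needed
let elastiCache = ElastiCache(client: client, region: .useast1)

// Copy an existing serverless cache snapshot under a new name
// (Valkey, Redis OSS and Serverless Memcached only).
let copyRequest = ElastiCache.CopyServerlessCacheSnapshotRequest(
    sourceServerlessCacheSnapshotName: "my-cache-snapshot",
    targetServerlessCacheSnapshotName: "my-cache-snapshot-copy"
)
let copyResponse = try await elastiCache.copyServerlessCacheSnapshot(copyRequest)
print(String(describing: copyResponse.serverlessCacheSnapshot))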
@@ -1296,7 +1296,7 @@ extension ElastiCache { /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster. public let cacheParameterGroupName: String? @@ -1309,16 +1309,16 @@ extension ElastiCache { public let engine: String? /// The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. public let engineVersion: String? - /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// Specifies the destination, format and type of the logs. 
@OptionalCustomCoding> public var logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? - /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let networkType: NetworkType? /// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. public let notificationTopicArn: String? - /// The initial number of cache nodes that the cluster has. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/. + /// The initial number of cache nodes that the cluster has. For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/. public let numCacheNodes: Int? /// Specifies whether the nodes in the cluster are created in a single outpost or across multiple outposts. public let outpostMode: OutpostMode? @@ -1341,10 +1341,10 @@ extension ElastiCache { /// One or more VPC security groups associated with the cluster. Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). @OptionalCustomCoding> public var securityGroupIds: [String]? - /// A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + /// A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb @OptionalCustomCoding> public var snapshotArns: [String]? - /// The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. + /// The name of a Valkey or Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. public let snapshotName: String? /// The number of days for which ElastiCache retains automatic snapshots before deleting them. 
For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted. This parameter is only valid if the Engine parameter is redis. Default: 0 (i.e., automatic backups are disabled for this cache cluster). public let snapshotRetentionLimit: Int? @@ -1609,9 +1609,9 @@ extension ElastiCache { public let atRestEncryptionEnabled: Bool? /// Reserved parameter. The password used to access a password protected server. AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true. For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. Password constraints: Must be only printable ASCII characters. Must be at least 16 characters and no more than 128 characters in length. The only permitted printable special characters are !, &, #, $, ^, , and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. public let authToken: String? - /// Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Redis OSS (cluster mode enabled) replication groups. Default: false + /// Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups. Default: false public let automaticFailoverEnabled: Bool? - ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and above, set this parameter to yes to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): /// @@ -1641,16 +1641,16 @@ extension ElastiCache { /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. 
Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? - /// The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. To create a Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + /// The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. If you are running Valkey or Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name. To create a Valkey or Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. To create a Valkey or Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. public let cacheParameterGroupName: String? /// A list of cache security group names to associate with this replication group. @OptionalCustomCoding> public var cacheSecurityGroupNames: [String]? /// The name of the cache subnet group to be used for the replication group. If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups. public let cacheSubnetGroupName: String? - /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. 
Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. public let clusterMode: ClusterMode? /// Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering. public let dataTieringEnabled: Bool? @@ -1660,7 +1660,7 @@ extension ElastiCache { public let engineVersion: String? /// The name of the Global datastore public let globalReplicationGroupId: String? - /// The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when creating a replication group, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// The ID of the KMS key used to encrypt the disk in the cluster. public let kmsKeyId: String? @@ -1669,16 +1669,16 @@ extension ElastiCache { public var logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? /// A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ. public let multiAZEnabled: Bool? - /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let networkType: NetworkType? - /// A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group. + /// A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey or Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Valkey or Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group. @OptionalCustomCoding> public var nodeGroupConfiguration: [NodeGroupConfiguration]? 
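To make the NodeGroupConfiguration parameter above more concrete, here is a hypothetical two-shard layout for a cluster mode enabled group; the slot ranges, zones, and the assumption that the Swift members mirror the PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount and Slots names are illustrative only.

// Per-shard layout for a cluster mode enabled replication group; each entry
// configures one node group (shard) and its slot range.
let nodeGroups: [ElastiCache.NodeGroupConfiguration] = [
    .init(nodeGroupId: "0001",
          primaryAvailabilityZone: "us-east-1a",
          replicaAvailabilityZones: ["us-east-1b"],
          replicaCount: 1,
          slots: "0-8191"),
    .init(nodeGroupId: "0002",
          primaryAvailabilityZone: "us-east-1b",
          replicaAvailabilityZones: ["us-east-1c"],
          replicaCount: 1,
          slots: "8192-16383"),
]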
/// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. public let notificationTopicArn: String? /// The number of clusters this replication group initially has. This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead. If AutomaticFailoverEnabled is true, the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6. The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas). public let numCacheClusters: Int? - /// An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1. Default: 1 + /// An optional parameter that specifies the number of node groups (shards) for this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey or Redis OSS (cluster mode disabled) either omit this parameter or set it to 1. Default: 1 public let numNodeGroups: Int? /// The port number on which each member of the replication group accepts connections. public let port: Int? @@ -1698,9 +1698,9 @@ extension ElastiCache { /// One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC). @OptionalCustomCoding> public var securityGroupIds: [String]? - /// The name of the snapshot used to create a replication group. Available for Redis OSS only. + /// The name of the snapshot used to create a replication group. Available for Valkey, Redis OSS only. public let serverlessCacheSnapshotName: String? - /// A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + /// A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb @OptionalCustomCoding> public var snapshotArns: [String]? /// The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to restoring while the new replication group is being created. @@ -1714,7 +1714,7 @@ extension ElastiCache { public var tags: [Tag]? /// A flag that enables in-transit encryption when set to true. 
This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. public let transitEncryptionEnabled: Bool? - /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. This process will not trigger the replacement of the replication group. + /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Valkey or Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. This process will not trigger the replacement of the replication group. public let transitEncryptionMode: TransitEncryptionMode? /// The user group to associate with the replication group. @OptionalCustomCoding> @@ -1838,7 +1838,7 @@ extension ElastiCache { /// Sets the cache usage limits for storage and ElastiCache Processing Units for the cache. public let cacheUsageLimits: CacheUsageLimits? - /// The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only. + /// The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be created on an automatic daily basis. Available for Valkey, Redis OSS and Serverless Memcached only. public let dailySnapshotTime: String? /// User-provided description for the serverless cache. The default is NULL, i.e. if no description is provided then an empty string will be returned. The maximum length is 255 characters. public let description: String? @@ -1853,10 +1853,10 @@ extension ElastiCache { public var securityGroupIds: [String]? /// User-provided identifier for the serverless cache. This parameter is stored as a lowercase string. public let serverlessCacheName: String? - /// The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis OSS and Serverless Memcached only. + /// The ARN(s) of the snapshot that the new serverless cache will be created from. 
Available for Valkey, Redis OSS and Serverless Memcached only. @OptionalCustomCoding> public var snapshotArnsToRestore: [String]? - /// The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless Memcached only. + /// The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Valkey, Redis OSS and Serverless Memcached only. public let snapshotRetentionLimit: Int? /// A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. All the subnetIds must belong to the same VPC. @OptionalCustomCoding> @@ -1864,7 +1864,7 @@ extension ElastiCache { /// The list of tags (key, value) pairs to be added to the serverless cache resource. Default is NULL. @OptionalCustomCoding> public var tags: [Tag]? - /// The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL. + /// The identifier of the UserGroup to be associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL. public let userGroupId: String? @inlinable @@ -1918,13 +1918,13 @@ extension ElastiCache { public struct CreateServerlessCacheSnapshotRequest: AWSEncodableShape { public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } - /// The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS and Serverless Memcached only. Default: NULL + /// The ID of the KMS key used to encrypt the snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. Default: NULL public let kmsKeyId: String? - /// The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis OSS and Serverless Memcached only. + /// The name of an existing serverless cache. The snapshot is created from this cache. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheName: String? - /// The name for the snapshot being created. Must be unique for the customer account. Available for Redis OSS and Serverless Memcached only. Must be between 1 and 255 characters. + /// The name for the snapshot being created. Must be unique for the customer account. Available for Valkey, Redis OSS and Serverless Memcached only. Must be between 1 and 255 characters. public let serverlessCacheSnapshotName: String? - /// A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. + /// A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Valkey, Redis OSS and Serverless Memcached only. @OptionalCustomCoding> public var tags: [Tag]? @@ -1945,7 +1945,7 @@ extension ElastiCache { } public struct CreateServerlessCacheSnapshotResponse: AWSDecodableShape { - /// The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only. + /// The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshot: ServerlessCacheSnapshot? 
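A small sketch of the CreateServerlessCacheSnapshot request/response pair above, reusing the elastiCache service object from the earlier CopyServerlessCacheSnapshot sketch; the cache name, snapshot name and tag are placeholders.

// Take an on-demand snapshot of a serverless cache
// (Valkey, Redis OSS and Serverless Memcached only).
let createSnapshotRequest = ElastiCache.CreateServerlessCacheSnapshotRequest(
    serverlessCacheName: "my-serverless-cache",
    serverlessCacheSnapshotName: "my-serverless-cache-nightly",
    tags: [ElastiCache.Tag(key: "team", value: "platform")]
)
let created = try await elastiCache.createServerlessCacheSnapshot(createSnapshotRequest)
print(String(describing: created.serverlessCacheSnapshot))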
@inlinable @@ -2009,7 +2009,7 @@ extension ElastiCache { /// The current supported value is Redis user. public let engine: String? - /// A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Redis OSS only. + /// A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted. Available for Valkey and Redis OSS only. @OptionalCustomCoding> public var tags: [Tag]? /// The ID of the user group. @@ -2145,10 +2145,10 @@ extension ElastiCache { /// Indicates that the shard reconfiguration process begins immediately. At present, the only permitted value for this parameter is true. public let applyImmediately: Bool? - /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster. + /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster. @OptionalCustomCoding> public var globalNodeGroupsToRemove: [String]? - /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache (Redis OSS) will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster. + /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. ElastiCache will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster. @OptionalCustomCoding> public var globalNodeGroupsToRetain: [String]? /// The name of the Global datastore @@ -2192,9 +2192,9 @@ extension ElastiCache { /// If True, the number of replica nodes is decreased immediately. ApplyImmediately=False is not currently supported. public let applyImmediately: Bool? - /// The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. The minimum number of replicas in a shard or replication group is: Redis OSS (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) + /// The number of read replica nodes you want at the completion of this operation. For Valkey or Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. 
For Valkey or Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. The minimum number of replicas in a shard or replication group is: Valkey or Redis OSS (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Valkey or Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails) public let newReplicaCount: Int? - /// A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. + /// A list of ConfigureShard objects that can be used to configure each shard in a Valkey or Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. @OptionalCustomCoding> public var replicaConfiguration: [ConfigureShard]? /// A list of the node ids to remove from the replication group or node group (shard). @@ -2380,7 +2380,7 @@ extension ElastiCache { } public struct DeleteServerlessCacheRequest: AWSEncodableShape { - /// Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis OSS and Serverless Memcached only. Default: NULL, i.e. a final snapshot is not taken. + /// Name of the final snapshot to be taken before the serverless cache is deleted. Available for Valkey, Redis OSS and Serverless Memcached only. Default: NULL, i.e. a final snapshot is not taken. public let finalSnapshotName: String? /// The identifier of the serverless cache to be deleted. public let serverlessCacheName: String? @@ -2412,7 +2412,7 @@ extension ElastiCache { } public struct DeleteServerlessCacheSnapshotRequest: AWSEncodableShape { - /// Idenfitier of the snapshot to be deleted. Available for Redis OSS and Serverless Memcached only. + /// Identifier of the snapshot to be deleted. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshotName: String? @inlinable @@ -2426,7 +2426,7 @@ extension ElastiCache { } public struct DeleteServerlessCacheSnapshotResponse: AWSDecodableShape { - /// The snapshot to be deleted. Available for Redis OSS and Serverless Memcached only. + /// The snapshot to be deleted. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshot: ServerlessCacheSnapshot? @inlinable @@ -2506,7 +2506,7 @@ extension ElastiCache { public let marker: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a marker is included in the response so that the remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100. public let maxRecords: Int? - /// An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this mean Memcached and single node Redis OSS clusters. + /// An optional flag that can be included in the DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of a replication group. In practice, this means Memcached and single node Valkey or Redis OSS clusters. public let showCacheClustersNotInReplicationGroups: Bool?
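The showCacheClustersNotInReplicationGroups flag above pairs naturally with the Marker/MaxRecords pagination documented earlier. A hedged sketch using the generated parameter-based convenience method (and the elastiCache object from the earlier sketch):

// Page through clusters that are not members of a replication group
// (in practice, Memcached and single node Valkey or Redis OSS clusters).
var marker: String? = nil
repeat {
    let page = try await elastiCache.describeCacheClusters(
        marker: marker,
        maxRecords: 20,                                  // constraints: 20...100
        showCacheClustersNotInReplicationGroups: true,
        showCacheNodeInfo: true
    )
    for cluster in page.cacheClusters ?? [] {
        print(cluster.cacheClusterId ?? "?", cluster.engine ?? "?", cluster.engineVersion ?? "?")
    }
    marker = page.marker
} while marker != nil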
/// An optional flag that can be included in the DescribeCacheCluster request to retrieve information about the individual cache nodes. public let showCacheNodeInfo: Bool? @@ -2530,7 +2530,7 @@ extension ElastiCache { } public struct DescribeCacheEngineVersionsMessage: AWSEncodableShape { - /// The name of a specific cache parameter group family to return details for. Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens + /// The name of a specific cache parameter group family to return details for. Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 | valkey7 Constraints: Must be 1 to 255 alphanumeric characters First character must be a letter Cannot end with a hyphen or contain two consecutive hyphens public let cacheParameterGroupFamily: String? /// If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. public let defaultOnly: Bool? @@ -2826,7 +2826,7 @@ extension ElastiCache { /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration. Valid Values: 1 | 3 | 31536000 | 94608000 public let duration: String? 
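The valkey7 parameter group family added to the valid values above can be explored with DescribeCacheEngineVersions. A sketch, again reusing the earlier elastiCache object and assuming the generated parameter-based convenience method:

// List engine versions that belong to the new valkey7 parameter group family.
let versions = try await elastiCache.describeCacheEngineVersions(
    cacheParameterGroupFamily: "valkey7",
    defaultOnly: false
)
for version in versions.cacheEngineVersions ?? [] {
    print(version.engine ?? "?", version.engineVersion ?? "?")
}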
@@ -2896,7 +2896,7 @@ extension ElastiCache { /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration. Valid Values: 1 | 3 | 31536000 | 94608000 public let duration: String? @@ -2934,15 +2934,15 @@ extension ElastiCache { } public struct DescribeServerlessCacheSnapshotsRequest: AWSEncodableShape { - /// The maximum number of records to include in the response. If more records exist than the specified max-results value, a market is included in the response so that remaining results can be retrieved. Available for Redis OSS and Serverless Memcached only.The default is 50. The Validation Constraints are a maximum of 50. + /// The maximum number of records to include in the response. If more records exist than the specified max-results value, a marker is included in the response so that remaining results can be retrieved. Available for Valkey, Redis OSS and Serverless Memcached only. The default is 50. The Validation Constraints are a maximum of 50. public let maxResults: Int? - /// An optional marker returned from a prior request to support pagination of results from this operation.
If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Valkey, Redis OSS and Serverless Memcached only. public let nextToken: String? - /// The identifier of serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only. + /// The identifier of the serverless cache. If this parameter is specified, only snapshots associated with that specific serverless cache are described. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheName: String? - /// The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only. + /// The identifier of the serverless cache’s snapshot. If this parameter is specified, only this snapshot is described. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshotName: String? - /// The type of snapshot that is being described. Available for Redis OSS and Serverless Memcached only. + /// The type of snapshot that is being described. Available for Valkey, Redis OSS and Serverless Memcached only. public let snapshotType: String? @inlinable @@ -2966,9 +2966,9 @@ extension ElastiCache { public struct DescribeServerlessCacheSnapshotsResponse: AWSDecodableShape { public struct _ServerlessCacheSnapshotsEncoding: ArrayCoderProperties { public static let member = "ServerlessCacheSnapshot" } - /// An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only. + /// An optional marker returned from a prior request to support pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by max-results. Available for Valkey, Redis OSS and Serverless Memcached only. public let nextToken: String? - /// The serverless caches snapshots associated with a given description request. Available for Redis OSS and Serverless Memcached only. + /// The serverless cache snapshots associated with a given description request. Available for Valkey, Redis OSS and Serverless Memcached only. @OptionalCustomCoding> public var serverlessCacheSnapshots: [ServerlessCacheSnapshot]? @@ -3119,7 +3119,7 @@ extension ElastiCache { /// The cache cluster IDs @OptionalCustomCoding> public var cacheClusterIds: [String]? - /// The Elasticache engine to which the update applies. Either Redis OSS or Memcached. + /// The ElastiCache engine to which the update applies. Either Valkey, Redis OSS or Memcached. public let engine: String? /// An optional marker returned from a prior request. Use this marker for pagination of results from this operation. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. public let marker: String? @@ -3218,7 +3218,7 @@ extension ElastiCache { } public struct DescribeUsersMessage: AWSEncodableShape { - /// The Redis OSS engine. + /// The engine. public let engine: String? /// Filter to determine the list of User IDs to return.
@OptionalCustomCoding> @@ -3466,9 +3466,9 @@ extension ElastiCache { } public struct ExportServerlessCacheSnapshotRequest: AWSEncodableShape { - /// Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region as the snapshot. Available for Redis OSS only. + /// Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region as the snapshot. Available for Valkey and Redis OSS only. public let s3BucketName: String? - /// The identifier of the serverless cache snapshot to be exported to S3. Available for Redis OSS only. + /// The identifier of the serverless cache snapshot to be exported to S3. Available for Valkey and Redis OSS only. public let serverlessCacheSnapshotName: String? @inlinable @@ -3484,7 +3484,7 @@ extension ElastiCache { } public struct ExportServerlessCacheSnapshotResponse: AWSDecodableShape { - /// The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only. + /// The state of a serverless cache at a specific point in time, to the millisecond. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshot: ServerlessCacheSnapshot? @inlinable @@ -3585,15 +3585,15 @@ extension ElastiCache { public let arn: String? /// A flag that enables encryption at rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. public let atRestEncryptionEnabled: Bool? - /// A flag that enables using an AuthToken (password) when issuing Redis OSS commands. Default: false + /// A flag that enables using an AuthToken (password) when issuing Valkey or Redis OSS commands. Default: false public let authTokenEnabled: Bool? /// The cache node type of the Global datastore public let cacheNodeType: String? /// A flag that indicates whether the Global datastore is cluster enabled. public let clusterEnabled: Bool? - /// The Elasticache engine. For Redis OSS only. + /// The ElastiCache engine. For Valkey or Redis OSS only. public let engine: String? - /// The Elasticache (Redis OSS) engine version. + /// The ElastiCache engine version. public let engineVersion: String? /// Indicates the slot configuration and global identifier for each slice group. @OptionalCustomCoding> @@ -3745,9 +3745,9 @@ extension ElastiCache { /// If True, the number of replica nodes is increased immediately. ApplyImmediately=False is not currently supported. public let applyImmediately: Bool? - /// The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. + /// The number of read replica nodes you want at the completion of this operation. For Valkey or Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Valkey or Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. public let newReplicaCount: Int? 
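For the ExportServerlessCacheSnapshot request described earlier in this hunk, a sketch reusing the elastiCache object from the earlier setup; the bucket and snapshot names are placeholders, and the bucket must be in the same region as the snapshot.

// Export a serverless cache snapshot to Amazon S3 (Valkey and Redis OSS only).
let exportResponse = try await elastiCache.exportServerlessCacheSnapshot(
    ElastiCache.ExportServerlessCacheSnapshotRequest(
        s3BucketName: "my-snapshot-bucket",
        serverlessCacheSnapshotName: "my-cache-snapshot"
    )
)
print(String(describing: exportResponse.serverlessCacheSnapshot))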
- /// A list of ConfigureShard objects that can be used to configure each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. + /// A list of ConfigureShard objects that can be used to configure each shard in a Valkey or Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. @OptionalCustomCoding> public var replicaConfiguration: [ConfigureShard]? /// The id of the replication group to which you want to add replica nodes. @@ -3909,9 +3909,9 @@ extension ElastiCache { public let applyImmediately: Bool? /// Reserved parameter. The password used to access a password protected server. This parameter must be specified with the auth-token-update parameter. Password constraints: Must be only printable ASCII characters Must be at least 16 characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '"', or '@', '%' For more information, see AUTH password at AUTH. public let authToken: String? - /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis OSS AUTH + /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with AUTH public let authTokenUpdateStrategy: AuthTokenUpdateStrategyType? - ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Valkey 7.2 or Redis OSS engine version 6.0 or later, set this parameter to yes to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// Specifies whether the new nodes in this Memcached cluster are all created in a single Availability Zone or created across multiple Availability Zones. Valid values: single-az | cross-az. This option is only supported for Memcached clusters. You cannot specify single-az if the Memcached cluster already has cache nodes in different Availability Zones. If cross-az is specified, existing Memcached nodes remain in their current Availability Zone. Only newly created nodes are located in different Availability Zones. public let azMode: AZMode? @@ -3927,9 +3927,11 @@ extension ElastiCache { /// A list of cache security group names to authorize on this cluster. This change is asynchronously applied as soon as possible. You can use this parameter only with clusters that are created outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be "Default". @OptionalCustomCoding> public var cacheSecurityGroupNames: [String]? + /// Modifies the engine listed in a cluster message. The options are redis, memcached or valkey. + public let engine: String? 
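The new Engine field on ModifyCacheClusterMessage (threaded through the initializer and CodingKeys further down) is what lets a caller switch a cluster between redis, memcached and valkey. A minimal sketch of an in-place switch to Valkey, assuming an existing AWSClient and a hypothetical cluster id:

    import SotoElastiCache

    func switchClusterEngineToValkey(awsClient: AWSClient) async throws {
        let elastiCache = ElastiCache(client: awsClient, region: .useast1)
        let request = ElastiCache.ModifyCacheClusterMessage(
            applyImmediately: true,
            cacheClusterId: "my-cache-cluster", // hypothetical cluster id
            engine: "valkey"                    // new parameter; redis, memcached or valkey
        )
        _ = try await elastiCache.modifyCacheCluster(request)
    }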
/// The upgraded version of the cache engine to be run on the cache nodes. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. public let engineVersion: String? - /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// Specifies the destination, format and type of the logs. @OptionalCustomCoding> @@ -3941,7 +3943,7 @@ extension ElastiCache { public let notificationTopicArn: String? /// The status of the Amazon SNS notification topic. Notifications are sent only if the status is active. Valid values: active | inactive public let notificationTopicStatus: String? - /// The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster. + /// The number of cache nodes that the cluster should have. If the value for NumCacheNodes is greater than the sum of the number of current cache nodes and the number of cache nodes pending creation (which may be zero), more nodes are added. If the value is less than the number of existing cache nodes, nodes are removed. 
If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter to provide the IDs of the specific cache nodes to remove. For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in accordance with the scale out architecture, is not queued. The customer's latest request to add or remove nodes to the cluster overrides any previous pending operations to modify the number of cache nodes in the cluster. For example, a request to remove 2 nodes would override a previous pending operation to remove 3 nodes. Similarly, a request to add 2 nodes would override a previous pending operation to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned in different Availability Zones with flexible cache node placement, a request to add nodes does not automatically override a previous pending operation to add nodes. The customer can modify the previous pending operation to add more nodes or explicitly cancel the pending request and retry the new request. To cancel pending operations to modify the number of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes equal to the number of cache nodes currently in the cluster. public let numCacheNodes: Int? /// Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 public let preferredMaintenanceWindow: String? @@ -3954,7 +3956,7 @@ extension ElastiCache { public let snapshotWindow: String? @inlinable - public init(applyImmediately: Bool? = nil, authToken: String? = nil, authTokenUpdateStrategy: AuthTokenUpdateStrategyType? = nil, autoMinorVersionUpgrade: Bool? = nil, azMode: AZMode? = nil, cacheClusterId: String? = nil, cacheNodeIdsToRemove: [String]? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, cacheSecurityGroupNames: [String]? = nil, engineVersion: String? = nil, ipDiscovery: IpDiscovery? = nil, logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? = nil, newAvailabilityZones: [String]? = nil, notificationTopicArn: String? = nil, notificationTopicStatus: String? = nil, numCacheNodes: Int? = nil, preferredMaintenanceWindow: String? = nil, securityGroupIds: [String]? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil) { + public init(applyImmediately: Bool? = nil, authToken: String? = nil, authTokenUpdateStrategy: AuthTokenUpdateStrategyType? = nil, autoMinorVersionUpgrade: Bool? = nil, azMode: AZMode? = nil, cacheClusterId: String? = nil, cacheNodeIdsToRemove: [String]? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, cacheSecurityGroupNames: [String]? = nil, engine: String? = nil, engineVersion: String? = nil, ipDiscovery: IpDiscovery? = nil, logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? = nil, newAvailabilityZones: [String]? = nil, notificationTopicArn: String? = nil, notificationTopicStatus: String? 
= nil, numCacheNodes: Int? = nil, preferredMaintenanceWindow: String? = nil, securityGroupIds: [String]? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil) { self.applyImmediately = applyImmediately self.authToken = authToken self.authTokenUpdateStrategy = authTokenUpdateStrategy @@ -3965,6 +3967,7 @@ extension ElastiCache { self.cacheNodeType = cacheNodeType self.cacheParameterGroupName = cacheParameterGroupName self.cacheSecurityGroupNames = cacheSecurityGroupNames + self.engine = engine self.engineVersion = engineVersion self.ipDiscovery = ipDiscovery self.logDeliveryConfigurations = logDeliveryConfigurations @@ -3989,6 +3992,7 @@ extension ElastiCache { case cacheNodeType = "CacheNodeType" case cacheParameterGroupName = "CacheParameterGroupName" case cacheSecurityGroupNames = "CacheSecurityGroupNames" + case engine = "Engine" case engineVersion = "EngineVersion" case ipDiscovery = "IpDiscovery" case logDeliveryConfigurations = "LogDeliveryConfigurations" @@ -4084,6 +4088,8 @@ extension ElastiCache { public let cacheNodeType: String? /// The name of the cache parameter group to use with the Global datastore. It must be compatible with the major engine version used by the Global datastore. public let cacheParameterGroupName: String? + /// Modifies the engine listed in a global replication group message. The options are redis, memcached or valkey. + public let engine: String? /// The upgraded version of the cache engine to be run on the clusters in the Global datastore. public let engineVersion: String? /// A description of the Global datastore @@ -4092,11 +4098,12 @@ extension ElastiCache { public let globalReplicationGroupId: String? @inlinable - public init(applyImmediately: Bool? = nil, automaticFailoverEnabled: Bool? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, engineVersion: String? = nil, globalReplicationGroupDescription: String? = nil, globalReplicationGroupId: String? = nil) { + public init(applyImmediately: Bool? = nil, automaticFailoverEnabled: Bool? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, engine: String? = nil, engineVersion: String? = nil, globalReplicationGroupDescription: String? = nil, globalReplicationGroupId: String? = nil) { self.applyImmediately = applyImmediately self.automaticFailoverEnabled = automaticFailoverEnabled self.cacheNodeType = cacheNodeType self.cacheParameterGroupName = cacheParameterGroupName + self.engine = engine self.engineVersion = engineVersion self.globalReplicationGroupDescription = globalReplicationGroupDescription self.globalReplicationGroupId = globalReplicationGroupId @@ -4107,6 +4114,7 @@ extension ElastiCache { case automaticFailoverEnabled = "AutomaticFailoverEnabled" case cacheNodeType = "CacheNodeType" case cacheParameterGroupName = "CacheParameterGroupName" + case engine = "Engine" case engineVersion = "EngineVersion" case globalReplicationGroupDescription = "GlobalReplicationGroupDescription" case globalReplicationGroupId = "GlobalReplicationGroupId" @@ -4135,11 +4143,11 @@ extension ElastiCache { public let applyImmediately: Bool? /// Reserved parameter. The password used to access a password protected server. This parameter must be specified with the auth-token-update-strategy parameter. Password constraints: Must be only printable ASCII characters Must be at least 16 characters and no more than 128 characters in length Cannot contain any of the following characters: '/', '"', or '@', '%' For more information, see AUTH password at AUTH. 
public let authToken: String? - /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with Redis OSS AUTH + /// Specifies the strategy to use to update the AUTH token. This parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users with AUTH public let authTokenUpdateStrategy: AuthTokenUpdateStrategyType? /// Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. Valid values: true | false public let automaticFailoverEnabled: Bool? - ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + ///  If you are running Valkey or Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// A valid cache node type that you want to scale this replication group to. public let cacheNodeType: String? @@ -4148,11 +4156,13 @@ extension ElastiCache { /// A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible. This parameter can be used only with replication group containing clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default. @OptionalCustomCoding> public var cacheSecurityGroupNames: [String]? - /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. public let clusterMode: ClusterMode? + /// Modifies the engine listed in a replication group message. The options are redis, memcached or valkey. + public let engine: String? /// The upgraded version of the cache engine to be run on the clusters in the replication group. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version. public let engineVersion: String? 
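ModifyGlobalReplicationGroupRequest and ModifyReplicationGroupMessage pick up the same optional Engine field (the corresponding initializer and CodingKeys changes follow below). A hedged sketch of switching a replication group to Valkey, with a hypothetical replication group id and an existing AWSClient:

    import SotoElastiCache

    func switchReplicationGroupEngineToValkey(awsClient: AWSClient) async throws {
        let elastiCache = ElastiCache(client: awsClient, region: .useast1)
        let request = ElastiCache.ModifyReplicationGroupMessage(
            applyImmediately: true,
            engine: "valkey",                          // new parameter; redis, memcached or valkey
            replicationGroupId: "my-replication-group" // hypothetical id
        )
        _ = try await elastiCache.modifyReplicationGroup(request)
    }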
- /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// Specifies the destination, format and type of the logs. @OptionalCustomCoding> @@ -4180,13 +4190,13 @@ extension ElastiCache { public var securityGroupIds: [String]? /// The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted. Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off. public let snapshotRetentionLimit: Int? - /// The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups. + /// The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Valkey or Redis OSS (cluster mode enabled) replication groups. public let snapshottingClusterId: String? /// The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId. Example: 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. public let snapshotWindow: String? /// A flag that enables in-transit encryption when set to true. If you are enabling in-transit encryption for an existing cluster, you must also set TransitEncryptionMode to preferred. public let transitEncryptionEnabled: Bool? - /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can set the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. + /// A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Valkey or Redis OSS clients to use encrypted connections you can set the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can set TransitEncryptionMode to required. public let transitEncryptionMode: TransitEncryptionMode? /// The ID of the user group you are associating with the replication group. 
@OptionalCustomCoding> @@ -4196,7 +4206,7 @@ extension ElastiCache { public var userGroupIdsToRemove: [String]? @inlinable - public init(applyImmediately: Bool? = nil, authToken: String? = nil, authTokenUpdateStrategy: AuthTokenUpdateStrategyType? = nil, automaticFailoverEnabled: Bool? = nil, autoMinorVersionUpgrade: Bool? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, cacheSecurityGroupNames: [String]? = nil, clusterMode: ClusterMode? = nil, engineVersion: String? = nil, ipDiscovery: IpDiscovery? = nil, logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? = nil, multiAZEnabled: Bool? = nil, notificationTopicArn: String? = nil, notificationTopicStatus: String? = nil, preferredMaintenanceWindow: String? = nil, primaryClusterId: String? = nil, removeUserGroups: Bool? = nil, replicationGroupDescription: String? = nil, replicationGroupId: String? = nil, securityGroupIds: [String]? = nil, snapshotRetentionLimit: Int? = nil, snapshottingClusterId: String? = nil, snapshotWindow: String? = nil, transitEncryptionEnabled: Bool? = nil, transitEncryptionMode: TransitEncryptionMode? = nil, userGroupIdsToAdd: [String]? = nil, userGroupIdsToRemove: [String]? = nil) { + public init(applyImmediately: Bool? = nil, authToken: String? = nil, authTokenUpdateStrategy: AuthTokenUpdateStrategyType? = nil, automaticFailoverEnabled: Bool? = nil, autoMinorVersionUpgrade: Bool? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, cacheSecurityGroupNames: [String]? = nil, clusterMode: ClusterMode? = nil, engine: String? = nil, engineVersion: String? = nil, ipDiscovery: IpDiscovery? = nil, logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? = nil, multiAZEnabled: Bool? = nil, notificationTopicArn: String? = nil, notificationTopicStatus: String? = nil, preferredMaintenanceWindow: String? = nil, primaryClusterId: String? = nil, removeUserGroups: Bool? = nil, replicationGroupDescription: String? = nil, replicationGroupId: String? = nil, securityGroupIds: [String]? = nil, snapshotRetentionLimit: Int? = nil, snapshottingClusterId: String? = nil, snapshotWindow: String? = nil, transitEncryptionEnabled: Bool? = nil, transitEncryptionMode: TransitEncryptionMode? = nil, userGroupIdsToAdd: [String]? = nil, userGroupIdsToRemove: [String]? = nil) { self.applyImmediately = applyImmediately self.authToken = authToken self.authTokenUpdateStrategy = authTokenUpdateStrategy @@ -4206,6 +4216,7 @@ extension ElastiCache { self.cacheParameterGroupName = cacheParameterGroupName self.cacheSecurityGroupNames = cacheSecurityGroupNames self.clusterMode = clusterMode + self.engine = engine self.engineVersion = engineVersion self.ipDiscovery = ipDiscovery self.logDeliveryConfigurations = logDeliveryConfigurations @@ -4230,7 +4241,7 @@ extension ElastiCache { @available(*, deprecated, message: "Members nodeGroupId have been deprecated") @inlinable - public init(applyImmediately: Bool? = nil, authToken: String? = nil, authTokenUpdateStrategy: AuthTokenUpdateStrategyType? = nil, automaticFailoverEnabled: Bool? = nil, autoMinorVersionUpgrade: Bool? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, cacheSecurityGroupNames: [String]? = nil, clusterMode: ClusterMode? = nil, engineVersion: String? = nil, ipDiscovery: IpDiscovery? = nil, logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? = nil, multiAZEnabled: Bool? = nil, nodeGroupId: String? = nil, notificationTopicArn: String? = nil, notificationTopicStatus: String? 
= nil, preferredMaintenanceWindow: String? = nil, primaryClusterId: String? = nil, removeUserGroups: Bool? = nil, replicationGroupDescription: String? = nil, replicationGroupId: String? = nil, securityGroupIds: [String]? = nil, snapshotRetentionLimit: Int? = nil, snapshottingClusterId: String? = nil, snapshotWindow: String? = nil, transitEncryptionEnabled: Bool? = nil, transitEncryptionMode: TransitEncryptionMode? = nil, userGroupIdsToAdd: [String]? = nil, userGroupIdsToRemove: [String]? = nil) { + public init(applyImmediately: Bool? = nil, authToken: String? = nil, authTokenUpdateStrategy: AuthTokenUpdateStrategyType? = nil, automaticFailoverEnabled: Bool? = nil, autoMinorVersionUpgrade: Bool? = nil, cacheNodeType: String? = nil, cacheParameterGroupName: String? = nil, cacheSecurityGroupNames: [String]? = nil, clusterMode: ClusterMode? = nil, engine: String? = nil, engineVersion: String? = nil, ipDiscovery: IpDiscovery? = nil, logDeliveryConfigurations: [LogDeliveryConfigurationRequest]? = nil, multiAZEnabled: Bool? = nil, nodeGroupId: String? = nil, notificationTopicArn: String? = nil, notificationTopicStatus: String? = nil, preferredMaintenanceWindow: String? = nil, primaryClusterId: String? = nil, removeUserGroups: Bool? = nil, replicationGroupDescription: String? = nil, replicationGroupId: String? = nil, securityGroupIds: [String]? = nil, snapshotRetentionLimit: Int? = nil, snapshottingClusterId: String? = nil, snapshotWindow: String? = nil, transitEncryptionEnabled: Bool? = nil, transitEncryptionMode: TransitEncryptionMode? = nil, userGroupIdsToAdd: [String]? = nil, userGroupIdsToRemove: [String]? = nil) { self.applyImmediately = applyImmediately self.authToken = authToken self.authTokenUpdateStrategy = authTokenUpdateStrategy @@ -4240,6 +4251,7 @@ extension ElastiCache { self.cacheParameterGroupName = cacheParameterGroupName self.cacheSecurityGroupNames = cacheSecurityGroupNames self.clusterMode = clusterMode + self.engine = engine self.engineVersion = engineVersion self.ipDiscovery = ipDiscovery self.logDeliveryConfigurations = logDeliveryConfigurations @@ -4283,6 +4295,7 @@ extension ElastiCache { case cacheParameterGroupName = "CacheParameterGroupName" case cacheSecurityGroupNames = "CacheSecurityGroupNames" case clusterMode = "ClusterMode" + case engine = "Engine" case engineVersion = "EngineVersion" case ipDiscovery = "IpDiscovery" case logDeliveryConfigurations = "LogDeliveryConfigurations" @@ -4328,13 +4341,13 @@ extension ElastiCache { public let applyImmediately: Bool? /// The number of node groups (shards) that results from the modification of the shard configuration. public let nodeGroupCount: Int? - /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. + /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. @OptionalCustomCoding> public var nodeGroupsToRemove: [String]? 
- /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache (Redis OSS) will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster. + /// If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster. @OptionalCustomCoding> public var nodeGroupsToRetain: [String]? - /// The name of the Redis OSS (cluster mode enabled) cluster (replication group) on which the shards are to be configured. + /// The name of the Valkey or Redis OSS (cluster mode enabled) cluster (replication group) on which the shards are to be configured. public let replicationGroupId: String? /// Specifies the preferred availability zones for each node group in the cluster. If the value of NodeGroupCount is greater than the current number of node groups (shards), you can use this parameter to specify the preferred availability zones of the cluster's shards. If you omit this parameter ElastiCache selects availability zones for you. You can specify this parameter only if the value of NodeGroupCount is greater than the current number of node groups (shards). @OptionalCustomCoding> @@ -4394,27 +4407,33 @@ extension ElastiCache { /// Modify the cache usage limit for the serverless cache. public let cacheUsageLimits: CacheUsageLimits? - /// The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed. + /// The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only. The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed. public let dailySnapshotTime: String? /// User provided description for the serverless cache. Default = NULL, i.e. the existing description is not removed/modified. The description has a maximum length of 255 characters. public let description: String? - /// The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL. + /// Modifies the engine listed in a serverless cache request. The options are redis, memcached or valkey. + public let engine: String? + /// Modifies the engine vesion listed in a serverless cache request. + public let majorEngineVersion: String? + /// The identifier of the UserGroup to be removed from association with the Valkey and Redis OSS serverless cache. Available for Valkey and Redis OSS only. Default is NULL. public let removeUserGroup: Bool? /// The new list of VPC security groups to be associated with the serverless cache. Populating this list means the current VPC security groups will be removed. This security group is used to authorize traffic access for the VPC end-point (private-link). Default = NULL - the existing list of VPC security groups is not removed. @OptionalCustomCoding> public var securityGroupIds: [String]? /// User-provided identifier for the serverless cache to be modified. 
public let serverlessCacheName: String? - /// The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Redis OSS and Serverless Memcached only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days. + /// The number of days for which Elasticache retains automatic snapshots before deleting them. Available for Valkey, Redis OSS and Serverless Memcached only. Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 days. public let snapshotRetentionLimit: Int? - /// The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL - the existing UserGroup is not removed. + /// The identifier of the UserGroup to be associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL - the existing UserGroup is not removed. public let userGroupId: String? @inlinable - public init(cacheUsageLimits: CacheUsageLimits? = nil, dailySnapshotTime: String? = nil, description: String? = nil, removeUserGroup: Bool? = nil, securityGroupIds: [String]? = nil, serverlessCacheName: String? = nil, snapshotRetentionLimit: Int? = nil, userGroupId: String? = nil) { + public init(cacheUsageLimits: CacheUsageLimits? = nil, dailySnapshotTime: String? = nil, description: String? = nil, engine: String? = nil, majorEngineVersion: String? = nil, removeUserGroup: Bool? = nil, securityGroupIds: [String]? = nil, serverlessCacheName: String? = nil, snapshotRetentionLimit: Int? = nil, userGroupId: String? = nil) { self.cacheUsageLimits = cacheUsageLimits self.dailySnapshotTime = dailySnapshotTime self.description = description + self.engine = engine + self.majorEngineVersion = majorEngineVersion self.removeUserGroup = removeUserGroup self.securityGroupIds = securityGroupIds self.serverlessCacheName = serverlessCacheName @@ -4426,6 +4445,8 @@ extension ElastiCache { case cacheUsageLimits = "CacheUsageLimits" case dailySnapshotTime = "DailySnapshotTime" case description = "Description" + case engine = "Engine" + case majorEngineVersion = "MajorEngineVersion" case removeUserGroup = "RemoveUserGroup" case securityGroupIds = "SecurityGroupIds" case serverlessCacheName = "ServerlessCacheName" @@ -4532,7 +4553,7 @@ extension ElastiCache { public struct NodeGroup: AWSDecodableShape { public struct _NodeGroupMembersEncoding: ArrayCoderProperties { public static let member = "NodeGroupMember" } - /// The identifier for the node group (shard). A Redis OSS (cluster mode disabled) replication group contains only 1 node group; therefore, the node group ID is 0001. A Redis OSS (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090. Optionally, the user can provide the id for a node group. + /// The identifier for the node group (shard). A Valkey or Redis OSS (cluster mode disabled) replication group contains only 1 node group; therefore, the node group ID is 0001. A Valkey or Redis OSS (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090. Optionally, the user can provide the id for a node group. public let nodeGroupId: String? /// A list containing information about individual nodes within the node group (shard). 
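With Engine and MajorEngineVersion now on ModifyServerlessCacheRequest, a serverless cache can likewise be moved to a different engine and major version in a single call. An illustrative sketch; the cache name and version string are hypothetical, and the accepted values are not spelled out in this diff:

    import SotoElastiCache

    func upgradeServerlessCacheToValkey(awsClient: AWSClient) async throws {
        let elastiCache = ElastiCache(client: awsClient, region: .useast1)
        let request = ElastiCache.ModifyServerlessCacheRequest(
            engine: "valkey",                          // new field; redis, memcached or valkey
            majorEngineVersion: "8",                   // new field; hypothetical version string
            serverlessCacheName: "my-serverless-cache" // hypothetical cache name
        )
        _ = try await elastiCache.modifyServerlessCache(request)
    }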
@OptionalCustomCoding> @@ -4570,7 +4591,7 @@ extension ElastiCache { public struct _ReplicaAvailabilityZonesEncoding: ArrayCoderProperties { public static let member = "AvailabilityZone" } public struct _ReplicaOutpostArnsEncoding: ArrayCoderProperties { public static let member = "OutpostArn" } - /// Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to. + /// Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to. public let nodeGroupId: String? /// The Availability Zone where the primary node of this node group (shard) is launched. public let primaryAvailabilityZone: String? @@ -4620,13 +4641,13 @@ extension ElastiCache { public let cacheClusterId: String? /// The ID of the node within its cluster. A node ID is a numeric identifier (0001, 0002, etc.). public let cacheNodeId: String? - /// The role that is currently assigned to the node - primary or replica. This member is only applicable for Redis OSS (cluster mode disabled) replication groups. + /// The role that is currently assigned to the node - primary or replica. This member is only applicable for Valkey or Redis OSS (cluster mode disabled) replication groups. public let currentRole: String? /// The name of the Availability Zone in which the node is located. public let preferredAvailabilityZone: String? /// The outpost ARN of the node group member. public let preferredOutpostArn: String? - /// The information required for client programs to connect to a node for read operations. The read endpoint is only applicable on Redis OSS (cluster mode disabled) clusters. + /// The information required for client programs to connect to a node for read operations. The read endpoint is only applicable on Valkey or Redis OSS (cluster mode disabled) clusters. public let readEndpoint: Endpoint? @inlinable @@ -4877,7 +4898,7 @@ extension ElastiCache { /// The log delivery configurations being modified @OptionalCustomCoding> public var logDeliveryConfigurations: [PendingLogDeliveryConfiguration]? - /// The new number of cache nodes for the cluster. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. + /// The new number of cache nodes for the cluster. For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. public let numCacheNodes: Int? /// A flag that enables in-transit encryption when set to true. public let transitEncryptionEnabled: Bool? @@ -4915,7 +4936,7 @@ extension ElastiCache { public let replicationGroupId: String? /// The unique ID of the service update public let serviceUpdateName: String? - /// The status of the update action on the Redis OSS cluster + /// The status of the update action on the Valkey or Redis OSS cluster public let updateActionStatus: UpdateActionStatus? @inlinable @@ -5119,19 +5140,19 @@ extension ElastiCache { public let arn: String? /// A flag that enables encryption at-rest when set to true. You cannot modify the value of AtRestEncryptionEnabled after the cluster is created. To enable encryption at-rest on a cluster you must set AtRestEncryptionEnabled to true when you create a cluster. Required: Only available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: false public let atRestEncryptionEnabled: Bool? 
- /// A flag that enables using an AuthToken (password) when issuing Redis OSS commands. Default: false + /// A flag that enables using an AuthToken (password) when issuing Valkey or Redis OSS commands. Default: false public let authTokenEnabled: Bool? /// The date the auth token was last modified public let authTokenLastModifiedDate: Date? - /// Indicates the status of automatic failover for this Redis OSS replication group. + /// Indicates the status of automatic failover for this Valkey or Redis OSS replication group. public let automaticFailover: AutomaticFailoverStatus? - /// If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. + /// If you are running Valkey 7.2 and above, or Redis OSS engine version 6.0 and above, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// The name of the compute and memory capacity node type for each node in the replication group. public let cacheNodeType: String? /// A flag indicating whether or not this replication group is cluster enabled; i.e., whether its data can be partitioned across multiple shards (API/CLI: node groups). Valid values: true | false public let clusterEnabled: Bool? - /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. public let clusterMode: ClusterMode? /// The configuration endpoint for this replication group. Use the configuration endpoint to connect to this replication group. public let configurationEndpoint: Endpoint? @@ -5139,9 +5160,11 @@ extension ElastiCache { public let dataTiering: DataTieringStatus? /// The user supplied description of the replication group. public let description: String? + /// The engine used in a replication group. The options are redis, memcached or valkey. + public let engine: String? /// The name of the Global datastore and role of this replication group in the Global datastore. public let globalReplicationGroupInfo: GlobalReplicationGroupInfo? - /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let ipDiscovery: IpDiscovery? /// The ID of the KMS key used to encrypt the disk in the cluster. public let kmsKeyId: String? 
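Because the ReplicationGroup result shape now carries the engine, a caller can distinguish Valkey groups from Redis OSS or Memcached ones directly in a DescribeReplicationGroups response. A small sketch, assuming an existing AWSClient and a hypothetical group id:

    import SotoElastiCache

    func printReplicationGroupEngine(awsClient: AWSClient) async throws {
        let elastiCache = ElastiCache(client: awsClient, region: .useast1)
        let response = try await elastiCache.describeReplicationGroups(
            ElastiCache.DescribeReplicationGroupsMessage(replicationGroupId: "my-replication-group")
        )
        for group in response.replicationGroups ?? [] {
            // `engine` is the field added to ReplicationGroup in this diff.
            print("\(group.replicationGroupId ?? "?"): \(group.engine ?? "unknown")")
        }
    }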
@@ -5156,9 +5179,9 @@ extension ElastiCache { public var memberClustersOutpostArns: [String]? /// A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ public let multiAZ: MultiAZStatus? - /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. public let networkType: NetworkType? - /// A list of node groups in this replication group. For Redis OSS (cluster mode disabled) replication groups, this is a single-element list. For Redis OSS (cluster mode enabled) replication groups, the list contains an entry for each node group (shard). + /// A list of node groups in this replication group. For Valkey or Redis OSS (cluster mode disabled) replication groups, this is a single-element list. For Valkey or Redis OSS (cluster mode enabled) replication groups, the list contains an entry for each node group (shard). @OptionalCustomCoding> public var nodeGroups: [NodeGroup]? /// A group of settings to be applied to the replication group, either immediately or during the next maintenance window. @@ -5184,7 +5207,7 @@ extension ElastiCache { public var userGroupIds: [String]? @inlinable - public init(arn: String? = nil, atRestEncryptionEnabled: Bool? = nil, authTokenEnabled: Bool? = nil, authTokenLastModifiedDate: Date? = nil, automaticFailover: AutomaticFailoverStatus? = nil, autoMinorVersionUpgrade: Bool? = nil, cacheNodeType: String? = nil, clusterEnabled: Bool? = nil, clusterMode: ClusterMode? = nil, configurationEndpoint: Endpoint? = nil, dataTiering: DataTieringStatus? = nil, description: String? = nil, globalReplicationGroupInfo: GlobalReplicationGroupInfo? = nil, ipDiscovery: IpDiscovery? = nil, kmsKeyId: String? = nil, logDeliveryConfigurations: [LogDeliveryConfiguration]? = nil, memberClusters: [String]? = nil, memberClustersOutpostArns: [String]? = nil, multiAZ: MultiAZStatus? = nil, networkType: NetworkType? = nil, nodeGroups: [NodeGroup]? = nil, pendingModifiedValues: ReplicationGroupPendingModifiedValues? = nil, replicationGroupCreateTime: Date? = nil, replicationGroupId: String? = nil, snapshotRetentionLimit: Int? = nil, snapshottingClusterId: String? = nil, snapshotWindow: String? = nil, status: String? = nil, transitEncryptionEnabled: Bool? = nil, transitEncryptionMode: TransitEncryptionMode? = nil, userGroupIds: [String]? = nil) { + public init(arn: String? = nil, atRestEncryptionEnabled: Bool? = nil, authTokenEnabled: Bool? = nil, authTokenLastModifiedDate: Date? = nil, automaticFailover: AutomaticFailoverStatus? = nil, autoMinorVersionUpgrade: Bool? = nil, cacheNodeType: String? = nil, clusterEnabled: Bool? = nil, clusterMode: ClusterMode? = nil, configurationEndpoint: Endpoint? = nil, dataTiering: DataTieringStatus? = nil, description: String? = nil, engine: String? = nil, globalReplicationGroupInfo: GlobalReplicationGroupInfo? = nil, ipDiscovery: IpDiscovery? = nil, kmsKeyId: String? = nil, logDeliveryConfigurations: [LogDeliveryConfiguration]? = nil, memberClusters: [String]? = nil, memberClustersOutpostArns: [String]? = nil, multiAZ: MultiAZStatus? = nil, networkType: NetworkType? = nil, nodeGroups: [NodeGroup]? 
= nil, pendingModifiedValues: ReplicationGroupPendingModifiedValues? = nil, replicationGroupCreateTime: Date? = nil, replicationGroupId: String? = nil, snapshotRetentionLimit: Int? = nil, snapshottingClusterId: String? = nil, snapshotWindow: String? = nil, status: String? = nil, transitEncryptionEnabled: Bool? = nil, transitEncryptionMode: TransitEncryptionMode? = nil, userGroupIds: [String]? = nil) { self.arn = arn self.atRestEncryptionEnabled = atRestEncryptionEnabled self.authTokenEnabled = authTokenEnabled @@ -5197,6 +5220,7 @@ extension ElastiCache { self.configurationEndpoint = configurationEndpoint self.dataTiering = dataTiering self.description = description + self.engine = engine self.globalReplicationGroupInfo = globalReplicationGroupInfo self.ipDiscovery = ipDiscovery self.kmsKeyId = kmsKeyId @@ -5231,6 +5255,7 @@ extension ElastiCache { case configurationEndpoint = "ConfigurationEndpoint" case dataTiering = "DataTiering" case description = "Description" + case engine = "Engine" case globalReplicationGroupInfo = "GlobalReplicationGroupInfo" case ipDiscovery = "IpDiscovery" case kmsKeyId = "KmsKeyId" @@ -5277,9 +5302,9 @@ extension ElastiCache { public struct ReplicationGroupPendingModifiedValues: AWSDecodableShape { /// The auth token status public let authTokenStatus: AuthTokenUpdateStatus? - /// Indicates the status of automatic failover for this Redis OSS replication group. + /// Indicates the status of automatic failover for this Valkey or Redis OSS replication group. public let automaticFailoverStatus: PendingAutomaticFailoverStatus? - /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. + /// Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. public let clusterMode: ClusterMode? /// The log delivery configurations being modified @OptionalCustomCoding> @@ -5354,7 +5379,7 @@ extension ElastiCache { /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. 
Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The duration of the reservation in seconds. public let duration: Int? @@ -5466,7 +5491,7 @@ extension ElastiCache { /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The duration of the offering. in seconds. public let duration: Int? 
@@ -5557,7 +5582,7 @@ extension ElastiCache { public struct ReshardingConfiguration: AWSEncodableShape { public struct _PreferredAvailabilityZonesEncoding: ArrayCoderProperties { public static let member = "AvailabilityZone" } - /// Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to. + /// Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to. public let nodeGroupId: String? /// A list of preferred availability zones for the nodes in this cluster. @OptionalCustomCoding> @@ -5658,7 +5683,7 @@ extension ElastiCache { public let cacheUsageLimits: CacheUsageLimits? /// When the serverless cache was created. public let createTime: Date? - /// The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only. + /// The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Valkey, Redis OSS and Serverless Memcached only. public let dailySnapshotTime: String? /// A description of the serverless cache. public let description: String? @@ -5677,14 +5702,14 @@ extension ElastiCache { public var securityGroupIds: [String]? /// The unique identifier of the serverless cache. public let serverlessCacheName: String? - /// The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only. + /// The current setting for the number of serverless cache snapshots the system will retain. Available for Valkey, Redis OSS and Serverless Memcached only. public let snapshotRetentionLimit: Int? /// The current status of the serverless cache. The allowed values are CREATING, AVAILABLE, DELETING, CREATE-FAILED and MODIFYING. public let status: String? /// If no subnet IDs are given and your VPC is in us-west-1, then ElastiCache will select 2 default subnets across AZs in your VPC. For all other Regions, if no subnet IDs are given then ElastiCache will select 3 default subnets across AZs in your default VPC. @OptionalCustomCoding> public var subnetIds: [String]? - /// The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL. + /// The identifier of the user group associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL. public let userGroupId: String? @inlinable @@ -5752,23 +5777,23 @@ extension ElastiCache { } public struct ServerlessCacheSnapshot: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. + /// The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. public let arn: String? - /// The total size of a serverless cache snapshot, in bytes. Available for Redis OSS and Serverless Memcached only. + /// The total size of a serverless cache snapshot, in bytes. Available for Valkey, Redis OSS and Serverless Memcached only. public let bytesUsedForCache: String? - /// The date and time that the source serverless cache's metadata and cache data set was obtained for the snapshot. Available for Redis OSS and Serverless Memcached only. 
+ /// The date and time that the source serverless cache's metadata and cache data set was obtained for the snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. public let createTime: Date? - /// The time that the serverless cache snapshot will expire. Available for Redis OSS and Serverless Memcached only. + /// The time that the serverless cache snapshot will expire. Available for Valkey, Redis OSS and Serverless Memcached only. public let expiryTime: Date? - /// The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. + /// The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. public let kmsKeyId: String? - /// The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis OSS and Serverless Memcached only. + /// The configuration of the serverless cache, at the time the snapshot was taken. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheConfiguration: ServerlessCacheConfiguration? - /// The identifier of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only. + /// The identifier of a serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. public let serverlessCacheSnapshotName: String? - /// The type of snapshot of serverless cache. Available for Redis OSS and Serverless Memcached only. + /// The type of snapshot of serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only. public let snapshotType: String? - /// The current status of the serverless cache. Available for Redis OSS and Serverless Memcached only. + /// The current status of the serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only. public let status: String? @inlinable @@ -5800,9 +5825,9 @@ extension ElastiCache { public struct ServiceUpdate: AWSDecodableShape { /// Indicates whether the service update will be automatically applied once the recommended apply-by date has expired. public let autoUpdateAfterRecommendedApplyByDate: Bool? - /// The Elasticache engine to which the update applies. Either Redis OSS or Memcached. + /// The Elasticache engine to which the update applies. Either Valkey, Redis OSS or Memcached. public let engine: String? - /// The Elasticache engine version to which the update applies. Either Redis OSS or Memcached engine version. + /// The Elasticache engine version to which the update applies. Either Valkey, Redis OSS or Memcached engine version. public let engineVersion: String? /// The estimated length of time the service update will take public let estimatedUpdateTime: String? @@ -5895,9 +5920,9 @@ extension ElastiCache { /// The ARN (Amazon Resource Name) of the snapshot. public let arn: String? - /// Indicates the status of automatic failover for the source Redis OSS replication group. + /// Indicates the status of automatic failover for the source Valkey or Redis OSS replication group. public let automaticFailover: AutomaticFailoverStatus? - ///  If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. 
+ ///  If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and above, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. public let autoMinorVersionUpgrade: Bool? /// The date and time when the source cluster was created. public let cacheClusterCreateTime: Date? @@ -5931,7 +5956,7 @@ extension ElastiCache { /// cache.r6g.4xlarge, /// cache.r6g.8xlarge, /// cache.r6g.12xlarge, - /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later. + /// cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances. The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later. public let cacheNodeType: String? /// The cache parameter group that is associated with the source cluster. public let cacheParameterGroupName: String? @@ -5948,7 +5973,7 @@ extension ElastiCache { /// A list of the cache nodes in the source cluster. @OptionalCustomCoding> public var nodeSnapshots: [NodeSnapshot]? - /// The number of cache nodes in the source cluster. For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. + /// The number of cache nodes in the source cluster. For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. public let numCacheNodes: Int? /// The number of node groups (shards) in this snapshot. When restoring from a snapshot, the number of node groups (shards) in the snapshot and in the restored replication group must be the same. public let numNodeGroups: Int? @@ -6044,7 +6069,7 @@ extension ElastiCache { } public struct StartMigrationMessage: AWSEncodableShape { - /// List of endpoints from which data should be migrated. 
For Redis OSS (cluster mode disabled), list should have only one element. + /// List of endpoints from which data should be migrated. For Valkey or Redis OSS (cluster mode disabled), the list should have only one element. @OptionalCustomCoding> public var customerNodeEndpointList: [CustomerNodeEndpoint]? /// The ID of the replication group to which data should be migrated. @@ -6082,7 +6107,7 @@ extension ElastiCache { public let subnetIdentifier: String? /// The outpost ARN of the subnet. public let subnetOutpost: SubnetOutpost? - /// Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. + /// Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system. @OptionalCustomCoding> public var supportedNetworkTypes: [NetworkType]? @@ -6277,7 +6302,7 @@ extension ElastiCache { /// The status of the service update on the cache node @OptionalCustomCoding> public var cacheNodeUpdateStatus: [CacheNodeUpdateStatus]? - /// The Elasticache engine to which the update applies. Either Redis OSS or Memcached. + /// The Elasticache engine to which the update applies. Either Valkey, Redis OSS or Memcached. public let engine: String? /// The estimated length of time for the update to complete public let estimatedUpdateTime: String? @@ -6454,7 +6479,7 @@ extension ElastiCache { /// A list of replication groups that the user group can access. @OptionalCustomCoding> public var replicationGroups: [String]? - /// Indicates which serverless caches the specified user group is associated with. Available for Redis OSS and Serverless Memcached only. + /// Indicates which serverless caches the specified user group is associated with. Available for Valkey, Redis OSS and Serverless Memcached only. @OptionalCustomCoding> public var serverlessCaches: [String]? /// Indicates user group status. Can be "creating", "active", "modifying", "deleting". @@ -6699,7 +6724,7 @@ public struct ElastiCacheErrorType: AWSErrorType { public static var invalidParameterValueException: Self { .init(.invalidParameterValueException) } /// The requested replication group is not in the available state. public static var invalidReplicationGroupStateFault: Self { .init(.invalidReplicationGroupStateFault) } - /// The state of the serverless cache snapshot was not received. Available for Redis OSS and Serverless Memcached only. + /// The state of the serverless cache snapshot was not received. Available for Valkey, Redis OSS and Serverless Memcached only. public static var invalidServerlessCacheSnapshotStateFault: Self { .init(.invalidServerlessCacheSnapshotStateFault) } /// The account for these credentials is not currently active. public static var invalidServerlessCacheStateFault: Self { .init(.invalidServerlessCacheStateFault) } @@ -6745,11 +6770,11 @@ public struct ElastiCacheErrorType: AWSErrorType { public static var serverlessCacheNotFoundFault: Self { .init(.serverlessCacheNotFoundFault) } /// The number of serverless caches exceeds the customer quota. public static var serverlessCacheQuotaForCustomerExceededFault: Self { .init(.serverlessCacheQuotaForCustomerExceededFault) } - /// A serverless cache snapshot with this name already exists. Available for Redis OSS and Serverless Memcached only. + /// A serverless cache snapshot with this name already exists. 
Available for Valkey, Redis OSS and Serverless Memcached only. public static var serverlessCacheSnapshotAlreadyExistsFault: Self { .init(.serverlessCacheSnapshotAlreadyExistsFault) } - /// This serverless cache snapshot could not be found or does not exist. Available for Redis OSS and Serverless Memcached only. + /// This serverless cache snapshot could not be found or does not exist. Available for Valkey, Redis OSS and Serverless Memcached only. public static var serverlessCacheSnapshotNotFoundFault: Self { .init(.serverlessCacheSnapshotNotFoundFault) } - /// The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis OSS and Serverless Memcached only. + /// The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Valkey, Redis OSS and Serverless Memcached only. public static var serverlessCacheSnapshotQuotaExceededFault: Self { .init(.serverlessCacheSnapshotQuotaExceededFault) } /// The specified service linked role (SLR) was not found. public static var serviceLinkedRoleNotFoundFault: Self { .init(.serviceLinkedRoleNotFoundFault) } @@ -6757,7 +6782,7 @@ public struct ElastiCacheErrorType: AWSErrorType { public static var serviceUpdateNotFoundFault: Self { .init(.serviceUpdateNotFoundFault) } /// You already have a snapshot with the given name. public static var snapshotAlreadyExistsFault: Self { .init(.snapshotAlreadyExistsFault) } - /// You attempted one of the following operations: Creating a snapshot of a Redis OSS cluster running on a cache.t1.micro cache node. Creating a snapshot of a cluster that is running Memcached rather than Redis OSS. Neither of these are supported by ElastiCache. + /// You attempted one of the following operations: Creating a snapshot of a Valkey or Redis OSS cluster running on a cache.t1.micro cache node. Creating a snapshot of a cluster that is running Memcached rather than Valkey or Redis OSS. Neither of these are supported by ElastiCache. public static var snapshotFeatureNotSupportedFault: Self { .init(.snapshotFeatureNotSupportedFault) } /// The requested snapshot name does not refer to an existing snapshot. public static var snapshotNotFoundFault: Self { .init(.snapshotNotFoundFault) } diff --git a/Sources/Soto/Services/ElasticInference/ElasticInference_api.swift b/Sources/Soto/Services/ElasticInference/ElasticInference_api.swift index 1a61069d38..3a2a190e94 100644 --- a/Sources/Soto/Services/ElasticInference/ElasticInference_api.swift +++ b/Sources/Soto/Services/ElasticInference/ElasticInference_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS ElasticInference service. /// -/// Elastic Inference public APIs. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. +/// Amazon Elastic Inference is no longer available. Elastic Inference public APIs. 
public struct ElasticInference: AWSService { // MARK: Member variables @@ -90,7 +90,7 @@ public struct ElasticInference: AWSService { // MARK: API Calls - /// Describes the locations in which a given accelerator type or set of types is present in a given region. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Describes the locations in which a given accelerator type or set of types is present in a given region. @Sendable @inlinable public func describeAcceleratorOfferings(_ input: DescribeAcceleratorOfferingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAcceleratorOfferingsResponse { @@ -103,7 +103,7 @@ public struct ElasticInference: AWSService { logger: logger ) } - /// Describes the locations in which a given accelerator type or set of types is present in a given region. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Describes the locations in which a given accelerator type or set of types is present in a given region. /// /// Parameters: /// - acceleratorTypes: The list of accelerator types to describe. @@ -122,7 +122,7 @@ public struct ElasticInference: AWSService { return try await self.describeAcceleratorOfferings(input, logger: logger) } - /// Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. @Sendable @inlinable public func describeAcceleratorTypes(_ input: DescribeAcceleratorTypesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAcceleratorTypesResponse { @@ -135,7 +135,7 @@ public struct ElasticInference: AWSService { logger: logger ) } - /// Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. 
February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. /// /// Parameters: /// - logger: Logger use during operation @@ -148,7 +148,7 @@ public struct ElasticInference: AWSService { return try await self.describeAcceleratorTypes(input, logger: logger) } - /// Describes information over a provided set of accelerators belonging to an account. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Describes information over a provided set of accelerators belonging to an account. @Sendable @inlinable public func describeAccelerators(_ input: DescribeAcceleratorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAcceleratorsResponse { @@ -161,7 +161,7 @@ public struct ElasticInference: AWSService { logger: logger ) } - /// Describes information over a provided set of accelerators belonging to an account. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Describes information over a provided set of accelerators belonging to an account. /// /// Parameters: /// - acceleratorIds: The IDs of the accelerators to describe. @@ -186,7 +186,7 @@ public struct ElasticInference: AWSService { return try await self.describeAccelerators(input, logger: logger) } - /// Returns all tags of an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. 
+ /// Amazon Elastic Inference is no longer available. Returns all tags of an Elastic Inference Accelerator. @Sendable @inlinable public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResult { @@ -199,7 +199,7 @@ public struct ElasticInference: AWSService { logger: logger ) } - /// Returns all tags of an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Returns all tags of an Elastic Inference Accelerator. /// /// Parameters: /// - resourceArn: The ARN of the Elastic Inference Accelerator to list the tags for. @@ -215,7 +215,7 @@ public struct ElasticInference: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Adds the specified tags to an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Adds the specified tags to an Elastic Inference Accelerator. @Sendable @inlinable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResult { @@ -228,7 +228,7 @@ public struct ElasticInference: AWSService { logger: logger ) } - /// Adds the specified tags to an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Adds the specified tags to an Elastic Inference Accelerator. /// /// Parameters: /// - resourceArn: The ARN of the Elastic Inference Accelerator to tag. @@ -247,7 +247,7 @@ public struct ElasticInference: AWSService { return try await self.tagResource(input, logger: logger) } - /// Removes the specified tags from an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. 
After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Removes the specified tags from an Elastic Inference Accelerator. @Sendable @inlinable public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResult { @@ -260,7 +260,7 @@ public struct ElasticInference: AWSService { logger: logger ) } - /// Removes the specified tags from an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service. + /// Amazon Elastic Inference is no longer available. Removes the specified tags from an Elastic Inference Accelerator. /// /// Parameters: /// - resourceArn: The ARN of the Elastic Inference Accelerator to untag. diff --git a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift index 02beb2bb18..248a83c6ae 100644 --- a/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift +++ b/Sources/Soto/Services/ElasticLoadBalancingV2/ElasticLoadBalancingV2_shapes.swift @@ -123,6 +123,22 @@ extension ElasticLoadBalancingV2 { public var description: String { return self.rawValue } } + public enum TargetAdministrativeOverrideReasonEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case internalError = "AdministrativeOverride.Unknown" + case noOverrideEngaged = "AdministrativeOverride.NoOverride" + case zonalShiftDelegatedToDns = "AdministrativeOverride.ZonalShiftDelegatedToDns" + case zonalShiftEngaged = "AdministrativeOverride.ZonalShiftActive" + public var description: String { return self.rawValue } + } + + public enum TargetAdministrativeOverrideStateEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case noOverride = "no_override" + case unknown = "unknown" + case zonalShiftActive = "zonal_shift_active" + case zonalShiftDelegatedToDns = "zonal_shift_delegated_to_dns" + public var description: String { return self.rawValue } + } + public enum TargetGroupIpAddressTypeEnum: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case ipv4 = "ipv4" case ipv6 = "ipv6" @@ -326,6 +342,28 @@ extension ElasticLoadBalancingV2 { } } + public struct AdministrativeOverride: AWSDecodableShape { + /// A description of the override state that provides additional details. + public let description: String? + /// The reason code for the state. + public let reason: TargetAdministrativeOverrideReasonEnum? + /// The state of the override. + public let state: TargetAdministrativeOverrideStateEnum? + + @inlinable + public init(description: String? = nil, reason: TargetAdministrativeOverrideReasonEnum? 
= nil, state: TargetAdministrativeOverrideStateEnum? = nil) { + self.description = description + self.reason = reason + self.state = state + } + + private enum CodingKeys: String, CodingKey { + case description = "Description" + case reason = "Reason" + case state = "State" + } + } + public struct AnomalyDetection: AWSDecodableShape { /// Indicates whether anomaly mitigation is in progress. public let mitigationInEffect: MitigationInEffectEnum? @@ -2093,7 +2131,7 @@ extension ElasticLoadBalancingV2 { } public struct LoadBalancerAttribute: AWSEncodableShape & AWSDecodableShape { - /// The name of the attribute. The following attributes are supported by all load balancers: deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false. load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default for Network Load Balancers and Gateway Load Balancers is false. The default for Application Load Balancers is true, and cannot be changed. The following attributes are supported by both Application Load Balancers and Network Load Balancers: access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false. access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket. access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs. ipv6.deny_all_igw_traffic - Blocks internet gateway (IGW) access to the load balancer. It is set to false for internet-facing load balancers and true for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway. The following attributes are supported by only Application Load Balancers: idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds. client_keep_alive.seconds - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds. connection_logs.s3.enabled - Indicates whether connection logs are enabled. The value is true or false. The default is false. connection_logs.s3.bucket - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket. connection_logs.s3.prefix - The prefix for the location in the S3 bucket for the connection logs. routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive. routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false. routing.http.preserve_host_header.enabled - Indicates whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. The possible values are true and false. The default is false. 
routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false. routing.http.xff_client_port.enabled - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false. routing.http.xff_header_processing.mode - Enables you to modify, preserve, or remove the X-Forwarded-For header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append, preserve, and remove. The default is append. If the value is append, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For header in the HTTP request before it sends it to targets. If the value is preserve the Application Load Balancer preserves the X-Forwarded-For header in the HTTP request, and sends it to targets without any change. If the value is remove, the Application Load Balancer removes the X-Forwarded-For header in the HTTP request before it sends it to targets. routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens. waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false. The following attributes are supported by only Network Load Balancers: dns_record.client_routing_policy - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are availability_zone_affinity with 100 percent zonal affinity, partial_availability_zone_affinity with 85 percent zonal affinity, and any_availability_zone with 0 percent zonal affinity. + /// The name of the attribute. The following attributes are supported by all load balancers: deletion_protection.enabled - Indicates whether deletion protection is enabled. The value is true or false. The default is false. load_balancing.cross_zone.enabled - Indicates whether cross-zone load balancing is enabled. The possible values are true and false. The default for Network Load Balancers and Gateway Load Balancers is false. The default for Application Load Balancers is true, and cannot be changed. The following attributes are supported by both Application Load Balancers and Network Load Balancers: access_logs.s3.enabled - Indicates whether access logs are enabled. The value is true or false. The default is false. access_logs.s3.bucket - The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket. 
access_logs.s3.prefix - The prefix for the location in the S3 bucket for the access logs. ipv6.deny_all_igw_traffic - Blocks internet gateway (IGW) access to the load balancer. It is set to false for internet-facing load balancers and true for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway. The following attributes are supported by only Application Load Balancers: idle_timeout.timeout_seconds - The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds. client_keep_alive.seconds - The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds. connection_logs.s3.enabled - Indicates whether connection logs are enabled. The value is true or false. The default is false. connection_logs.s3.bucket - The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket. connection_logs.s3.prefix - The prefix for the location in the S3 bucket for the connection logs. routing.http.desync_mitigation_mode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. The default is defensive. routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). The default is false. routing.http.preserve_host_header.enabled - Indicates whether the Application Load Balancer should preserve the Host header in the HTTP request and send it to the target without any change. The possible values are true and false. The default is false. routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true and false. The default is false. routing.http.xff_client_port.enabled - Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. The possible values are true and false. The default is false. routing.http.xff_header_processing.mode - Enables you to modify, preserve, or remove the X-Forwarded-For header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append, preserve, and remove. The default is append. If the value is append, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For header in the HTTP request before it sends it to targets. If the value is preserve the Application Load Balancer preserves the X-Forwarded-For header in the HTTP request, and sends it to targets without any change. If the value is remove, the Application Load Balancer removes the X-Forwarded-For header in the HTTP request before it sends it to targets. 
routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible values are true and false. The default is true. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens. waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The default is false. The following attributes are supported by only Network Load Balancers: dns_record.client_routing_policy - Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are availability_zone_affinity with 100 percent zonal affinity, partial_availability_zone_affinity with 85 percent zonal affinity, and any_availability_zone with 0 percent zonal affinity. zonal_shift.config.enabled - Indicates whether zonal shift is enabled. The possible values are true and false. The default is false. public let key: String? /// The value of the attribute. public let value: String? @@ -3324,6 +3362,8 @@ extension ElasticLoadBalancingV2 { } public struct TargetHealthDescription: AWSDecodableShape { + /// The administrative override information for the target. + public let administrativeOverride: AdministrativeOverride? /// The anomaly detection result for the target. If no anomalies were detected, the result is normal. If anomalies were detected, the result is anomalous. public let anomalyDetection: AnomalyDetection? /// The port to use to connect with the target. @@ -3334,7 +3374,8 @@ extension ElasticLoadBalancingV2 { public let targetHealth: TargetHealth? @inlinable - public init(anomalyDetection: AnomalyDetection? = nil, healthCheckPort: String? = nil, target: TargetDescription? = nil, targetHealth: TargetHealth? = nil) { + public init(administrativeOverride: AdministrativeOverride? = nil, anomalyDetection: AnomalyDetection? = nil, healthCheckPort: String? = nil, target: TargetDescription? = nil, targetHealth: TargetHealth? = nil) { + self.administrativeOverride = administrativeOverride self.anomalyDetection = anomalyDetection self.healthCheckPort = healthCheckPort self.target = target @@ -3342,6 +3383,7 @@ extension ElasticLoadBalancingV2 { } private enum CodingKeys: String, CodingKey { + case administrativeOverride = "AdministrativeOverride" case anomalyDetection = "AnomalyDetection" case healthCheckPort = "HealthCheckPort" case target = "Target" diff --git a/Sources/Soto/Services/FSx/FSx_api.swift b/Sources/Soto/Services/FSx/FSx_api.swift index 3fc976b52c..c7add37219 100644 --- a/Sources/Soto/Services/FSx/FSx_api.swift +++ b/Sources/Soto/Services/FSx/FSx_api.swift @@ -300,7 +300,7 @@ public struct FSx: AWSService { /// Parameters: /// - batchImportMetaDataOnCreate: Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Default is false. /// - clientRequestToken: - /// - dataRepositoryPath: The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. + /// - dataRepositoryPath: The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional). 
This path specifies where in the S3 data repository files will be imported from or exported to. /// - fileSystemId: /// - fileSystemPath: A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. If you specify only a forward slash (/) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system. /// - importedFileChunkSize: For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB. @@ -351,7 +351,7 @@ public struct FSx: AWSService { /// - capacityToRelease: Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache. /// - clientRequestToken: /// - fileSystemId: - /// - paths: A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all exported files that meet the last accessed time criteria (for release tasks). For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1. For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional). For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release exported files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all exported files in the file system, specify a forward slash (/) as the path. A file must also meet the last accessed time criteria specified in for the file to be released. + /// - paths: A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. 
If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all exported files that meet the last accessed time criteria (for release tasks). For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1. For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://bucket-name/prefix (where prefix is optional). For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release exported files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all exported files in the file system, specify a forward slash (/) as the path. A file must also meet the last accessed time criteria specified in for the file to be released. /// - releaseConfiguration: The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system. /// - report: Defines whether or not Amazon FSx provides a CompletionReport once the task has completed. A CompletionReport provides a detailed report on the files that Amazon FSx processed that meet the criteria specified by the Scope parameter. For more information, see Working with Task Completion Reports. /// - tags: diff --git a/Sources/Soto/Services/FSx/FSx_shapes.swift b/Sources/Soto/Services/FSx/FSx_shapes.swift index e5091087af..60e2637a03 100644 --- a/Sources/Soto/Services/FSx/FSx_shapes.swift +++ b/Sources/Soto/Services/FSx/FSx_shapes.swift @@ -833,7 +833,7 @@ extension FSx { public let enabled: Bool? /// Required if Enabled is set to true. Specifies the format of the CompletionReport. REPORT_CSV_20191124 is the only format currently supported. When Format is set to REPORT_CSV_20191124, the CompletionReport is provided in CSV format, and is delivered to {path}/task-{id}/failures.csv. public let format: ReportFormat? - /// Required if Enabled is set to true. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. The Path you provide must be located within the file system’s ExportPath. An example Path value is "s3://myBucket/myExportPath/optionalPrefix". The report provides the following information for each file in the report: FilePath, FileStatus, and ErrorCode. + /// Required if Enabled is set to true. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. The Path you provide must be located within the file system’s ExportPath. An example Path value is "s3://amzn-s3-demo-bucket/myExportPath/optionalPrefix". The report provides the following information for each file in the report: FilePath, FileStatus, and ErrorCode. public let path: String? /// Required if Enabled is set to true. 
Specifies the scope of the CompletionReport; FAILED_FILES_ONLY is the only scope currently supported. When Scope is set to FAILED_FILES_ONLY, the CompletionReport only contains information about files that the data repository task failed to process. public let scope: ReportScope? @@ -1076,7 +1076,7 @@ extension FSx { /// Set to true to run an import data repository task to import metadata from the data repository to the file system after the data repository association is created. Default is false. public let batchImportMetaDataOnCreate: Bool? public let clientRequestToken: String? - /// The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to. + /// The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional). This path specifies where in the S3 data repository files will be imported from or exported to. public let dataRepositoryPath: String? public let fileSystemId: String? /// A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. If you specify only a forward slash (/) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system. @@ -1153,7 +1153,7 @@ extension FSx { public let capacityToRelease: Int64? public let clientRequestToken: String? public let fileSystemId: String? - /// A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all exported files that meet the last accessed time criteria (for release tasks). For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1. For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix (where myPrefix is optional). For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release exported files. If a directory is specified, files within the directory are released. 
If a file path is specified, only that file is released. To release all exported files in the file system, specify a forward slash (/) as the path. A file must also meet the last accessed time criteria specified in for the file to be released. + /// A list of paths for the data repository task to use when the task is processed. If a path that you provide isn't valid, the task fails. If you don't provide paths, the default behavior is to export all files to S3 (for export tasks), import all files from S3 (for import tasks), or release all exported files that meet the last accessed time criteria (for release tasks). For export tasks, the list contains paths on the FSx for Lustre file system from which the files are exported to the Amazon S3 bucket. The default path is the file system root directory. The paths you provide need to be relative to the mount point of the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to provide is path1. For import tasks, the list contains paths in the Amazon S3 bucket from which POSIX metadata changes are imported to the FSx for Lustre file system. The path can be an S3 bucket or prefix in the format s3://bucket-name/prefix (where prefix is optional). For release tasks, the list contains directory or file paths on the FSx for Lustre file system from which to release exported files. If a directory is specified, files within the directory are released. If a file path is specified, only that file is released. To release all exported files in the file system, specify a forward slash (/) as the path. A file must also meet the last accessed time criteria specified in for the file to be released. public let paths: [String]? /// The configuration that specifies the last accessed time criteria for files that will be released from an Amazon FSx for Lustre file system. public let releaseConfiguration: ReleaseConfiguration? @@ -2450,7 +2450,7 @@ extension FSx { /// A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to true. BatchImportMetaDataOnCreate is not supported for data repositories linked to an Amazon File Cache resource. public let batchImportMetaDataOnCreate: Bool? public let creationTime: Date? - /// The path to the data repository that will be linked to the cache or file system. For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. The path can be in one of two formats: If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nsf://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association. If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter. For Amazon File Cache, the path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. + /// The path to the data repository that will be linked to the cache or file system. For Amazon File Cache, the path can be an NFS data repository that will be linked to the cache. 
The path can be in one of two formats: If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nsf://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association. If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter. For Amazon File Cache, the path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional). For Amazon FSx for Lustre, the path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional). public let dataRepositoryPath: String? /// For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format /exportpath1. To use this parameter, you must configure DataRepositoryPath as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories is not supported for S3 data repositories. public let dataRepositorySubdirectories: [String]? @@ -4032,7 +4032,7 @@ extension FSx { } public struct FileCacheDataRepositoryAssociation: AWSEncodableShape { - /// The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths: The path can be an NFS data repository that links to the cache. The path can be in one of two formats: If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nfs://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association. If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. + /// The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths: The path can be an NFS data repository that links to the cache. The path can be in one of two formats: If you are not using the DataRepositorySubdirectories parameter, the path is to an NFS Export directory (or one of its subdirectories) in the format nfs://nfs-domain-name/exportpath. You can therefore link a single NFS Export to a single data repository association. If you are using the DataRepositorySubdirectories parameter, the path is the domain name of the NFS file system in the format nfs://filer-domain-name, which indicates the root of the subdirectories specified with the DataRepositorySubdirectories parameter. The path can be an S3 bucket or prefix in the format s3://bucket-name/prefix/ (where prefix is optional). public let dataRepositoryPath: String? /// A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format /exportpath1. To use this parameter, you must configure DataRepositoryPath as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. 
Note that DataRepositorySubdirectories is not supported for S3 data repositories. public let dataRepositorySubdirectories: [String]? diff --git a/Sources/Soto/Services/Firehose/Firehose_api.swift b/Sources/Soto/Services/Firehose/Firehose_api.swift index fa776503f8..db3a59f2e3 100644 --- a/Sources/Soto/Services/Firehose/Firehose_api.swift +++ b/Sources/Soto/Services/Firehose/Firehose_api.swift @@ -82,8 +82,43 @@ public struct Firehose: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.dualstack]: .init(endpoints: [ + "af-south-1": "firehose.af-south-1.api.aws", + "ap-east-1": "firehose.ap-east-1.api.aws", + "ap-northeast-1": "firehose.ap-northeast-1.api.aws", + "ap-northeast-2": "firehose.ap-northeast-2.api.aws", + "ap-northeast-3": "firehose.ap-northeast-3.api.aws", + "ap-south-1": "firehose.ap-south-1.api.aws", + "ap-south-2": "firehose.ap-south-2.api.aws", + "ap-southeast-1": "firehose.ap-southeast-1.api.aws", + "ap-southeast-2": "firehose.ap-southeast-2.api.aws", + "ap-southeast-3": "firehose.ap-southeast-3.api.aws", + "ap-southeast-4": "firehose.ap-southeast-4.api.aws", + "ca-central-1": "firehose.ca-central-1.api.aws", + "ca-west-1": "firehose.ca-west-1.api.aws", "cn-north-1": "firehose.cn-north-1.api.amazonwebservices.com.cn", - "cn-northwest-1": "firehose.cn-northwest-1.api.amazonwebservices.com.cn" + "cn-northwest-1": "firehose.cn-northwest-1.api.amazonwebservices.com.cn", + "eu-central-1": "firehose.eu-central-1.api.aws", + "eu-central-2": "firehose.eu-central-2.api.aws", + "eu-north-1": "firehose.eu-north-1.api.aws", + "eu-south-1": "firehose.eu-south-1.api.aws", + "eu-south-2": "firehose.eu-south-2.api.aws", + "eu-west-1": "firehose.eu-west-1.api.aws", + "eu-west-2": "firehose.eu-west-2.api.aws", + "eu-west-3": "firehose.eu-west-3.api.aws", + "il-central-1": "firehose.il-central-1.api.aws", + "me-central-1": "firehose.me-central-1.api.aws", + "me-south-1": "firehose.me-south-1.api.aws", + "sa-east-1": "firehose.sa-east-1.api.aws", + "us-east-1": "firehose.us-east-1.api.aws", + "us-east-2": "firehose.us-east-2.api.aws", + "us-west-1": "firehose.us-west-1.api.aws", + "us-west-2": "firehose.us-west-2.api.aws" + ]), + [.dualstack, .fips]: .init(endpoints: [ + "us-east-1": "firehose-fips.us-east-1.api.aws", + "us-east-2": "firehose-fips.us-east-2.api.aws", + "us-west-1": "firehose-fips.us-west-1.api.aws", + "us-west-2": "firehose-fips.us-west-2.api.aws" ]), [.fips]: .init(endpoints: [ "us-east-1": "firehose-fips.us-east-1.amazonaws.com", diff --git a/Sources/Soto/Services/Glue/Glue_api.swift b/Sources/Soto/Services/Glue/Glue_api.swift index 199ab9cc38..c5f347caea 100644 --- a/Sources/Soto/Services/Glue/Glue_api.swift +++ b/Sources/Soto/Services/Glue/Glue_api.swift @@ -7307,6 +7307,38 @@ public struct Glue: AWSService { return try await self.tagResource(input, logger: logger) } + /// Tests a connection to a service to validate the service credentials that you provide. You can either provide an existing connection name or a TestConnectionInput for testing a non-existing connection input. Providing both at the same time will cause an error. If the action is successful, the service sends back an HTTP 200 response. 
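A minimal usage sketch for the TestConnection operation added here, assuming an AWSClient named client has already been configured elsewhere; the region and connection name are illustrative placeholders, and the convenience overload used below is the one introduced in this diff:

    import SotoGlue

    let glue = Glue(client: client, region: .useast1)
    // Validate the credentials of an existing Data Catalog connection by name.
    // Supplying a TestConnectionInput instead (but not both) would test a connection
    // that has not been created yet; a successful call simply returns without throwing.
    _ = try await glue.testConnection(connectionName: "my-jdbc-connection")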
+ @Sendable + @inlinable + public func testConnection(_ input: TestConnectionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TestConnectionResponse { + try await self.client.execute( + operation: "TestConnection", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Tests a connection to a service to validate the service credentials that you provide. You can either provide an existing connection name or a TestConnectionInput for testing a non-existing connection input. Providing both at the same time will cause an error. If the action is successful, the service sends back an HTTP 200 response. + /// + /// Parameters: + /// - connectionName: Optional. The name of the connection to test. If only name is provided, the operation will get the connection and use that for testing. + /// - testConnectionInput: A structure that is used to specify testing a connection to a service. + /// - logger: Logger use during operation + @inlinable + public func testConnection( + connectionName: String? = nil, + testConnectionInput: TestConnectionInput? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TestConnectionResponse { + let input = TestConnectionRequest( + connectionName: connectionName, + testConnectionInput: testConnectionInput + ) + return try await self.testConnection(input, logger: logger) + } + /// Removes tags from a resource. @Sendable @inlinable diff --git a/Sources/Soto/Services/Glue/Glue_shapes.swift b/Sources/Soto/Services/Glue/Glue_shapes.swift index 83a5cc8a66..4b64e27af8 100644 --- a/Sources/Soto/Services/Glue/Glue_shapes.swift +++ b/Sources/Soto/Services/Glue/Glue_shapes.swift @@ -4151,6 +4151,8 @@ extension Glue { } public struct Connection: AWSDecodableShape { + /// This field is not currently used. + public let athenaProperties: [String: String]? /// The authentication properties of the connection. public let authenticationConfiguration: AuthenticationConfiguration? /// These key-value pairs define parameters for the connection: HOST - The host URI: either the fully qualified domain name (FQDN) or the IPv4 address of the database host. PORT - The port number, between 1024 and 65535, of the port on which the database host is listening for database connections. USER_NAME - The name under which to log in to the database. The value string for USER_NAME is "USERNAME". PASSWORD - A password, if one is used, for the user name. ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password. JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver to use. JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use. JDBC_ENGINE - The name of the JDBC engine to use. JDBC_ENGINE_VERSION - The version of the JDBC engine to use. CONFIG_FILES - (Reserved for future use.) INSTANCE_ID - The instance ID to use. JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source. JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the client. The default is false. CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. 
The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format. SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate. CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate. CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source. SECRET_ID - The secret ID used for the secret manager of credentials. CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection. CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection. CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection. KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself. KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is "true". KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string. KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is "false". KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional). KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional). KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional). ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected). ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_MECHANISM - "SCRAM-SHA-512", "GSSAPI", "AWS_MSK_IAM", or "PLAIN". These are the supported SASL Mechanisms. KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the "PLAIN" mechanism. KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the "PLAIN" mechanism. ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the "SCRAM-SHA-512" mechanism. KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the "SCRAM-SHA-512" mechanism. ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected). KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager. 
KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab. KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf. KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration. KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos princial used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers. ROLE_ARN - The role to be used for running queries. REGION - The Amazon Web Services Region where queries will be run. WORKGROUP_NAME - The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run. CLUSTER_IDENTIFIER - The cluster identifier of an Amazon Redshift cluster in which queries will run. DATABASE - The Amazon Redshift database that you are connecting to. @@ -4179,7 +4181,8 @@ extension Glue { public let statusReason: String? @inlinable - public init(authenticationConfiguration: AuthenticationConfiguration? = nil, connectionProperties: [ConnectionPropertyKey: String]? = nil, connectionType: ConnectionType? = nil, creationTime: Date? = nil, description: String? = nil, lastConnectionValidationTime: Date? = nil, lastUpdatedBy: String? = nil, lastUpdatedTime: Date? = nil, matchCriteria: [String]? = nil, name: String? = nil, physicalConnectionRequirements: PhysicalConnectionRequirements? = nil, status: ConnectionStatus? = nil, statusReason: String? = nil) { + public init(athenaProperties: [String: String]? = nil, authenticationConfiguration: AuthenticationConfiguration? = nil, connectionProperties: [ConnectionPropertyKey: String]? = nil, connectionType: ConnectionType? = nil, creationTime: Date? = nil, description: String? = nil, lastConnectionValidationTime: Date? = nil, lastUpdatedBy: String? = nil, lastUpdatedTime: Date? = nil, matchCriteria: [String]? = nil, name: String? = nil, physicalConnectionRequirements: PhysicalConnectionRequirements? = nil, status: ConnectionStatus? = nil, statusReason: String? = nil) { + self.athenaProperties = athenaProperties self.authenticationConfiguration = authenticationConfiguration self.connectionProperties = connectionProperties self.connectionType = connectionType @@ -4196,6 +4199,7 @@ extension Glue { } private enum CodingKeys: String, CodingKey { + case athenaProperties = "AthenaProperties" case authenticationConfiguration = "AuthenticationConfiguration" case connectionProperties = "ConnectionProperties" case connectionType = "ConnectionType" @@ -4213,6 +4217,8 @@ extension Glue { } public struct ConnectionInput: AWSEncodableShape { + /// This field is not currently used. + public let athenaProperties: [String: String]? /// The authentication properties of the connection. Used for a Salesforce connection. public let authenticationConfiguration: AuthenticationConfigurationInput? /// These key-value pairs define parameters for the connection. @@ -4231,7 +4237,8 @@ extension Glue { public let validateCredentials: Bool? @inlinable - public init(authenticationConfiguration: AuthenticationConfigurationInput? = nil, connectionProperties: [ConnectionPropertyKey: String], connectionType: ConnectionType, description: String? = nil, matchCriteria: [String]? 
= nil, name: String, physicalConnectionRequirements: PhysicalConnectionRequirements? = nil, validateCredentials: Bool? = nil) { + public init(athenaProperties: [String: String]? = nil, authenticationConfiguration: AuthenticationConfigurationInput? = nil, connectionProperties: [ConnectionPropertyKey: String], connectionType: ConnectionType, description: String? = nil, matchCriteria: [String]? = nil, name: String, physicalConnectionRequirements: PhysicalConnectionRequirements? = nil, validateCredentials: Bool? = nil) { + self.athenaProperties = athenaProperties self.authenticationConfiguration = authenticationConfiguration self.connectionProperties = connectionProperties self.connectionType = connectionType @@ -4243,6 +4250,12 @@ extension Glue { } public func validate(name: String) throws { + try self.athenaProperties?.forEach { + try validate($0.key, name: "athenaProperties.key", parent: name, max: 128) + try validate($0.key, name: "athenaProperties.key", parent: name, min: 1) + try validate($0.value, name: "athenaProperties[\"\($0.key)\"]", parent: name, max: 2048) + try validate($0.value, name: "athenaProperties[\"\($0.key)\"]", parent: name, min: 1) + } try self.authenticationConfiguration?.validate(name: "\(name).authenticationConfiguration") try self.connectionProperties.forEach { try validate($0.value, name: "connectionProperties[\"\($0.key)\"]", parent: name, max: 1024) @@ -4263,6 +4276,7 @@ extension Glue { } private enum CodingKeys: String, CodingKey { + case athenaProperties = "AthenaProperties" case authenticationConfiguration = "AuthenticationConfiguration" case connectionProperties = "ConnectionProperties" case connectionType = "ConnectionType" @@ -22579,6 +22593,65 @@ extension Glue { } } + public struct TestConnectionInput: AWSEncodableShape { + /// A structure containing the authentication configuration in the TestConnection request. Required for a connection to Salesforce using OAuth authentication. + public let authenticationConfiguration: AuthenticationConfigurationInput? + /// The key-value pairs that define parameters for the connection. JDBC connections use the following connection properties: Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL. Required: All of (USERNAME, PASSWORD) or SECRET_ID. Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC. SALESFORCE connections require the AuthenticationConfiguration member to be configured. + public let connectionProperties: [ConnectionPropertyKey: String] + /// The type of connection to test. This operation is only available for the JDBC or SALESFORCE connection types. + public let connectionType: ConnectionType + + @inlinable + public init(authenticationConfiguration: AuthenticationConfigurationInput? 
= nil, connectionProperties: [ConnectionPropertyKey: String], connectionType: ConnectionType) { + self.authenticationConfiguration = authenticationConfiguration + self.connectionProperties = connectionProperties + self.connectionType = connectionType + } + + public func validate(name: String) throws { + try self.authenticationConfiguration?.validate(name: "\(name).authenticationConfiguration") + try self.connectionProperties.forEach { + try validate($0.value, name: "connectionProperties[\"\($0.key)\"]", parent: name, max: 1024) + } + try self.validate(self.connectionProperties, name: "connectionProperties", parent: name, max: 100) + } + + private enum CodingKeys: String, CodingKey { + case authenticationConfiguration = "AuthenticationConfiguration" + case connectionProperties = "ConnectionProperties" + case connectionType = "ConnectionType" + } + } + + public struct TestConnectionRequest: AWSEncodableShape { + /// Optional. The name of the connection to test. If only name is provided, the operation will get the connection and use that for testing. + public let connectionName: String? + /// A structure that is used to specify testing a connection to a service. + public let testConnectionInput: TestConnectionInput? + + @inlinable + public init(connectionName: String? = nil, testConnectionInput: TestConnectionInput? = nil) { + self.connectionName = connectionName + self.testConnectionInput = testConnectionInput + } + + public func validate(name: String) throws { + try self.validate(self.connectionName, name: "connectionName", parent: name, max: 255) + try self.validate(self.connectionName, name: "connectionName", parent: name, min: 1) + try self.validate(self.connectionName, name: "connectionName", parent: name, pattern: "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$") + try self.testConnectionInput?.validate(name: "\(name).testConnectionInput") + } + + private enum CodingKeys: String, CodingKey { + case connectionName = "ConnectionName" + case testConnectionInput = "TestConnectionInput" + } + } + + public struct TestConnectionResponse: AWSDecodableShape { + public init() {} + } + public struct TimestampFilter: AWSEncodableShape { /// The timestamp after which statistics should be included in the results. public let recordedAfter: Date? diff --git a/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift b/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift index c13c658337..3ffc5316c2 100644 --- a/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift +++ b/Sources/Soto/Services/GuardDuty/GuardDuty_shapes.swift @@ -2981,6 +2981,8 @@ extension GuardDuty { public let definitionArn: String? /// The name of the task group that's associated with the task. public let group: String? + /// A capacity on which the task is running. For example, Fargate and EC2. + public let launchType: String? /// The Unix timestamp for the time when the task started. public let startedAt: Date? /// Contains the tag specified when a task is started. @@ -2995,11 +2997,12 @@ extension GuardDuty { public let volumes: [Volume]? @inlinable - public init(arn: String? = nil, containers: [Container]? = nil, definitionArn: String? = nil, group: String? = nil, startedAt: Date? = nil, startedBy: String? = nil, tags: [Tag]? = nil, taskCreatedAt: Date? = nil, version: String? = nil, volumes: [Volume]? = nil) { + public init(arn: String? = nil, containers: [Container]? = nil, definitionArn: String? = nil, group: String? = nil, launchType: String? = nil, startedAt: Date? = nil, startedBy: String? 
= nil, tags: [Tag]? = nil, taskCreatedAt: Date? = nil, version: String? = nil, volumes: [Volume]? = nil) { self.arn = arn self.containers = containers self.definitionArn = definitionArn self.group = group + self.launchType = launchType self.startedAt = startedAt self.startedBy = startedBy self.tags = tags @@ -3013,6 +3016,7 @@ extension GuardDuty { case containers = "containers" case definitionArn = "definitionArn" case group = "group" + case launchType = "launchType" case startedAt = "startedAt" case startedBy = "startedBy" case tags = "tags" @@ -4529,7 +4533,7 @@ extension GuardDuty { case requestUri = "requestUri" case resource = "resource" case resourceName = "resourceName" - case sourceIps = "sourceIps" + case sourceIps = "sourceIPs" case statusCode = "statusCode" case subresource = "subresource" case userAgent = "userAgent" @@ -5845,6 +5849,8 @@ extension GuardDuty { public let connectionDirection: String? /// The local IP information of the connection. public let localIpDetails: LocalIpDetails? + /// The EC2 instance's local elastic network interface utilized for the connection. + public let localNetworkInterface: String? /// The local port information of the connection. public let localPortDetails: LocalPortDetails? /// The network connection protocol. @@ -5855,10 +5861,11 @@ extension GuardDuty { public let remotePortDetails: RemotePortDetails? @inlinable - public init(blocked: Bool? = nil, connectionDirection: String? = nil, localIpDetails: LocalIpDetails? = nil, localPortDetails: LocalPortDetails? = nil, protocol: String? = nil, remoteIpDetails: RemoteIpDetails? = nil, remotePortDetails: RemotePortDetails? = nil) { + public init(blocked: Bool? = nil, connectionDirection: String? = nil, localIpDetails: LocalIpDetails? = nil, localNetworkInterface: String? = nil, localPortDetails: LocalPortDetails? = nil, protocol: String? = nil, remoteIpDetails: RemoteIpDetails? = nil, remotePortDetails: RemotePortDetails? = nil) { self.blocked = blocked self.connectionDirection = connectionDirection self.localIpDetails = localIpDetails + self.localNetworkInterface = localNetworkInterface self.localPortDetails = localPortDetails self.`protocol` = `protocol` self.remoteIpDetails = remoteIpDetails @@ -5869,6 +5876,7 @@ extension GuardDuty { case blocked = "blocked" case connectionDirection = "connectionDirection" case localIpDetails = "localIpDetails" + case localNetworkInterface = "localNetworkInterface" case localPortDetails = "localPortDetails" case `protocol` = "protocol" case remoteIpDetails = "remoteIpDetails" diff --git a/Sources/Soto/Services/IVS/IVS_shapes.swift b/Sources/Soto/Services/IVS/IVS_shapes.swift index 7c2d0cc920..53fbb0918a 100644 --- a/Sources/Soto/Services/IVS/IVS_shapes.swift +++ b/Sources/Soto/Services/IVS/IVS_shapes.swift @@ -1847,6 +1847,15 @@ extension IVS { } public struct StreamEvent: AWSDecodableShape { + /// Provides additional details about the stream event. There are several values; note that + /// the long descriptions are provided in the IVS console but not delivered through + /// the IVS API or EventBridge: StreamTakeoverMediaMismatch — The broadcast client attempted to take over + /// with different media properties (e.g., codec, resolution, or video track type) from the + /// original stream. StreamTakeoverInvalidPriority — The broadcast client attempted a takeover + /// with either a priority integer value equal to or lower than the original stream's value or a value outside + /// the allowed range of 1 to 2,147,483,647. 
StreamTakeoverLimitBreached — The broadcast client reached the maximum allowed + /// takeover attempts for this stream. + public let code: String? /// Time when the event occurred. This is an ISO 8601 timestamp; note that this is returned as a string. @OptionalCustomCoding public var eventTime: Date? @@ -1856,13 +1865,15 @@ extension IVS { public let type: String? @inlinable - public init(eventTime: Date? = nil, name: String? = nil, type: String? = nil) { + public init(code: String? = nil, eventTime: Date? = nil, name: String? = nil, type: String? = nil) { + self.code = code self.eventTime = eventTime self.name = name self.type = type } private enum CodingKeys: String, CodingKey { + case code = "code" case eventTime = "eventTime" case name = "name" case type = "type" diff --git a/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift b/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift index 262e433a4a..cbe14035d8 100644 --- a/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift +++ b/Sources/Soto/Services/IVSRealTime/IVSRealTime_shapes.swift @@ -46,9 +46,12 @@ extension IVSRealTime { } public enum EventErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case bFramePresent = "B_FRAME_PRESENT" case bitrateExceeded = "BITRATE_EXCEEDED" case insufficientCapabilities = "INSUFFICIENT_CAPABILITIES" + case internalServerException = "INTERNAL_SERVER_EXCEPTION" case invalidAudioCodec = "INVALID_AUDIO_CODEC" + case invalidInput = "INVALID_INPUT" case invalidProtocol = "INVALID_PROTOCOL" case invalidStreamKey = "INVALID_STREAM_KEY" case invalidVideoCodec = "INVALID_VIDEO_CODEC" @@ -896,7 +899,41 @@ extension IVSRealTime { } public struct Event: AWSDecodableShape { - /// If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an action that the participant’s token is not allowed to do. For more information about participant capabilities, see the capabilities field in CreateParticipantToken. QUOTA_EXCEEDED indicates that the number of participants who want to publish/subscribe to a stage exceeds the quota; for more information, see Service Quotas. PUBLISHER_NOT_FOUND indicates that the participant tried to subscribe to a publisher that doesn’t exist. + /// If the event is an error event, the error code is provided to give insight into the specific error that occurred. If the event is not an error event, this field is null. B_FRAME_PRESENT — + /// The participant's stream includes B-frames. + /// For details, see + /// IVS RTMP Publishing. BITRATE_EXCEEDED — + /// The participant exceeded the maximum supported bitrate. + /// For details, see + /// Service Quotas. INSUFFICIENT_CAPABILITIES — + /// The participant tried to take an action + /// that the participant’s token is not allowed to do. For details on participant capabilities, see + /// the capabilities field in CreateParticipantToken. INTERNAL_SERVER_EXCEPTION — + /// The participant failed to publish to the stage due to an internal server error. INVALID_AUDIO_CODEC — + /// The participant is using an invalid audio codec. + /// For details, see + /// Stream Ingest. INVALID_INPUT — + /// The participant is using an invalid input stream. INVALID_PROTOCOL — + /// The participant's IngestConfiguration resource is configured for RTMPS but they tried streaming with RTMP. 
+ /// For details, see + /// IVS RTMP Publishing. INVALID_STREAM_KEY — + /// The participant is using an invalid stream key. + /// For details, see + /// IVS RTMP Publishing. INVALID_VIDEO_CODEC — + /// The participant is using an invalid video codec. + /// For details, see + /// Stream Ingest. PUBLISHER_NOT_FOUND — + /// The participant tried to subscribe to a publisher that doesn’t exist. QUOTA_EXCEEDED — + /// The number of participants who want to publish/subscribe to a stage exceeds the quota. + /// For details, see + /// Service Quotas. RESOLUTION_EXCEEDED — + /// The participant exceeded the maximum supported resolution. + /// For details, see + /// Service Quotas. REUSE_OF_STREAM_KEY — + /// The participant tried to use a stream key that is associated with another active stage session. STREAM_DURATION_EXCEEDED — + /// The participant exceeded the maximum allowed stream duration. + /// For details, see + /// Service Quotas. public let errorCode: EventErrorCode? /// ISO 8601 timestamp (returned as a string) for when the event occurred. @OptionalCustomCoding @@ -2756,9 +2793,9 @@ extension IVSRealTime { public let bitrate: Int? /// Video frame rate, in fps. Default: 30. public let framerate: Float? - /// Video-resolution height. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 720. + /// Video-resolution height. This must be an even number. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 720. public let height: Int? - /// Video-resolution width. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280. + /// Video-resolution width. This must be an even number. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280. public let width: Int? @inlinable @@ -2775,9 +2812,9 @@ extension IVSRealTime { try self.validate(self.framerate, name: "framerate", parent: name, max: 60.0) try self.validate(self.framerate, name: "framerate", parent: name, min: 1.0) try self.validate(self.height, name: "height", parent: name, max: 1920) - try self.validate(self.height, name: "height", parent: name, min: 1) + try self.validate(self.height, name: "height", parent: name, min: 2) try self.validate(self.width, name: "width", parent: name, max: 1920) - try self.validate(self.width, name: "width", parent: name, min: 1) + try self.validate(self.width, name: "width", parent: name, min: 2) } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/IoT/IoT_api.swift b/Sources/Soto/Services/IoT/IoT_api.swift index 03b7620e15..42ac181df7 100644 --- a/Sources/Soto/Services/IoT/IoT_api.swift +++ b/Sources/Soto/Services/IoT/IoT_api.swift @@ -203,6 +203,44 @@ public struct IoT: AWSService { return try await self.addThingToThingGroup(input, logger: logger) } + /// Associates the selected software bill of materials (SBOM) with a specific software package version. Requires permission to access the AssociateSbomWithPackageVersion action. 
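A hedged sketch of calling the new AssociateSbomWithPackageVersion convenience method declared just below, assuming an existing IoT service object named iot and an already-built IoT.Sbom value named sbom (the Sbom initializer is not part of this hunk); package and version names are hypothetical:

let association = try await iot.associateSbomWithPackageVersion(
    packageName: "sensor-firmware",
    sbom: sbom,
    versionName: "1.0.0"
)
// The response reports the initial SPDX/CycloneDX validation status.
if let status = association.sbomValidationStatus {
    print("SBOM validation status: \(status)")
}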
+ @Sendable + @inlinable + public func associateSbomWithPackageVersion(_ input: AssociateSbomWithPackageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateSbomWithPackageVersionResponse { + try await self.client.execute( + operation: "AssociateSbomWithPackageVersion", + path: "/packages/{packageName}/versions/{versionName}/sbom", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Associates the selected software bill of materials (SBOM) with a specific software package version. Requires permission to access the AssociateSbomWithPackageVersion action. + /// + /// Parameters: + /// - clientToken: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. + /// - packageName: The name of the new software package. + /// - sbom: + /// - versionName: The name of the new package version. + /// - logger: Logger use during operation + @inlinable + public func associateSbomWithPackageVersion( + clientToken: String? = AssociateSbomWithPackageVersionRequest.idempotencyToken(), + packageName: String, + sbom: Sbom, + versionName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> AssociateSbomWithPackageVersionResponse { + let input = AssociateSbomWithPackageVersionRequest( + clientToken: clientToken, + packageName: packageName, + sbom: sbom, + versionName: versionName + ) + return try await self.associateSbomWithPackageVersion(input, logger: logger) + } + /// Associates a group with a continuous job. The following criteria must be met: The job must have been created with the targetSelection field set to "CONTINUOUS". The job status must currently be "IN_PROGRESS". The total number of targets associated with a job must not exceed 100. Requires permission to access the AssociateTargetsWithJob action. @Sendable @inlinable @@ -715,7 +753,9 @@ public struct IoT: AWSService { return try await self.createAuthorizer(input, logger: logger) } - /// Creates a billing group. Requires permission to access the CreateBillingGroup action. + /// Creates a billing group. If this call is made multiple times using + /// the same billing group name and configuration, the call will succeed. If this call is made with + /// the same billing group name but different configuration a ResourceAlreadyExistsException is thrown. Requires permission to access the CreateBillingGroup action. @Sendable @inlinable public func createBillingGroup(_ input: CreateBillingGroupRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateBillingGroupResponse { @@ -728,7 +768,9 @@ public struct IoT: AWSService { logger: logger ) } - /// Creates a billing group. Requires permission to access the CreateBillingGroup action. + /// Creates a billing group. If this call is made multiple times using + /// the same billing group name and configuration, the call will succeed. If this call is made with + /// the same billing group name but different configuration a ResourceAlreadyExistsException is thrown. Requires permission to access the CreateBillingGroup action. /// /// Parameters: /// - billingGroupName: The name you wish to give to the billing group. @@ -921,7 +963,10 @@ public struct IoT: AWSService { /// Creates a domain configuration. Requires permission to access the CreateDomainConfiguration action. /// /// Parameters: + /// - applicationProtocol: An enumerated string that specifies the application-layer protocol. 
SECURE_MQTT - MQTT over TLS. MQTT_WSS - MQTT over WebSocket. HTTPS - HTTP over TLS. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify application_layer protocol. For more information, see Device communication protocols. + /// - authenticationType: An enumerated string that specifies the authentication type. CUSTOM_AUTH_X509 - Use custom authentication and authorization with additional details from the X.509 client certificate. CUSTOM_AUTH - Use custom authentication and authorization. For more information, see Custom authentication and authorization. AWS_X509 - Use X.509 client certificates without custom authentication and authorization. For more information, see X.509 client certificates. AWS_SIGV4 - Use Amazon Web Services Signature Version 4. For more information, see IAM users, groups, and roles. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify authentication type. For more information, see Device communication protocols. /// - authorizerConfig: An object that specifies the authorization service for a domain. + /// - clientCertificateConfig: An object that specifies the client certificate configuration for a domain. /// - domainConfigurationName: The name of the domain configuration. This value must be unique to a region. /// - domainName: The name of the domain. /// - serverCertificateArns: The ARNs of the certificates that IoT passes to the device during the TLS handshake. Currently you can specify only one certificate ARN. This value is not required for Amazon Web Services-managed domains. @@ -933,7 +978,10 @@ public struct IoT: AWSService { /// - logger: Logger use during operation @inlinable public func createDomainConfiguration( + applicationProtocol: ApplicationProtocol? = nil, + authenticationType: AuthenticationType? = nil, authorizerConfig: AuthorizerConfig? = nil, + clientCertificateConfig: ClientCertificateConfig? = nil, domainConfigurationName: String, domainName: String? = nil, serverCertificateArns: [String]? = nil, @@ -945,7 +993,10 @@ public struct IoT: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateDomainConfigurationResponse { let input = CreateDomainConfigurationRequest( + applicationProtocol: applicationProtocol, + authenticationType: authenticationType, authorizerConfig: authorizerConfig, + clientCertificateConfig: clientCertificateConfig, domainConfigurationName: domainConfigurationName, domainName: domainName, serverCertificateArns: serverCertificateArns, @@ -1386,28 +1437,34 @@ public struct IoT: AWSService { /// Creates a new version for an existing IoT software package. Requires permission to access the CreatePackageVersion and GetIndexingConfiguration actions. /// /// Parameters: + /// - artifact: The various build components created during the build process such as libraries and configuration files that make up a software package version. /// - attributes: Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet. The combined size of all the attributes on a package version is limited to 3KB. /// - clientToken: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. /// - description: A summary of the package version being created. This can be used to outline the package's contents or purpose. 
/// - packageName: The name of the associated software package. + /// - recipe: The inline job document associated with a software package version used for a quick job deployment. /// - tags: Metadata that can be used to manage the package version. /// - versionName: The name of the new package version. /// - logger: Logger use during operation @inlinable public func createPackageVersion( + artifact: PackageVersionArtifact? = nil, attributes: [String: String]? = nil, clientToken: String? = CreatePackageVersionRequest.idempotencyToken(), description: String? = nil, packageName: String, + recipe: String? = nil, tags: [String: String]? = nil, versionName: String, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreatePackageVersionResponse { let input = CreatePackageVersionRequest( + artifact: artifact, attributes: attributes, clientToken: clientToken, description: description, packageName: packageName, + recipe: recipe, tags: tags, versionName: versionName ) @@ -1598,7 +1655,7 @@ public struct IoT: AWSService { return try await self.createProvisioningTemplateVersion(input, logger: logger) } - /// Creates a role alias. Requires permission to access the CreateRoleAlias action. + /// Creates a role alias. Requires permission to access the CreateRoleAlias action. The value of credentialDurationSeconds must be less than or equal to the maximum session duration of the IAM role that the role alias references. For more information, see Modifying a role maximum session duration (Amazon Web Services API) from the Amazon Web Services Identity and Access Management User Guide. @Sendable @inlinable public func createRoleAlias(_ input: CreateRoleAliasRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateRoleAliasResponse { @@ -1611,7 +1668,7 @@ public struct IoT: AWSService { logger: logger ) } - /// Creates a role alias. Requires permission to access the CreateRoleAlias action. + /// Creates a role alias. Requires permission to access the CreateRoleAlias action. The value of credentialDurationSeconds must be less than or equal to the maximum session duration of the IAM role that the role alias references. For more information, see Modifying a role maximum session duration (Amazon Web Services API) from the Amazon Web Services Identity and Access Management User Guide. /// /// Parameters: /// - credentialDurationSeconds: How long (in seconds) the credentials will be valid. The default value is 3,600 seconds. This value must be less than or equal to the maximum session duration of the IAM role that the role alias references. @@ -1858,7 +1915,10 @@ public struct IoT: AWSService { return try await self.createThingGroup(input, logger: logger) } - /// Creates a new thing type. Requires permission to access the CreateThingType action. + /// Creates a new thing type. If this call is made multiple times using + /// the same thing type name and configuration, the call will succeed. If this call is made with + /// the same thing type name but different configuration a ResourceAlreadyExistsException is thrown. + /// Requires permission to access the CreateThingType action. @Sendable @inlinable public func createThingType(_ input: CreateThingTypeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateThingTypeResponse { @@ -1871,7 +1931,10 @@ public struct IoT: AWSService { logger: logger ) } - /// Creates a new thing type. Requires permission to access the CreateThingType action. + /// Creates a new thing type. 
If this call is made multiple times using + /// the same thing type name and configuration, the call will succeed. If this call is made with + /// the same thing type name but different configuration a ResourceAlreadyExistsException is thrown. + /// Requires permission to access the CreateThingType action. /// /// Parameters: /// - tags: Metadata which can be used to manage the thing type. @@ -3607,14 +3670,17 @@ public struct IoT: AWSService { /// Describes a job. Requires permission to access the DescribeJob action. /// /// Parameters: + /// - beforeSubstitution: A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values. /// - jobId: The unique identifier you assigned to this job when it was created. /// - logger: Logger use during operation @inlinable public func describeJob( + beforeSubstitution: Bool? = nil, jobId: String, logger: Logger = AWSClient.loggingDisabled ) async throws -> DescribeJobResponse { let input = DescribeJobRequest( + beforeSubstitution: beforeSubstitution, jobId: jobId ) return try await self.describeJob(input, logger: logger) @@ -4201,6 +4267,41 @@ public struct IoT: AWSService { return try await self.disableTopicRule(input, logger: logger) } + /// Disassociates the selected software bill of materials (SBOM) from a specific software package version. Requires permission to access the DisassociateSbomWithPackageVersion action. + @Sendable + @inlinable + public func disassociateSbomFromPackageVersion(_ input: DisassociateSbomFromPackageVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateSbomFromPackageVersionResponse { + try await self.client.execute( + operation: "DisassociateSbomFromPackageVersion", + path: "/packages/{packageName}/versions/{versionName}/sbom", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Disassociates the selected software bill of materials (SBOM) from a specific software package version. Requires permission to access the DisassociateSbomWithPackageVersion action. + /// + /// Parameters: + /// - clientToken: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. + /// - packageName: The name of the new software package. + /// - versionName: The name of the new package version. + /// - logger: Logger use during operation + @inlinable + public func disassociateSbomFromPackageVersion( + clientToken: String? = DisassociateSbomFromPackageVersionRequest.idempotencyToken(), + packageName: String, + versionName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisassociateSbomFromPackageVersionResponse { + let input = DisassociateSbomFromPackageVersionRequest( + clientToken: clientToken, + packageName: packageName, + versionName: versionName + ) + return try await self.disassociateSbomFromPackageVersion(input, logger: logger) + } + /// Enables the rule. Requires permission to access the EnableTopicRule action. @Sendable @inlinable @@ -4421,14 +4522,17 @@ public struct IoT: AWSService { /// Gets a job document. Requires permission to access the GetJobDocument action. /// /// Parameters: + /// - beforeSubstitution: A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values. /// - jobId: The unique identifier you assigned to this job when it was created. 
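An illustrative call of the describeJob overload shown above, requesting the pre-substitution view of the job document (assumes an existing IoT service object named iot; the job ID is hypothetical):

let described = try await iot.describeJob(beforeSubstitution: true, jobId: "ota-deploy-42")
print(described)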
/// - logger: Logger use during operation @inlinable public func getJobDocument( + beforeSubstitution: Bool? = nil, jobId: String, logger: Logger = AWSClient.loggingDisabled ) async throws -> GetJobDocumentResponse { let input = GetJobDocumentRequest( + beforeSubstitution: beforeSubstitution, jobId: jobId ) return try await self.getJobDocument(input, logger: logger) @@ -6335,6 +6439,47 @@ public struct IoT: AWSService { return try await self.listRoleAliases(input, logger: logger) } + /// The validation results for all software bill of materials (SBOM) attached to a specific software package version. Requires permission to access the ListSbomValidationResults action. + @Sendable + @inlinable + public func listSbomValidationResults(_ input: ListSbomValidationResultsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSbomValidationResultsResponse { + try await self.client.execute( + operation: "ListSbomValidationResults", + path: "/packages/{packageName}/versions/{versionName}/sbom-validation-results", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// The validation results for all software bill of materials (SBOM) attached to a specific software package version. Requires permission to access the ListSbomValidationResults action. + /// + /// Parameters: + /// - maxResults: The maximum number of results to return at one time. + /// - nextToken: A token that can be used to retrieve the next set of results, or null if there are no additional results. + /// - packageName: The name of the new software package. + /// - validationResult: The end result of the + /// - versionName: The name of the new package version. + /// - logger: Logger use during operation + @inlinable + public func listSbomValidationResults( + maxResults: Int? = nil, + nextToken: String? = nil, + packageName: String, + validationResult: SbomValidationResult? = nil, + versionName: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSbomValidationResultsResponse { + let input = ListSbomValidationResultsRequest( + maxResults: maxResults, + nextToken: nextToken, + packageName: packageName, + validationResult: validationResult, + versionName: versionName + ) + return try await self.listSbomValidationResults(input, logger: logger) + } + /// Lists all of your scheduled audits. Requires permission to access the ListScheduledAudits action. @Sendable @inlinable @@ -8326,7 +8471,10 @@ public struct IoT: AWSService { /// Updates values stored in the domain configuration. Domain configurations for default endpoints can't be updated. Requires permission to access the UpdateDomainConfiguration action. /// /// Parameters: + /// - applicationProtocol: An enumerated string that specifies the application-layer protocol. SECURE_MQTT - MQTT over TLS. MQTT_WSS - MQTT over WebSocket. HTTPS - HTTP over TLS. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify application_layer protocol. For more information, see Device communication protocols. + /// - authenticationType: An enumerated string that specifies the authentication type. CUSTOM_AUTH_X509 - Use custom authentication and authorization with additional details from the X.509 client certificate. CUSTOM_AUTH - Use custom authentication and authorization. For more information, see Custom authentication and authorization. AWS_X509 - Use X.509 client certificates without custom authentication and authorization. For more information, see X.509 client certificates. 
AWS_SIGV4 - Use Amazon Web Services Signature Version 4. For more information, see IAM users, groups, and roles. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify authentication type. For more information, see Device communication protocols. /// - authorizerConfig: An object that specifies the authorization service for a domain. + /// - clientCertificateConfig: An object that specifies the client certificate configuration for a domain. /// - domainConfigurationName: The name of the domain configuration to be updated. /// - domainConfigurationStatus: The status to which the domain configuration should be updated. /// - removeAuthorizerConfig: Removes the authorization configuration from a domain. @@ -8335,7 +8483,10 @@ public struct IoT: AWSService { /// - logger: Logger use during operation @inlinable public func updateDomainConfiguration( + applicationProtocol: ApplicationProtocol? = nil, + authenticationType: AuthenticationType? = nil, authorizerConfig: AuthorizerConfig? = nil, + clientCertificateConfig: ClientCertificateConfig? = nil, domainConfigurationName: String, domainConfigurationStatus: DomainConfigurationStatus? = nil, removeAuthorizerConfig: Bool? = nil, @@ -8344,7 +8495,10 @@ public struct IoT: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateDomainConfigurationResponse { let input = UpdateDomainConfigurationRequest( + applicationProtocol: applicationProtocol, + authenticationType: authenticationType, authorizerConfig: authorizerConfig, + clientCertificateConfig: clientCertificateConfig, domainConfigurationName: domainConfigurationName, domainConfigurationStatus: domainConfigurationStatus, removeAuthorizerConfig: removeAuthorizerConfig, @@ -8690,28 +8844,34 @@ public struct IoT: AWSService { /// /// Parameters: /// - action: The status that the package version should be assigned. For more information, see Package version lifecycle. + /// - artifact: The various components that make up a software package version. /// - attributes: Metadata that can be used to define a package version’s configuration. For example, the Amazon S3 file location, configuration options that are being sent to the device or fleet. Note: Attributes can be updated only when the package version is in a draft state. The combined size of all the attributes on a package version is limited to 3KB. /// - clientToken: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. /// - description: The package version description. /// - packageName: The name of the associated software package. + /// - recipe: The inline job document associated with a software package version used for a quick job deployment. /// - versionName: The name of the target package version. /// - logger: Logger use during operation @inlinable public func updatePackageVersion( action: PackageVersionAction? = nil, + artifact: PackageVersionArtifact? = nil, attributes: [String: String]? = nil, clientToken: String? = UpdatePackageVersionRequest.idempotencyToken(), description: String? = nil, packageName: String, + recipe: String? 
= nil, versionName: String, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdatePackageVersionResponse { let input = UpdatePackageVersionRequest( action: action, + artifact: artifact, attributes: attributes, clientToken: clientToken, description: description, packageName: packageName, + recipe: recipe, versionName: versionName ) return try await self.updatePackageVersion(input, logger: logger) @@ -8764,7 +8924,7 @@ public struct IoT: AWSService { return try await self.updateProvisioningTemplate(input, logger: logger) } - /// Updates a role alias. Requires permission to access the UpdateRoleAlias action. + /// Updates a role alias. Requires permission to access the UpdateRoleAlias action. The value of credentialDurationSeconds must be less than or equal to the maximum session duration of the IAM role that the role alias references. For more information, see Modifying a role maximum session duration (Amazon Web Services API) from the Amazon Web Services Identity and Access Management User Guide. @Sendable @inlinable public func updateRoleAlias(_ input: UpdateRoleAliasRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateRoleAliasResponse { @@ -8777,7 +8937,7 @@ public struct IoT: AWSService { logger: logger ) } - /// Updates a role alias. Requires permission to access the UpdateRoleAlias action. + /// Updates a role alias. Requires permission to access the UpdateRoleAlias action. The value of credentialDurationSeconds must be less than or equal to the maximum session duration of the IAM role that the role alias references. For more information, see Modifying a role maximum session duration (Amazon Web Services API) from the Amazon Web Services Identity and Access Management User Guide. /// /// Parameters: /// - credentialDurationSeconds: The number of seconds the credential will be valid. This value must be less than or equal to the maximum session duration of the IAM role that the role alias references. @@ -10683,6 +10843,49 @@ extension IoT { return self.listRoleAliasesPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listSbomValidationResults(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listSbomValidationResultsPaginator( + _ input: ListSbomValidationResultsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSbomValidationResults, + inputKey: \ListSbomValidationResultsRequest.nextToken, + outputKey: \ListSbomValidationResultsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listSbomValidationResults(_:logger:)``. + /// + /// - Parameters: + /// - maxResults: The maximum number of results to return at one time. + /// - packageName: The name of the new software package. + /// - validationResult: The end result of the + /// - versionName: The name of the new package version. + /// - logger: Logger used for logging + @inlinable + public func listSbomValidationResultsPaginator( + maxResults: Int? = nil, + packageName: String, + validationResult: SbomValidationResult? 
= nil, + versionName: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListSbomValidationResultsRequest( + maxResults: maxResults, + packageName: packageName, + validationResult: validationResult, + versionName: versionName + ) + return self.listSbomValidationResultsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listScheduledAudits(_:logger:)``. /// /// - Parameters: @@ -11930,6 +12133,19 @@ extension IoT.ListRoleAliasesRequest: AWSPaginateToken { } } +extension IoT.ListSbomValidationResultsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> IoT.ListSbomValidationResultsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + packageName: self.packageName, + validationResult: self.validationResult, + versionName: self.versionName + ) + } +} + extension IoT.ListScheduledAuditsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> IoT.ListScheduledAuditsRequest { diff --git a/Sources/Soto/Services/IoT/IoT_shapes.swift b/Sources/Soto/Services/IoT/IoT_shapes.swift index 167dbc8720..49e129c43e 100644 --- a/Sources/Soto/Services/IoT/IoT_shapes.swift +++ b/Sources/Soto/Services/IoT/IoT_shapes.swift @@ -51,6 +51,14 @@ extension IoT { public var description: String { return self.rawValue } } + public enum ApplicationProtocol: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `default` = "DEFAULT" + case https = "HTTPS" + case mqttWss = "MQTT_WSS" + case secureMqtt = "SECURE_MQTT" + public var description: String { return self.rawValue } + } + public enum AuditCheckRunStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case canceled = "CANCELED" case completedCompliant = "COMPLETED_COMPLIANT" @@ -121,6 +129,15 @@ extension IoT { public var description: String { return self.rawValue } } + public enum AuthenticationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case `default` = "DEFAULT" + case awsSigv4 = "AWS_SIGV4" + case awsX509 = "AWS_X509" + case customAuth = "CUSTOM_AUTH" + case customAuthX509 = "CUSTOM_AUTH_X509" + public var description: String { return self.rawValue } + } + public enum AuthorizerStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case inactive = "INACTIVE" @@ -498,6 +515,25 @@ extension IoT { public var description: String { return self.rawValue } } + public enum SbomValidationErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fileSizeLimitExceeded = "FILE_SIZE_LIMIT_EXCEEDED" + case incompatibleFormat = "INCOMPATIBLE_FORMAT" + public var description: String { return self.rawValue } + } + + public enum SbomValidationResult: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "FAILED" + case succeeded = "SUCCEEDED" + public var description: String { return self.rawValue } + } + + public enum SbomValidationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case succeeded = "SUCCEEDED" + public var description: String { return self.rawValue } + } + public enum ServerCertificateStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case invalid = "INVALID" case valid = "VALID" @@ -1121,6 +1157,75 @@ extension IoT { } } + 
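A short sketch of driving the new ListSbomValidationResults paginator defined above, assuming an existing IoT service object named iot; package and version names are hypothetical:

for try await page in iot.listSbomValidationResultsPaginator(
    packageName: "sensor-firmware",
    validationResult: .failed,   // SbomValidationResult case introduced in this diff
    versionName: "1.0.0"
) {
    // Each element is a ListSbomValidationResultsResponse page.
    print(page)
}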
public struct AssociateSbomWithPackageVersionRequest: AWSEncodableShape { + /// A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. + public let clientToken: String? + /// The name of the new software package. + public let packageName: String + public let sbom: Sbom + /// The name of the new package version. + public let versionName: String + + @inlinable + public init(clientToken: String? = AssociateSbomWithPackageVersionRequest.idempotencyToken(), packageName: String, sbom: Sbom, versionName: String) { + self.clientToken = clientToken + self.packageName = packageName + self.sbom = sbom + self.versionName = versionName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.clientToken, key: "clientToken") + request.encodePath(self.packageName, key: "packageName") + try container.encode(self.sbom, forKey: .sbom) + request.encodePath(self.versionName, key: "versionName") + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 64) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 36) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^\\S{36,64}$") + try self.validate(self.packageName, name: "packageName", parent: name, max: 128) + try self.validate(self.packageName, name: "packageName", parent: name, min: 1) + try self.validate(self.packageName, name: "packageName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") + try self.sbom.validate(name: "\(name).sbom") + try self.validate(self.versionName, name: "versionName", parent: name, max: 64) + try self.validate(self.versionName, name: "versionName", parent: name, min: 1) + try self.validate(self.versionName, name: "versionName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") + } + + private enum CodingKeys: String, CodingKey { + case sbom = "sbom" + } + } + + public struct AssociateSbomWithPackageVersionResponse: AWSDecodableShape { + /// The name of the new software package. + public let packageName: String? + public let sbom: Sbom? + /// The status of the initial validation for the software bill of materials against the Software Package Data Exchange (SPDX) and CycloneDX industry standard formats. + public let sbomValidationStatus: SbomValidationStatus? + /// The name of the new package version. + public let versionName: String? + + @inlinable + public init(packageName: String? = nil, sbom: Sbom? = nil, sbomValidationStatus: SbomValidationStatus? = nil, versionName: String? = nil) { + self.packageName = packageName + self.sbom = sbom + self.sbomValidationStatus = sbomValidationStatus + self.versionName = versionName + } + + private enum CodingKeys: String, CodingKey { + case packageName = "packageName" + case sbom = "sbom" + case sbomValidationStatus = "sbomValidationStatus" + case versionName = "versionName" + } + } + public struct AssociateTargetsWithJobRequest: AWSEncodableShape { /// An optional comment string describing why the job was associated with the targets. public let comment: String? 
@@ -2577,6 +2682,25 @@ extension IoT { public init() {} } + public struct ClientCertificateConfig: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the Lambda function that IoT invokes after mutual TLS authentication during the connection. + public let clientCertificateCallbackArn: String? + + @inlinable + public init(clientCertificateCallbackArn: String? = nil) { + self.clientCertificateCallbackArn = clientCertificateCallbackArn + } + + public func validate(name: String) throws { + try self.validate(self.clientCertificateCallbackArn, name: "clientCertificateCallbackArn", parent: name, max: 2048) + try self.validate(self.clientCertificateCallbackArn, name: "clientCertificateCallbackArn", parent: name, pattern: "^[\\s\\S]*$") + } + + private enum CodingKeys: String, CodingKey { + case clientCertificateCallbackArn = "clientCertificateCallbackArn" + } + } + public struct CloudwatchAlarmAction: AWSEncodableShape & AWSDecodableShape { /// The CloudWatch alarm name. public let alarmName: String @@ -3231,8 +3355,14 @@ extension IoT { } public struct CreateDomainConfigurationRequest: AWSEncodableShape { + /// An enumerated string that specifies the application-layer protocol. SECURE_MQTT - MQTT over TLS. MQTT_WSS - MQTT over WebSocket. HTTPS - HTTP over TLS. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify application_layer protocol. For more information, see Device communication protocols. + public let applicationProtocol: ApplicationProtocol? + /// An enumerated string that specifies the authentication type. CUSTOM_AUTH_X509 - Use custom authentication and authorization with additional details from the X.509 client certificate. CUSTOM_AUTH - Use custom authentication and authorization. For more information, see Custom authentication and authorization. AWS_X509 - Use X.509 client certificates without custom authentication and authorization. For more information, see X.509 client certificates. AWS_SIGV4 - Use Amazon Web Services Signature Version 4. For more information, see IAM users, groups, and roles. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify authentication type. For more information, see Device communication protocols. + public let authenticationType: AuthenticationType? /// An object that specifies the authorization service for a domain. public let authorizerConfig: AuthorizerConfig? + /// An object that specifies the client certificate configuration for a domain. + public let clientCertificateConfig: ClientCertificateConfig? /// The name of the domain configuration. This value must be unique to a region. public let domainConfigurationName: String /// The name of the domain. @@ -3251,8 +3381,11 @@ extension IoT { public let validationCertificateArn: String? @inlinable - public init(authorizerConfig: AuthorizerConfig? = nil, domainConfigurationName: String, domainName: String? = nil, serverCertificateArns: [String]? = nil, serverCertificateConfig: ServerCertificateConfig? = nil, serviceType: ServiceType? = nil, tags: [Tag]? = nil, tlsConfig: TlsConfig? = nil, validationCertificateArn: String? = nil) { + public init(applicationProtocol: ApplicationProtocol? = nil, authenticationType: AuthenticationType? = nil, authorizerConfig: AuthorizerConfig? = nil, clientCertificateConfig: ClientCertificateConfig? = nil, domainConfigurationName: String, domainName: String? = nil, serverCertificateArns: [String]? = nil, serverCertificateConfig: ServerCertificateConfig? 
= nil, serviceType: ServiceType? = nil, tags: [Tag]? = nil, tlsConfig: TlsConfig? = nil, validationCertificateArn: String? = nil) { + self.applicationProtocol = applicationProtocol + self.authenticationType = authenticationType self.authorizerConfig = authorizerConfig + self.clientCertificateConfig = clientCertificateConfig self.domainConfigurationName = domainConfigurationName self.domainName = domainName self.serverCertificateArns = serverCertificateArns @@ -3266,7 +3399,10 @@ extension IoT { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.applicationProtocol, forKey: .applicationProtocol) + try container.encodeIfPresent(self.authenticationType, forKey: .authenticationType) try container.encodeIfPresent(self.authorizerConfig, forKey: .authorizerConfig) + try container.encodeIfPresent(self.clientCertificateConfig, forKey: .clientCertificateConfig) request.encodePath(self.domainConfigurationName, key: "domainConfigurationName") try container.encodeIfPresent(self.domainName, forKey: .domainName) try container.encodeIfPresent(self.serverCertificateArns, forKey: .serverCertificateArns) @@ -3279,6 +3415,7 @@ extension IoT { public func validate(name: String) throws { try self.authorizerConfig?.validate(name: "\(name).authorizerConfig") + try self.clientCertificateConfig?.validate(name: "\(name).clientCertificateConfig") try self.validate(self.domainConfigurationName, name: "domainConfigurationName", parent: name, max: 128) try self.validate(self.domainConfigurationName, name: "domainConfigurationName", parent: name, min: 1) try self.validate(self.domainConfigurationName, name: "domainConfigurationName", parent: name, pattern: "^[\\w.-]+$") @@ -3301,7 +3438,10 @@ extension IoT { } private enum CodingKeys: String, CodingKey { + case applicationProtocol = "applicationProtocol" + case authenticationType = "authenticationType" case authorizerConfig = "authorizerConfig" + case clientCertificateConfig = "clientCertificateConfig" case domainName = "domainName" case serverCertificateArns = "serverCertificateArns" case serverCertificateConfig = "serverCertificateConfig" @@ -4121,6 +4261,8 @@ extension IoT { } public struct CreatePackageVersionRequest: AWSEncodableShape { + /// The various build components created during the build process such as libraries and configuration files that make up a software package version. + public let artifact: PackageVersionArtifact? /// Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet. The combined size of all the attributes on a package version is limited to 3KB. public let attributes: [String: String]? /// A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -4129,17 +4271,21 @@ extension IoT { public let description: String? /// The name of the associated software package. public let packageName: String + /// The inline job document associated with a software package version used for a quick job deployment. + public let recipe: String? /// Metadata that can be used to manage the package version. public let tags: [String: String]? /// The name of the new package version. public let versionName: String @inlinable - public init(attributes: [String: String]? 
= nil, clientToken: String? = CreatePackageVersionRequest.idempotencyToken(), description: String? = nil, packageName: String, tags: [String: String]? = nil, versionName: String) { + public init(artifact: PackageVersionArtifact? = nil, attributes: [String: String]? = nil, clientToken: String? = CreatePackageVersionRequest.idempotencyToken(), description: String? = nil, packageName: String, recipe: String? = nil, tags: [String: String]? = nil, versionName: String) { + self.artifact = artifact self.attributes = attributes self.clientToken = clientToken self.description = description self.packageName = packageName + self.recipe = recipe self.tags = tags self.versionName = versionName } @@ -4147,15 +4293,18 @@ extension IoT { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.artifact, forKey: .artifact) try container.encodeIfPresent(self.attributes, forKey: .attributes) request.encodeQuery(self.clientToken, key: "clientToken") try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.packageName, key: "packageName") + try container.encodeIfPresent(self.recipe, forKey: .recipe) try container.encodeIfPresent(self.tags, forKey: .tags) request.encodePath(self.versionName, key: "versionName") } public func validate(name: String) throws { + try self.artifact?.validate(name: "\(name).artifact") try self.attributes?.forEach { try validate($0.key, name: "attributes.key", parent: name, min: 1) try validate($0.key, name: "attributes.key", parent: name, pattern: "^[a-zA-Z0-9:_-]+$") @@ -4170,6 +4319,7 @@ extension IoT { try self.validate(self.packageName, name: "packageName", parent: name, max: 128) try self.validate(self.packageName, name: "packageName", parent: name, min: 1) try self.validate(self.packageName, name: "packageName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") + try self.validate(self.recipe, name: "recipe", parent: name, max: 3072) try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -4184,8 +4334,10 @@ extension IoT { } private enum CodingKeys: String, CodingKey { + case artifact = "artifact" case attributes = "attributes" case description = "description" + case recipe = "recipe" case tags = "tags" } } @@ -6858,8 +7010,14 @@ extension IoT { } public struct DescribeDomainConfigurationResponse: AWSDecodableShape { + /// An enumerated string that specifies the application-layer protocol. SECURE_MQTT - MQTT over TLS. MQTT_WSS - MQTT over WebSocket. HTTPS - HTTP over TLS. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify application_layer protocol. For more information, see Device communication protocols. + public let applicationProtocol: ApplicationProtocol? + /// An enumerated string that specifies the authentication type. CUSTOM_AUTH_X509 - Use custom authentication and authorization with additional details from the X.509 client certificate. CUSTOM_AUTH - Use custom authentication and authorization. For more information, see Custom authentication and authorization. AWS_X509 - Use X.509 client certificates without custom authentication and authorization. For more information, see X.509 client certificates. AWS_SIGV4 - Use Amazon Web Services Signature Version 4. For more information, see IAM users, groups, and roles. 
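With artifact and recipe on CreatePackageVersionRequest, a package version can reference its build output in S3 and carry an inline job document at creation time. A sketch under the same assumptions as above, with hypothetical bucket, key, and recipe contents:

```swift
import SotoIoT

// Sketch: create a package version that points at an S3 artifact and embeds
// a small inline recipe. Names and document contents are placeholders.
func createVersionWithArtifact(iot: IoT) async throws {
    let request = IoT.CreatePackageVersionRequest(
        artifact: .init(s3Location: .init(bucket: "my-artifacts", key: "firmware-1.0.1.bin")),
        packageName: "my-package",
        recipe: #"{"steps": []}"#,   // inline job document, placeholder content
        versionName: "1.0.1"
    )
    let response = try await iot.createPackageVersion(request)
    print(response.packageVersionArn ?? "")
}
```

Note that the client-side validation added above caps recipe at 3072 characters.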
DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify authentication type. For more information, see Device communication protocols. + public let authenticationType: AuthenticationType? /// An object that specifies the authorization service for a domain. public let authorizerConfig: AuthorizerConfig? + /// An object that specifies the client certificate configuration for a domain. + public let clientCertificateConfig: ClientCertificateConfig? /// The ARN of the domain configuration. public let domainConfigurationArn: String? /// The name of the domain configuration. @@ -6882,8 +7040,11 @@ extension IoT { public let tlsConfig: TlsConfig? @inlinable - public init(authorizerConfig: AuthorizerConfig? = nil, domainConfigurationArn: String? = nil, domainConfigurationName: String? = nil, domainConfigurationStatus: DomainConfigurationStatus? = nil, domainName: String? = nil, domainType: DomainType? = nil, lastStatusChangeDate: Date? = nil, serverCertificateConfig: ServerCertificateConfig? = nil, serverCertificates: [ServerCertificateSummary]? = nil, serviceType: ServiceType? = nil, tlsConfig: TlsConfig? = nil) { + public init(applicationProtocol: ApplicationProtocol? = nil, authenticationType: AuthenticationType? = nil, authorizerConfig: AuthorizerConfig? = nil, clientCertificateConfig: ClientCertificateConfig? = nil, domainConfigurationArn: String? = nil, domainConfigurationName: String? = nil, domainConfigurationStatus: DomainConfigurationStatus? = nil, domainName: String? = nil, domainType: DomainType? = nil, lastStatusChangeDate: Date? = nil, serverCertificateConfig: ServerCertificateConfig? = nil, serverCertificates: [ServerCertificateSummary]? = nil, serviceType: ServiceType? = nil, tlsConfig: TlsConfig? = nil) { + self.applicationProtocol = applicationProtocol + self.authenticationType = authenticationType self.authorizerConfig = authorizerConfig + self.clientCertificateConfig = clientCertificateConfig self.domainConfigurationArn = domainConfigurationArn self.domainConfigurationName = domainConfigurationName self.domainConfigurationStatus = domainConfigurationStatus @@ -6897,7 +7058,10 @@ extension IoT { } private enum CodingKeys: String, CodingKey { + case applicationProtocol = "applicationProtocol" + case authenticationType = "authenticationType" case authorizerConfig = "authorizerConfig" + case clientCertificateConfig = "clientCertificateConfig" case domainConfigurationArn = "domainConfigurationArn" case domainConfigurationName = "domainConfigurationName" case domainConfigurationStatus = "domainConfigurationStatus" @@ -7156,17 +7320,21 @@ extension IoT { } public struct DescribeJobRequest: AWSEncodableShape { + /// A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values. + public let beforeSubstitution: Bool? /// The unique identifier you assigned to this job when it was created. public let jobId: String @inlinable - public init(jobId: String) { + public init(beforeSubstitution: Bool? = nil, jobId: String) { + self.beforeSubstitution = beforeSubstitution self.jobId = jobId } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.beforeSubstitution, key: "beforeSubstitution") request.encodePath(self.jobId, key: "jobId") } @@ -8373,6 +8541,48 @@ extension IoT { private enum CodingKeys: CodingKey {} } + public struct DisassociateSbomFromPackageVersionRequest: AWSEncodableShape { + /// A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. + public let clientToken: String? + /// The name of the new software package. + public let packageName: String + /// The name of the new package version. + public let versionName: String + + @inlinable + public init(clientToken: String? = DisassociateSbomFromPackageVersionRequest.idempotencyToken(), packageName: String, versionName: String) { + self.clientToken = clientToken + self.packageName = packageName + self.versionName = versionName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.clientToken, key: "clientToken") + request.encodePath(self.packageName, key: "packageName") + request.encodePath(self.versionName, key: "versionName") + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 64) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 36) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^\\S{36,64}$") + try self.validate(self.packageName, name: "packageName", parent: name, max: 128) + try self.validate(self.packageName, name: "packageName", parent: name, min: 1) + try self.validate(self.packageName, name: "packageName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") + try self.validate(self.versionName, name: "versionName", parent: name, max: 64) + try self.validate(self.versionName, name: "versionName", parent: name, min: 1) + try self.validate(self.versionName, name: "versionName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DisassociateSbomFromPackageVersionResponse: AWSDecodableShape { + public init() {} + } + public struct DocumentParameter: AWSDecodableShape { /// Description of the map field containing the patterns that need to be replaced in a managed template job document schema. public let description: String? @@ -8994,17 +9204,21 @@ extension IoT { } public struct GetJobDocumentRequest: AWSEncodableShape { + /// A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values. + public let beforeSubstitution: Bool? /// The unique identifier you assigned to this job when it was created. public let jobId: String @inlinable - public init(jobId: String) { + public init(beforeSubstitution: Bool? = nil, jobId: String) { + self.beforeSubstitution = beforeSubstitution self.jobId = jobId } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.beforeSubstitution, key: "beforeSubstitution") request.encodePath(self.jobId, key: "jobId") } @@ -9199,6 +9413,8 @@ extension IoT { } public struct GetPackageVersionResponse: AWSDecodableShape { + /// The various components that make up a software package version. + public let artifact: PackageVersionArtifact? /// Metadata that were added to the package version that can be used to define a package version’s configuration. public let attributes: [String: String]? /// The date when the package version was created. @@ -9213,13 +9429,20 @@ extension IoT { public let packageName: String? /// The ARN for the package version. public let packageVersionArn: String? + /// The inline job document associated with a software package version used for a quick job deployment. + public let recipe: String? + /// The software bill of materials for a software package version. + public let sbom: Sbom? + /// The status of the validation for a new software bill of materials added to a software package version. + public let sbomValidationStatus: SbomValidationStatus? /// The status associated to the package version. For more information, see Package version lifecycle. public let status: PackageVersionStatus? /// The name of the package version. public let versionName: String? @inlinable - public init(attributes: [String: String]? = nil, creationDate: Date? = nil, description: String? = nil, errorReason: String? = nil, lastModifiedDate: Date? = nil, packageName: String? = nil, packageVersionArn: String? = nil, status: PackageVersionStatus? = nil, versionName: String? = nil) { + public init(artifact: PackageVersionArtifact? = nil, attributes: [String: String]? = nil, creationDate: Date? = nil, description: String? = nil, errorReason: String? = nil, lastModifiedDate: Date? = nil, packageName: String? = nil, packageVersionArn: String? = nil, recipe: String? = nil, sbom: Sbom? = nil, sbomValidationStatus: SbomValidationStatus? = nil, status: PackageVersionStatus? = nil, versionName: String? = nil) { + self.artifact = artifact self.attributes = attributes self.creationDate = creationDate self.description = description @@ -9227,11 +9450,15 @@ extension IoT { self.lastModifiedDate = lastModifiedDate self.packageName = packageName self.packageVersionArn = packageVersionArn + self.recipe = recipe + self.sbom = sbom + self.sbomValidationStatus = sbomValidationStatus self.status = status self.versionName = versionName } private enum CodingKeys: String, CodingKey { + case artifact = "artifact" case attributes = "attributes" case creationDate = "creationDate" case description = "description" @@ -9239,6 +9466,9 @@ extension IoT { case lastModifiedDate = "lastModifiedDate" case packageName = "packageName" case packageVersionArn = "packageVersionArn" + case recipe = "recipe" + case sbom = "sbom" + case sbomValidationStatus = "sbomValidationStatus" case status = "status" case versionName = "versionName" } @@ -12639,6 +12869,69 @@ extension IoT { } } + public struct ListSbomValidationResultsRequest: AWSEncodableShape { + /// The maximum number of results to return at one time. + public let maxResults: Int? + /// A token that can be used to retrieve the next set of results, or null if there are no additional results. + public let nextToken: String? + /// The name of the new software package. + public let packageName: String + /// The end result of the + public let validationResult: SbomValidationResult? 
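The beforeSubstitution flag on DescribeJobRequest and GetJobDocumentRequest is sent as a query parameter. A small sketch, with a hypothetical job ID, of retrieving the stored job document before substitution parameters are resolved:

```swift
import SotoIoT

// Sketch: fetch the job document as stored, before substitution parameters
// are resolved. "ota-rollout-42" is a hypothetical job ID.
func fetchRawJobDocument(iot: IoT) async throws -> String? {
    let response = try await iot.getJobDocument(
        IoT.GetJobDocumentRequest(beforeSubstitution: true, jobId: "ota-rollout-42")
    )
    return response.document
}
```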
+ /// The name of the new package version. + public let versionName: String + + @inlinable + public init(maxResults: Int? = nil, nextToken: String? = nil, packageName: String, validationResult: SbomValidationResult? = nil, versionName: String) { + self.maxResults = maxResults + self.nextToken = nextToken + self.packageName = packageName + self.validationResult = validationResult + self.versionName = versionName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodePath(self.packageName, key: "packageName") + request.encodeQuery(self.validationResult, key: "validationResult") + request.encodePath(self.versionName, key: "versionName") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.packageName, name: "packageName", parent: name, max: 128) + try self.validate(self.packageName, name: "packageName", parent: name, min: 1) + try self.validate(self.packageName, name: "packageName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") + try self.validate(self.versionName, name: "versionName", parent: name, max: 64) + try self.validate(self.versionName, name: "versionName", parent: name, min: 1) + try self.validate(self.versionName, name: "versionName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSbomValidationResultsResponse: AWSDecodableShape { + /// A token that can be used to retrieve the next set of results, or null if there are no additional results. + public let nextToken: String? + /// A summary of the validation results for each software bill of materials attached to a software package version. + public let validationResultSummaries: [SbomValidationResultSummary]? + + @inlinable + public init(nextToken: String? = nil, validationResultSummaries: [SbomValidationResultSummary]? = nil) { + self.nextToken = nextToken + self.validationResultSummaries = validationResultSummaries + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case validationResultSummaries = "validationResultSummaries" + } + } + public struct ListScheduledAuditsRequest: AWSEncodableShape { /// The maximum number of results to return at one time. The default is 25. public let maxResults: Int? @@ -14481,6 +14774,23 @@ extension IoT { } } + public struct PackageVersionArtifact: AWSEncodableShape & AWSDecodableShape { + public let s3Location: S3Location? + + @inlinable + public init(s3Location: S3Location? = nil) { + self.s3Location = s3Location + } + + public func validate(name: String) throws { + try self.s3Location?.validate(name: "\(name).s3Location") + } + + private enum CodingKeys: String, CodingKey { + case s3Location = "s3Location" + } + } + public struct PackageVersionSummary: AWSDecodableShape { /// The date that the package version was created. public let creationDate: Date? @@ -15543,6 +15853,49 @@ extension IoT { } } + public struct Sbom: AWSEncodableShape & AWSDecodableShape { + public let s3Location: S3Location? + + @inlinable + public init(s3Location: S3Location? 
= nil) { + self.s3Location = s3Location + } + + public func validate(name: String) throws { + try self.s3Location?.validate(name: "\(name).s3Location") + } + + private enum CodingKeys: String, CodingKey { + case s3Location = "s3Location" + } + } + + public struct SbomValidationResultSummary: AWSDecodableShape { + /// The errorCode representing the validation failure error if the SBOM validation failed. + public let errorCode: SbomValidationErrorCode? + /// The errorMessage representing the validation failure error if the SBOM validation failed. + public let errorMessage: String? + /// The name of the SBOM file. + public let fileName: String? + /// The end result of the SBOM validation. + public let validationResult: SbomValidationResult? + + @inlinable + public init(errorCode: SbomValidationErrorCode? = nil, errorMessage: String? = nil, fileName: String? = nil, validationResult: SbomValidationResult? = nil) { + self.errorCode = errorCode + self.errorMessage = errorMessage + self.fileName = fileName + self.validationResult = validationResult + } + + private enum CodingKeys: String, CodingKey { + case errorCode = "errorCode" + case errorMessage = "errorMessage" + case fileName = "fileName" + case validationResult = "validationResult" + } + } + public struct ScheduledAuditMetadata: AWSDecodableShape { /// The day of the month on which the scheduled audit is run (if the frequency is "MONTHLY"). If days 29-31 are specified, and the month does not have that many days, the audit takes place on the "LAST" day of the month. public let dayOfMonth: String? @@ -16823,7 +17176,7 @@ extension IoT { public let deviceDefender: String? /// The unnamed shadow and named shadow. For more information about shadows, see IoT Device Shadow service. public let shadow: String? - /// Thing group names. + /// Thing group and billing group names. public let thingGroupNames: [String]? /// The thing ID. public let thingId: String? @@ -18024,8 +18377,14 @@ extension IoT { } public struct UpdateDomainConfigurationRequest: AWSEncodableShape { + /// An enumerated string that specifies the application-layer protocol. SECURE_MQTT - MQTT over TLS. MQTT_WSS - MQTT over WebSocket. HTTPS - HTTP over TLS. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify application_layer protocol. For more information, see Device communication protocols. + public let applicationProtocol: ApplicationProtocol? + /// An enumerated string that specifies the authentication type. CUSTOM_AUTH_X509 - Use custom authentication and authorization with additional details from the X.509 client certificate. CUSTOM_AUTH - Use custom authentication and authorization. For more information, see Custom authentication and authorization. AWS_X509 - Use X.509 client certificates without custom authentication and authorization. For more information, see X.509 client certificates. AWS_SIGV4 - Use Amazon Web Services Signature Version 4. For more information, see IAM users, groups, and roles. DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify authentication type. For more information, see Device communication protocols. + public let authenticationType: AuthenticationType? /// An object that specifies the authorization service for a domain. public let authorizerConfig: AuthorizerConfig? + /// An object that specifies the client certificate configuration for a domain. + public let clientCertificateConfig: ClientCertificateConfig? 
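Both CreateDomainConfigurationRequest and UpdateDomainConfigurationRequest now accept the ApplicationProtocol and AuthenticationType enums plus a ClientCertificateConfig. A hedged sketch of switching an existing domain configuration to MQTT over WebSockets with X.509-enhanced custom authentication; the configuration name and Lambda ARN are placeholders:

```swift
import SotoIoT

// Sketch: point an existing domain configuration at MQTT over WebSockets with
// CUSTOM_AUTH_X509 and a client-certificate callback Lambda. Names are made up.
func updateDomainAuth(iot: IoT) async throws {
    let request = IoT.UpdateDomainConfigurationRequest(
        applicationProtocol: .mqttWss,
        authenticationType: .customAuthX509,
        clientCertificateConfig: .init(
            clientCertificateCallbackArn: "arn:aws:lambda:us-east-1:111122223333:function:cert-check"
        ),
        domainConfigurationName: "my-domain-config"
    )
    _ = try await iot.updateDomainConfiguration(request)
}
```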
/// The name of the domain configuration to be updated. public let domainConfigurationName: String /// The status to which the domain configuration should be updated. @@ -18038,8 +18397,11 @@ extension IoT { public let tlsConfig: TlsConfig? @inlinable - public init(authorizerConfig: AuthorizerConfig? = nil, domainConfigurationName: String, domainConfigurationStatus: DomainConfigurationStatus? = nil, removeAuthorizerConfig: Bool? = nil, serverCertificateConfig: ServerCertificateConfig? = nil, tlsConfig: TlsConfig? = nil) { + public init(applicationProtocol: ApplicationProtocol? = nil, authenticationType: AuthenticationType? = nil, authorizerConfig: AuthorizerConfig? = nil, clientCertificateConfig: ClientCertificateConfig? = nil, domainConfigurationName: String, domainConfigurationStatus: DomainConfigurationStatus? = nil, removeAuthorizerConfig: Bool? = nil, serverCertificateConfig: ServerCertificateConfig? = nil, tlsConfig: TlsConfig? = nil) { + self.applicationProtocol = applicationProtocol + self.authenticationType = authenticationType self.authorizerConfig = authorizerConfig + self.clientCertificateConfig = clientCertificateConfig self.domainConfigurationName = domainConfigurationName self.domainConfigurationStatus = domainConfigurationStatus self.removeAuthorizerConfig = removeAuthorizerConfig @@ -18050,7 +18412,10 @@ extension IoT { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.applicationProtocol, forKey: .applicationProtocol) + try container.encodeIfPresent(self.authenticationType, forKey: .authenticationType) try container.encodeIfPresent(self.authorizerConfig, forKey: .authorizerConfig) + try container.encodeIfPresent(self.clientCertificateConfig, forKey: .clientCertificateConfig) request.encodePath(self.domainConfigurationName, key: "domainConfigurationName") try container.encodeIfPresent(self.domainConfigurationStatus, forKey: .domainConfigurationStatus) try container.encodeIfPresent(self.removeAuthorizerConfig, forKey: .removeAuthorizerConfig) @@ -18060,6 +18425,7 @@ extension IoT { public func validate(name: String) throws { try self.authorizerConfig?.validate(name: "\(name).authorizerConfig") + try self.clientCertificateConfig?.validate(name: "\(name).clientCertificateConfig") try self.validate(self.domainConfigurationName, name: "domainConfigurationName", parent: name, max: 128) try self.validate(self.domainConfigurationName, name: "domainConfigurationName", parent: name, min: 1) try self.validate(self.domainConfigurationName, name: "domainConfigurationName", parent: name, pattern: "^[\\w.:-]+$") @@ -18067,7 +18433,10 @@ extension IoT { } private enum CodingKeys: String, CodingKey { + case applicationProtocol = "applicationProtocol" + case authenticationType = "authenticationType" case authorizerConfig = "authorizerConfig" + case clientCertificateConfig = "clientCertificateConfig" case domainConfigurationStatus = "domainConfigurationStatus" case removeAuthorizerConfig = "removeAuthorizerConfig" case serverCertificateConfig = "serverCertificateConfig" @@ -18502,6 +18871,8 @@ extension IoT { public struct UpdatePackageVersionRequest: AWSEncodableShape { /// The status that the package version should be assigned. For more information, see Package version lifecycle. public let action: PackageVersionAction? + /// The various components that make up a software package version. 
+ public let artifact: PackageVersionArtifact? /// Metadata that can be used to define a package version’s configuration. For example, the Amazon S3 file location, configuration options that are being sent to the device or fleet. Note: Attributes can be updated only when the package version is in a draft state. The combined size of all the attributes on a package version is limited to 3KB. public let attributes: [String: String]? /// A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -18510,16 +18881,20 @@ extension IoT { public let description: String? /// The name of the associated software package. public let packageName: String + /// The inline job document associated with a software package version used for a quick job deployment. + public let recipe: String? /// The name of the target package version. public let versionName: String @inlinable - public init(action: PackageVersionAction? = nil, attributes: [String: String]? = nil, clientToken: String? = UpdatePackageVersionRequest.idempotencyToken(), description: String? = nil, packageName: String, versionName: String) { + public init(action: PackageVersionAction? = nil, artifact: PackageVersionArtifact? = nil, attributes: [String: String]? = nil, clientToken: String? = UpdatePackageVersionRequest.idempotencyToken(), description: String? = nil, packageName: String, recipe: String? = nil, versionName: String) { self.action = action + self.artifact = artifact self.attributes = attributes self.clientToken = clientToken self.description = description self.packageName = packageName + self.recipe = recipe self.versionName = versionName } @@ -18527,14 +18902,17 @@ extension IoT { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) try container.encodeIfPresent(self.action, forKey: .action) + try container.encodeIfPresent(self.artifact, forKey: .artifact) try container.encodeIfPresent(self.attributes, forKey: .attributes) request.encodeQuery(self.clientToken, key: "clientToken") try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.packageName, key: "packageName") + try container.encodeIfPresent(self.recipe, forKey: .recipe) request.encodePath(self.versionName, key: "versionName") } public func validate(name: String) throws { + try self.artifact?.validate(name: "\(name).artifact") try self.attributes?.forEach { try validate($0.key, name: "attributes.key", parent: name, min: 1) try validate($0.key, name: "attributes.key", parent: name, pattern: "^[a-zA-Z0-9:_-]+$") @@ -18549,6 +18927,7 @@ extension IoT { try self.validate(self.packageName, name: "packageName", parent: name, max: 128) try self.validate(self.packageName, name: "packageName", parent: name, min: 1) try self.validate(self.packageName, name: "packageName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") + try self.validate(self.recipe, name: "recipe", parent: name, max: 3072) try self.validate(self.versionName, name: "versionName", parent: name, max: 64) try self.validate(self.versionName, name: "versionName", parent: name, min: 1) try self.validate(self.versionName, name: "versionName", parent: name, pattern: "^[a-zA-Z0-9-_.]+$") @@ -18556,8 +18935,10 @@ extension IoT { private enum CodingKeys: String, CodingKey { case action = "action" + case artifact = "artifact" case attributes = "attributes" case description = "description" + case recipe = "recipe" } } diff --git a/Sources/Soto/Services/IoTDeviceAdvisor/IoTDeviceAdvisor_api.swift b/Sources/Soto/Services/IoTDeviceAdvisor/IoTDeviceAdvisor_api.swift index 5468900eea..da1337a245 100644 --- a/Sources/Soto/Services/IoTDeviceAdvisor/IoTDeviceAdvisor_api.swift +++ b/Sources/Soto/Services/IoTDeviceAdvisor/IoTDeviceAdvisor_api.swift @@ -104,16 +104,19 @@ public struct IoTDeviceAdvisor: AWSService { /// Creates a Device Advisor test suite. Requires permission to access the CreateSuiteDefinition action. /// /// Parameters: + /// - clientToken: The client token for the test suite definition creation. This token is used for tracking test suite definition creation using retries and obtaining its status. This parameter is optional. /// - suiteDefinitionConfiguration: Creates a Device Advisor test suite with suite definition configuration. /// - tags: The tags to be attached to the suite definition. /// - logger: Logger use during operation @inlinable public func createSuiteDefinition( + clientToken: String? = CreateSuiteDefinitionRequest.idempotencyToken(), suiteDefinitionConfiguration: SuiteDefinitionConfiguration? = nil, tags: [String: String]? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateSuiteDefinitionResponse { let input = CreateSuiteDefinitionRequest( + clientToken: clientToken, suiteDefinitionConfiguration: suiteDefinitionConfiguration, tags: tags ) diff --git a/Sources/Soto/Services/IoTDeviceAdvisor/IoTDeviceAdvisor_shapes.swift b/Sources/Soto/Services/IoTDeviceAdvisor/IoTDeviceAdvisor_shapes.swift index c88f75b9e0..18673d4ced 100644 --- a/Sources/Soto/Services/IoTDeviceAdvisor/IoTDeviceAdvisor_shapes.swift +++ b/Sources/Soto/Services/IoTDeviceAdvisor/IoTDeviceAdvisor_shapes.swift @@ -88,18 +88,24 @@ extension IoTDeviceAdvisor { // MARK: Shapes public struct CreateSuiteDefinitionRequest: AWSEncodableShape { + /// The client token for the test suite definition creation. This token is used for tracking test suite definition creation using retries and obtaining its status. This parameter is optional. + public let clientToken: String? /// Creates a Device Advisor test suite with suite definition configuration. public let suiteDefinitionConfiguration: SuiteDefinitionConfiguration? /// The tags to be attached to the suite definition. public let tags: [String: String]? @inlinable - public init(suiteDefinitionConfiguration: SuiteDefinitionConfiguration? = nil, tags: [String: String]? = nil) { + public init(clientToken: String? = CreateSuiteDefinitionRequest.idempotencyToken(), suiteDefinitionConfiguration: SuiteDefinitionConfiguration? = nil, tags: [String: String]? = nil) { + self.clientToken = clientToken self.suiteDefinitionConfiguration = suiteDefinitionConfiguration self.tags = tags } public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 64) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[\\u0021-\\u007E]+$") try self.suiteDefinitionConfiguration?.validate(name: "\(name).suiteDefinitionConfiguration") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) @@ -111,6 +117,7 @@ extension IoTDeviceAdvisor { } private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" case suiteDefinitionConfiguration = "suiteDefinitionConfiguration" case tags = "tags" } diff --git a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift index 2149c55fd3..4581c7094e 100644 --- a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift +++ b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_api.swift @@ -195,7 +195,6 @@ public struct IoTFleetWise: AWSService { /// - expiryTime: (Optional) The time the campaign expires, in seconds since epoch (January 1, 1970 at midnight UTC time). Vehicle data isn't collected after the campaign expires. Default: 253402214400 (December 31, 9999, 00:00:00 UTC) /// - name: The name of the campaign to create. /// - postTriggerCollectionDuration: (Optional) How long (in milliseconds) to collect raw data after a triggering event initiates the collection. If it's not specified, 0 is used. Default: 0 - /// - priority: (Optional) A number indicating the priority of one campaign over another campaign for a certain vehicle or fleet. A campaign with the lowest value is deployed to vehicles before any other campaigns. If it's not specified, 0 is used. Default: 0 /// - signalCatalogArn: The Amazon Resource Name (ARN) of the signal catalog to associate with the campaign. 
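The Device Advisor CreateSuiteDefinition convenience above now threads a clientToken through and defaults it to a generated idempotency token, so a retried call does not create a duplicate suite definition. A minimal sketch, assuming an existing AWSClient; a real call would also pass a suiteDefinitionConfiguration, which is omitted here only to keep the example small:

```swift
import SotoIoTDeviceAdvisor

// Sketch: create a suite definition relying on the default idempotency token.
// Tag values are hypothetical; a real request would also include a
// suiteDefinitionConfiguration.
func createSuite(client: AWSClient) async throws {
    let deviceAdvisor = IoTDeviceAdvisor(client: client, region: .useast1)
    let response = try await deviceAdvisor.createSuiteDefinition(
        tags: ["team": "device-qa"]
    )
    print(response.suiteDefinitionId ?? "")
}
```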
/// - signalsToCollect: (Optional) A list of information about signals to collect. /// - spoolingMode: (Optional) Whether to store collected data after a vehicle lost a connection with the cloud. After a connection is re-established, the data is automatically forwarded to Amazon Web Services IoT FleetWise. If you want to store collected data when a vehicle loses connection with the cloud, use TO_DISK. If it's not specified, OFF is used. Default: OFF @@ -214,7 +213,6 @@ public struct IoTFleetWise: AWSService { expiryTime: Date? = nil, name: String, postTriggerCollectionDuration: Int64? = nil, - priority: Int? = nil, signalCatalogArn: String, signalsToCollect: [SignalInformation]? = nil, spoolingMode: SpoolingMode? = nil, @@ -233,7 +231,6 @@ public struct IoTFleetWise: AWSService { expiryTime: expiryTime, name: name, postTriggerCollectionDuration: postTriggerCollectionDuration, - priority: priority, signalCatalogArn: signalCatalogArn, signalsToCollect: signalsToCollect, spoolingMode: spoolingMode, diff --git a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift index 0dbd5037b7..e553db21b0 100644 --- a/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift +++ b/Sources/Soto/Services/IoTFleetWise/IoTFleetWise_shapes.swift @@ -1000,6 +1000,7 @@ extension IoTFleetWise { } public func validate(name: String) throws { + try self.validate(self.conditionLanguageVersion, name: "conditionLanguageVersion", parent: name, max: 1) try self.validate(self.conditionLanguageVersion, name: "conditionLanguageVersion", parent: name, min: 1) try self.validate(self.expression, name: "expression", parent: name, max: 2048) try self.validate(self.expression, name: "expression", parent: name, min: 1) @@ -1049,6 +1050,27 @@ extension IoTFleetWise { /// The ARN of the vehicle or fleet to deploy a campaign to. public let targetArn: String + @inlinable + public init(collectionScheme: CollectionScheme, compression: Compression? = nil, dataDestinationConfigs: [DataDestinationConfig]? = nil, dataExtraDimensions: [String]? = nil, description: String? = nil, diagnosticsMode: DiagnosticsMode? = nil, expiryTime: Date? = nil, name: String, postTriggerCollectionDuration: Int64? = nil, signalCatalogArn: String, signalsToCollect: [SignalInformation]? = nil, spoolingMode: SpoolingMode? = nil, startTime: Date? = nil, tags: [Tag]? = nil, targetArn: String) { + self.collectionScheme = collectionScheme + self.compression = compression + self.dataDestinationConfigs = dataDestinationConfigs + self.dataExtraDimensions = dataExtraDimensions + self.description = description + self.diagnosticsMode = diagnosticsMode + self.expiryTime = expiryTime + self.name = name + self.postTriggerCollectionDuration = postTriggerCollectionDuration + self.priority = nil + self.signalCatalogArn = signalCatalogArn + self.signalsToCollect = signalsToCollect + self.spoolingMode = spoolingMode + self.startTime = startTime + self.tags = tags + self.targetArn = targetArn + } + + @available(*, deprecated, message: "Members priority have been deprecated") @inlinable public init(collectionScheme: CollectionScheme, compression: Compression? = nil, dataDestinationConfigs: [DataDestinationConfig]? = nil, dataExtraDimensions: [String]? = nil, description: String? = nil, diagnosticsMode: DiagnosticsMode? = nil, expiryTime: Date? = nil, name: String, postTriggerCollectionDuration: Int64? = nil, priority: Int? = nil, signalCatalogArn: String, signalsToCollect: [SignalInformation]? 
= nil, spoolingMode: SpoolingMode? = nil, startTime: Date? = nil, tags: [Tag]? = nil, targetArn: String) { self.collectionScheme = collectionScheme @@ -2828,6 +2850,9 @@ extension IoTFleetWise { try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.status, name: "status", parent: name, max: 20) + try self.validate(self.status, name: "status", parent: name, min: 7) + try self.validate(self.status, name: "status", parent: name, pattern: "^[A-Z_]*$") } private enum CodingKeys: CodingKey {} @@ -3339,6 +3364,7 @@ extension IoTFleetWise { public func validate(name: String) throws { try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:.*") } private enum CodingKeys: CodingKey {} @@ -4181,6 +4207,7 @@ extension IoTFleetWise { public func validate(name: String) throws { try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:.*") try self.tags.forEach { try $0.validate(name: "\(name).tags[]") } @@ -4206,7 +4233,7 @@ extension IoTFleetWise { } public func validate(name: String) throws { - try self.validate(self.periodMs, name: "periodMs", parent: name, max: 60000) + try self.validate(self.periodMs, name: "periodMs", parent: name, max: 86400000) try self.validate(self.periodMs, name: "periodMs", parent: name, min: 10000) } @@ -4325,6 +4352,7 @@ extension IoTFleetWise { public func validate(name: String) throws { try self.validate(self.resourceARN, name: "resourceARN", parent: name, max: 1011) try self.validate(self.resourceARN, name: "resourceARN", parent: name, min: 1) + try self.validate(self.resourceARN, name: "resourceARN", parent: name, pattern: "^arn:.*") try self.tagKeys.forEach { try validate($0, name: "tagKeys[]", parent: name, max: 128) try validate($0, name: "tagKeys[]", parent: name, min: 1) diff --git a/Sources/Soto/Services/Kinesis/Kinesis_api.swift b/Sources/Soto/Services/Kinesis/Kinesis_api.swift index 63f6902370..6d7f7cd63d 100644 --- a/Sources/Soto/Services/Kinesis/Kinesis_api.swift +++ b/Sources/Soto/Services/Kinesis/Kinesis_api.swift @@ -134,7 +134,7 @@ public struct Kinesis: AWSService { return try await self.addTagsToStream(input, logger: logger) } - /// Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream. You can create your data stream using either on-demand or provisioned capacity mode. Data streams with an on-demand mode require no capacity planning and automatically scale to handle gigabytes of write and read throughput per minute. With the on-demand mode, Kinesis Data Streams automatically manages the shards in order to provide the necessary throughput. For the data streams with a provisioned mode, you must specify the number of shards for the data stream. 
Each shard can support reads up to five transactions per second, up to a maximum data read total of 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second. If the amount of data input increases or decreases, you can add or remove shards. The stream name identifies the stream. The name is scoped to the Amazon Web Services account used by the application. It is also scoped by Amazon Web Services Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name. CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Kinesis Data Streams immediately returns and sets the stream status to CREATING. After the stream is created, Kinesis Data Streams sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream. You receive a LimitExceededException when making a CreateStream request when you try to do one of the following: Have more than five streams in the CREATING state at any point in time. Create more shards than are authorized for your account. For the default shard limit for an Amazon Web Services account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact Amazon Web Services Support. You can use DescribeStreamSummary to check the stream status, which is returned in StreamStatus. CreateStream has a limit of five transactions per second per account. + /// Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream. You can create your data stream using either on-demand or provisioned capacity mode. Data streams with an on-demand mode require no capacity planning and automatically scale to handle gigabytes of write and read throughput per minute. With the on-demand mode, Kinesis Data Streams automatically manages the shards in order to provide the necessary throughput. For the data streams with a provisioned mode, you must specify the number of shards for the data stream. Each shard can support reads up to five transactions per second, up to a maximum data read total of 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second. If the amount of data input increases or decreases, you can add or remove shards. The stream name identifies the stream. The name is scoped to the Amazon Web Services account used by the application. It is also scoped by Amazon Web Services Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name. CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Kinesis Data Streams immediately returns and sets the stream status to CREATING. After the stream is created, Kinesis Data Streams sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream. You receive a LimitExceededException when making a CreateStream request when you try to do one of the following: Have more than five streams in the CREATING state at any point in time. Create more shards than are authorized for your account. 
For the default shard limit for an Amazon Web Services account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact Amazon Web Services Support. You can use DescribeStreamSummary to check the stream status, which is returned in StreamStatus. CreateStream has a limit of five transactions per second per account. You can add tags to the stream when making a CreateStream request by setting the Tags parameter. If you pass Tags parameter, in addition to having kinesis:createStream permission, you must also have kinesis:addTagsToStream permission for the stream that will be created. Tags will take effect from the CREATING status of the stream. @Sendable @inlinable public func createStream(_ input: CreateStreamInput, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -147,24 +147,27 @@ public struct Kinesis: AWSService { logger: logger ) } - /// Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream. You can create your data stream using either on-demand or provisioned capacity mode. Data streams with an on-demand mode require no capacity planning and automatically scale to handle gigabytes of write and read throughput per minute. With the on-demand mode, Kinesis Data Streams automatically manages the shards in order to provide the necessary throughput. For the data streams with a provisioned mode, you must specify the number of shards for the data stream. Each shard can support reads up to five transactions per second, up to a maximum data read total of 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second. If the amount of data input increases or decreases, you can add or remove shards. The stream name identifies the stream. The name is scoped to the Amazon Web Services account used by the application. It is also scoped by Amazon Web Services Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name. CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Kinesis Data Streams immediately returns and sets the stream status to CREATING. After the stream is created, Kinesis Data Streams sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream. You receive a LimitExceededException when making a CreateStream request when you try to do one of the following: Have more than five streams in the CREATING state at any point in time. Create more shards than are authorized for your account. For the default shard limit for an Amazon Web Services account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact Amazon Web Services Support. You can use DescribeStreamSummary to check the stream status, which is returned in StreamStatus. CreateStream has a limit of five transactions per second per account. + /// Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. 
Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream. You can create your data stream using either on-demand or provisioned capacity mode. Data streams with an on-demand mode require no capacity planning and automatically scale to handle gigabytes of write and read throughput per minute. With the on-demand mode, Kinesis Data Streams automatically manages the shards in order to provide the necessary throughput. For the data streams with a provisioned mode, you must specify the number of shards for the data stream. Each shard can support reads up to five transactions per second, up to a maximum data read total of 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second. If the amount of data input increases or decreases, you can add or remove shards. The stream name identifies the stream. The name is scoped to the Amazon Web Services account used by the application. It is also scoped by Amazon Web Services Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name. CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Kinesis Data Streams immediately returns and sets the stream status to CREATING. After the stream is created, Kinesis Data Streams sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream. You receive a LimitExceededException when making a CreateStream request when you try to do one of the following: Have more than five streams in the CREATING state at any point in time. Create more shards than are authorized for your account. For the default shard limit for an Amazon Web Services account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact Amazon Web Services Support. You can use DescribeStreamSummary to check the stream status, which is returned in StreamStatus. CreateStream has a limit of five transactions per second per account. You can add tags to the stream when making a CreateStream request by setting the Tags parameter. If you pass Tags parameter, in addition to having kinesis:createStream permission, you must also have kinesis:addTagsToStream permission for the stream that will be created. Tags will take effect from the CREATING status of the stream. /// /// Parameters: /// - shardCount: The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput. /// - streamModeDetails: Indicates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams. /// - streamName: A name to identify the stream. The stream name is scoped to the Amazon Web Services account used by the application that creates the stream. It is also scoped by Amazon Web Services Region. That is, two streams in two different Amazon Web Services accounts can have the same name. Two streams in the same Amazon Web Services account but in two different Regions can also have the same name. + /// - tags: A set of up to 10 key-value pairs to use to create the tags. /// - logger: Logger use during operation @inlinable public func createStream( shardCount: Int? 
= nil, streamModeDetails: StreamModeDetails? = nil, streamName: String, + tags: [String: String]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws { let input = CreateStreamInput( shardCount: shardCount, streamModeDetails: streamModeDetails, - streamName: streamName + streamName: streamName, + tags: tags ) return try await self.createStream(input, logger: logger) } @@ -954,7 +957,7 @@ public struct Kinesis: AWSService { return try await self.putResourcePolicy(input, logger: logger) } - /// Registers a consumer with a Kinesis data stream. When you use this operation, the consumer you register can then call SubscribeToShard to receive data from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every shard you subscribe to. This rate is unaffected by the total number of consumers that read from the same stream. You can register up to 20 consumers per stream. A given consumer can only be registered with one stream at a time. For an example of how to use this operations, see Enhanced Fan-Out Using the Kinesis Data Streams API. The use of this operation has a limit of five transactions per second per account. Also, only 5 consumers can be created simultaneously. In other words, you cannot have more than 5 consumers in a CREATING status at the same time. Registering a 6th consumer while there are 5 in a CREATING status results in a LimitExceededException. + /// Registers a consumer with a Kinesis data stream. When you use this operation, the consumer you register can then call SubscribeToShard to receive data from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every shard you subscribe to. This rate is unaffected by the total number of consumers that read from the same stream. You can register up to 20 consumers per stream. A given consumer can only be registered with one stream at a time. For an example of how to use this operation, see Enhanced Fan-Out Using the Kinesis Data Streams API. The use of this operation has a limit of five transactions per second per account. Also, only 5 consumers can be created simultaneously. In other words, you cannot have more than 5 consumers in a CREATING status at the same time. Registering a 6th consumer while there are 5 in a CREATING status results in a LimitExceededException. @Sendable @inlinable public func registerStreamConsumer(_ input: RegisterStreamConsumerInput, logger: Logger = AWSClient.loggingDisabled) async throws -> RegisterStreamConsumerOutput { @@ -967,7 +970,7 @@ public struct Kinesis: AWSService { logger: logger ) } - /// Registers a consumer with a Kinesis data stream. When you use this operation, the consumer you register can then call SubscribeToShard to receive data from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every shard you subscribe to. This rate is unaffected by the total number of consumers that read from the same stream. You can register up to 20 consumers per stream. A given consumer can only be registered with one stream at a time. For an example of how to use this operations, see Enhanced Fan-Out Using the Kinesis Data Streams API. The use of this operation has a limit of five transactions per second per account. Also, only 5 consumers can be created simultaneously. In other words, you cannot have more than 5 consumers in a CREATING status at the same time. Registering a 6th consumer while there are 5 in a CREATING status results in a LimitExceededException. + /// Registers a consumer with a Kinesis data stream. 
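The Kinesis createStream convenience now forwards tags, and the updated documentation notes that tagging at creation additionally requires kinesis:addTagsToStream permission on top of kinesis:createStream. A short sketch with hypothetical names:

```swift
import SotoKinesis

// Sketch: create an on-demand stream and tag it in the same request.
// Stream name and tag values are hypothetical.
func createTaggedStream(client: AWSClient) async throws {
    let kinesis = Kinesis(client: client, region: .useast1)
    try await kinesis.createStream(
        streamModeDetails: .init(streamMode: .onDemand),
        streamName: "clickstream-events",
        tags: ["team": "analytics", "env": "prod"]
    )
}
```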
When you use this operation, the consumer you register can then call SubscribeToShard to receive data from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every shard you subscribe to. This rate is unaffected by the total number of consumers that read from the same stream. You can register up to 20 consumers per stream. A given consumer can only be registered with one stream at a time. For an example of how to use this operation, see Enhanced Fan-Out Using the Kinesis Data Streams API. The use of this operation has a limit of five transactions per second per account. Also, only 5 consumers can be created simultaneously. In other words, you cannot have more than 5 consumers in a CREATING status at the same time. Registering a 6th consumer while there are 5 in a CREATING status results in a LimitExceededException. /// /// Parameters: /// - consumerName: For a given Kinesis data stream, each consumer must have a unique name. However, consumer names don't have to be unique across data streams. @@ -1135,7 +1138,7 @@ public struct Kinesis: AWSService { return try await self.stopStreamEncryption(input, logger: logger) } - /// This operation establishes an HTTP/2 connection between the consumer you specify in the ConsumerARN parameter and the shard you specify in the ShardId parameter. After the connection is successfully established, Kinesis Data Streams pushes records from the shard to the consumer over this connection. Before you call this operation, call RegisterStreamConsumer to register the consumer with Kinesis Data Streams. When the SubscribeToShard call succeeds, your consumer starts receiving events of type SubscribeToShardEvent over the HTTP/2 connection for up to 5 minutes, after which time you need to call SubscribeToShard again to renew the subscription if you want to continue to receive records. You can make one call to SubscribeToShard per second per registered consumer per shard. For example, if you have a 4000 shard stream and two registered stream consumers, you can make one SubscribeToShard request per second for each combination of shard and registered consumer, allowing you to subscribe both consumers to all 4000 shards in one second. If you call SubscribeToShard again with the same ConsumerARN and ShardId within 5 seconds of a successful call, you'll get a ResourceInUseException. If you call SubscribeToShard 5 seconds or more after a successful call, the second call takes over the subscription and the previous connection expires or fails with a ResourceInUseException. For an example of how to use this operations, see Enhanced Fan-Out Using the Kinesis Data Streams API. + /// This operation establishes an HTTP/2 connection between the consumer you specify in the ConsumerARN parameter and the shard you specify in the ShardId parameter. After the connection is successfully established, Kinesis Data Streams pushes records from the shard to the consumer over this connection. Before you call this operation, call RegisterStreamConsumer to register the consumer with Kinesis Data Streams. When the SubscribeToShard call succeeds, your consumer starts receiving events of type SubscribeToShardEvent over the HTTP/2 connection for up to 5 minutes, after which time you need to call SubscribeToShard again to renew the subscription if you want to continue to receive records. You can make one call to SubscribeToShard per second per registered consumer per shard. 
For example, if you have a 4000 shard stream and two registered stream consumers, you can make one SubscribeToShard request per second for each combination of shard and registered consumer, allowing you to subscribe both consumers to all 4000 shards in one second. If you call SubscribeToShard again with the same ConsumerARN and ShardId within 5 seconds of a successful call, you'll get a ResourceInUseException. If you call SubscribeToShard 5 seconds or more after a successful call, the second call takes over the subscription and the previous connection expires or fails with a ResourceInUseException. For an example of how to use this operation, see Enhanced Fan-Out Using the Kinesis Data Streams API. @Sendable @inlinable public func subscribeToShard(_ input: SubscribeToShardInput, logger: Logger = AWSClient.loggingDisabled) async throws -> SubscribeToShardOutput { @@ -1148,7 +1151,7 @@ public struct Kinesis: AWSService { logger: logger ) } - /// This operation establishes an HTTP/2 connection between the consumer you specify in the ConsumerARN parameter and the shard you specify in the ShardId parameter. After the connection is successfully established, Kinesis Data Streams pushes records from the shard to the consumer over this connection. Before you call this operation, call RegisterStreamConsumer to register the consumer with Kinesis Data Streams. When the SubscribeToShard call succeeds, your consumer starts receiving events of type SubscribeToShardEvent over the HTTP/2 connection for up to 5 minutes, after which time you need to call SubscribeToShard again to renew the subscription if you want to continue to receive records. You can make one call to SubscribeToShard per second per registered consumer per shard. For example, if you have a 4000 shard stream and two registered stream consumers, you can make one SubscribeToShard request per second for each combination of shard and registered consumer, allowing you to subscribe both consumers to all 4000 shards in one second. If you call SubscribeToShard again with the same ConsumerARN and ShardId within 5 seconds of a successful call, you'll get a ResourceInUseException. If you call SubscribeToShard 5 seconds or more after a successful call, the second call takes over the subscription and the previous connection expires or fails with a ResourceInUseException. For an example of how to use this operations, see Enhanced Fan-Out Using the Kinesis Data Streams API. + /// This operation establishes an HTTP/2 connection between the consumer you specify in the ConsumerARN parameter and the shard you specify in the ShardId parameter. After the connection is successfully established, Kinesis Data Streams pushes records from the shard to the consumer over this connection. Before you call this operation, call RegisterStreamConsumer to register the consumer with Kinesis Data Streams. When the SubscribeToShard call succeeds, your consumer starts receiving events of type SubscribeToShardEvent over the HTTP/2 connection for up to 5 minutes, after which time you need to call SubscribeToShard again to renew the subscription if you want to continue to receive records. You can make one call to SubscribeToShard per second per registered consumer per shard. For example, if you have a 4000 shard stream and two registered stream consumers, you can make one SubscribeToShard request per second for each combination of shard and registered consumer, allowing you to subscribe both consumers to all 4000 shards in one second. 
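// --- Usage sketch (editorial, not part of the diff) of enhanced fan-out: register a
// consumer, then open a SubscribeToShard subscription. The consumer name, shard id,
// and the Consumer/StartingPosition shapes used here are assumptions based on the
// standard Kinesis API; check the generated shapes before relying on them.
import SotoKinesis

func subscribeWithEnhancedFanOut(kinesis: Kinesis, streamARN: String) async throws {
    // A stream supports up to 20 registered consumers, and no more than 5 may be
    // in CREATING status at the same time.
    let registration = try await kinesis.registerStreamConsumer(
        consumerName: "fanout-consumer",
        streamARN: streamARN
    )

    // One SubscribeToShard call per second per registered consumer per shard; the
    // subscription delivers SubscribeToShardEvent values for up to 5 minutes, after
    // which the call must be repeated to keep receiving records.
    let subscription = try await kinesis.subscribeToShard(
        SubscribeToShardInput(
            consumerARN: registration.consumer.consumerARN,
            shardId: "shardId-000000000000",
            startingPosition: .init(type: .latest)
        )
    )
    _ = subscription // records arrive on the response's event stream
}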
If you call SubscribeToShard again with the same ConsumerARN and ShardId within 5 seconds of a successful call, you'll get a ResourceInUseException. If you call SubscribeToShard 5 seconds or more after a successful call, the second call takes over the subscription and the previous connection expires or fails with a ResourceInUseException. For an example of how to use this operation, see Enhanced Fan-Out Using the Kinesis Data Streams API. /// /// Parameters: /// - consumerARN: For this parameter, use the value you obtained when you called RegisterStreamConsumer. diff --git a/Sources/Soto/Services/Kinesis/Kinesis_shapes.swift b/Sources/Soto/Services/Kinesis/Kinesis_shapes.swift index 74f9f15ba3..147d661a3d 100644 --- a/Sources/Soto/Services/Kinesis/Kinesis_shapes.swift +++ b/Sources/Soto/Services/Kinesis/Kinesis_shapes.swift @@ -282,12 +282,15 @@ extension Kinesis { public let streamModeDetails: StreamModeDetails? /// A name to identify the stream. The stream name is scoped to the Amazon Web Services account used by the application that creates the stream. It is also scoped by Amazon Web Services Region. That is, two streams in two different Amazon Web Services accounts can have the same name. Two streams in the same Amazon Web Services account but in two different Regions can also have the same name. public let streamName: String + /// A set of up to 10 key-value pairs to use to create the tags. + public let tags: [String: String]? @inlinable - public init(shardCount: Int? = nil, streamModeDetails: StreamModeDetails? = nil, streamName: String) { + public init(shardCount: Int? = nil, streamModeDetails: StreamModeDetails? = nil, streamName: String, tags: [String: String]? = nil) { self.shardCount = shardCount self.streamModeDetails = streamModeDetails self.streamName = streamName + self.tags = tags } public func validate(name: String) throws { @@ -295,12 +298,20 @@ extension Kinesis { try self.validate(self.streamName, name: "streamName", parent: name, max: 128) try self.validate(self.streamName, name: "streamName", parent: name, min: 1) try self.validate(self.streamName, name: "streamName", parent: name, pattern: "^[a-zA-Z0-9_.-]+$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { case shardCount = "ShardCount" case streamModeDetails = "StreamModeDetails" case streamName = "StreamName" + case tags = "Tags" } } diff --git a/Sources/Soto/Services/Lambda/Lambda_api.swift b/Sources/Soto/Services/Lambda/Lambda_api.swift index eb6c69682e..f30cb2c54f 100644 --- a/Sources/Soto/Services/Lambda/Lambda_api.swift +++ b/Sources/Soto/Services/Lambda/Lambda_api.swift @@ -174,7 +174,7 @@ public struct Lambda: AWSService { return try await self.addLayerVersionPermission(input, logger: logger) } - /// Grants an Amazon Web Servicesservice, Amazon Web Services account, or Amazon Web Services organization permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST. 
To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Servicesservices, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Servicesservices, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function. This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda. + /// Grants a principal permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST. To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function. This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda. @Sendable @inlinable public func addPermission(_ input: AddPermissionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AddPermissionResponse { @@ -187,19 +187,19 @@ public struct Lambda: AWSService { logger: logger ) } - /// Grants an Amazon Web Servicesservice, Amazon Web Services account, or Amazon Web Services organization permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST. To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Servicesservices, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Servicesservices, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function. This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda. 
+ /// Grants a principal permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST. To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function. This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda. /// /// Parameters: /// - action: The action that the principal can use on the function. For example, lambda:InvokeFunction or lambda:GetFunction. /// - eventSourceToken: For Alexa Smart Home functions, a token that the invoker must supply. /// - functionName: The name or ARN of the Lambda function, version, or alias. Name formats Function name – my-function (name-only), my-function:v1 (with alias). Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. /// - functionUrlAuthType: The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs. - /// - principal: The Amazon Web Servicesservice or Amazon Web Services account that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service. + /// - principal: The Amazon Web Services service, Amazon Web Services account, IAM user, or IAM role that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service. /// - principalOrgID: The identifier for your organization in Organizations. Use this to grant permissions to all the Amazon Web Services accounts under this organization. /// - qualifier: Specify a version or alias to add permissions to a published version of the function. /// - revisionId: Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it. - /// - sourceAccount: For Amazon Web Servicesservice, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account. 
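// --- Usage sketch (editorial, not part of the diff): granting Amazon S3 permission to
// invoke a function, scoped by SourceArn and SourceAccount as the parameter docs here
// describe. The function name, bucket ARN, and account id are placeholders.
import SotoLambda

func allowS3Invoke(lambda: Lambda) async throws {
    // Because the principal is a service, SourceArn/SourceAccount limit who can invoke
    // the function through that service (Lambda compares SourceArn with StringLike).
    _ = try await lambda.addPermission(
        action: "lambda:InvokeFunction",
        functionName: "my-function",
        principal: "s3.amazonaws.com",
        sourceAccount: "123456789012",
        sourceArn: "arn:aws:s3:::amzn-s3-demo-bucket",
        statementId: "s3-invoke"
    )
}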
- /// - sourceArn: For Amazon Web Servicesservices, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic. Note that Lambda configures the comparison using the StringLike operator. + /// - sourceAccount: For Amazon Web Services service, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account. + /// - sourceArn: For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic. Note that Lambda configures the comparison using the StringLike operator. /// - statementId: A statement identifier that differentiates the statement from others in the same policy. /// - logger: Logger use during operation @inlinable @@ -293,18 +293,21 @@ public struct Lambda: AWSService { /// - allowedPublishers: Signing profiles for this code signing configuration. /// - codeSigningPolicies: The code signing policies define the actions to take if the validation checks fail. /// - description: Descriptive name for this code signing configuration. + /// - tags: A list of tags to add to the code signing configuration. /// - logger: Logger use during operation @inlinable public func createCodeSigningConfig( allowedPublishers: AllowedPublishers, codeSigningPolicies: CodeSigningPolicies? = nil, description: String? = nil, + tags: [String: String]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateCodeSigningConfigResponse { let input = CreateCodeSigningConfigRequest( allowedPublishers: allowedPublishers, codeSigningPolicies: codeSigningPolicies, - description: description + description: description, + tags: tags ) return try await self.createCodeSigningConfig(input, logger: logger) } @@ -347,6 +350,7 @@ public struct Lambda: AWSService { /// - sourceAccessConfigurations: An array of authentication protocols or VPC components required to secure your event source. /// - startingPosition: The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka. /// - startingPositionTimestamp: With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future. + /// - tags: A list of tags to apply to the event source mapping. /// - topics: The name of the Kafka topic. /// - tumblingWindowInSeconds: (Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window. /// - logger: Logger use during operation @@ -374,6 +378,7 @@ public struct Lambda: AWSService { sourceAccessConfigurations: [SourceAccessConfiguration]? = nil, startingPosition: EventSourcePosition? = nil, startingPositionTimestamp: Date? = nil, + tags: [String: String]? = nil, topics: [String]? = nil, tumblingWindowInSeconds: Int? 
= nil, logger: Logger = AWSClient.loggingDisabled @@ -401,13 +406,14 @@ public struct Lambda: AWSService { sourceAccessConfigurations: sourceAccessConfigurations, startingPosition: startingPosition, startingPositionTimestamp: startingPositionTimestamp, + tags: tags, topics: topics, tumblingWindowInSeconds: tumblingWindowInSeconds ) return try await self.createEventSourceMapping(input, logger: logger) } - /// Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Servicesservices, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing. If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties. If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64. When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states. A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration. The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency). You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function. If another Amazon Web Services account or an Amazon Web Servicesservice invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias. To invoke your function directly, use Invoke. 
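// --- Usage sketch (editorial, not part of the diff): tagging an event source mapping at
// creation time via the new `tags` parameter. The SQS queue ARN and function name are
// placeholders; eventSourceMappingArn is the response field added later in this diff.
import SotoLambda

func mapQueueToFunction(lambda: Lambda) async throws {
    let mapping = try await lambda.createEventSourceMapping(
        batchSize: 10,
        eventSourceArn: "arn:aws:sqs:us-east-1:123456789012:orders-queue",
        functionName: "process-orders",
        tags: ["team": "orders", "stage": "dev"]
    )
    // The mapping's own ARN is what TagResource / ListTags / UntagResource accept.
    print(mapping.eventSourceMappingArn ?? "pending")
}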
To invoke your function in response to events in other Amazon Web Servicesservices, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions. + /// Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing. If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties. If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64. When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states. A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration. The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency). You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function. If another Amazon Web Services account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias. To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. 
For more information, see Invoking Lambda functions. @Sendable @inlinable public func createFunction(_ input: CreateFunctionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> FunctionConfiguration { @@ -420,7 +426,7 @@ public struct Lambda: AWSService { logger: logger ) } - /// Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Servicesservices, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing. If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties. If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64. When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states. A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration. The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency). You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function. If another Amazon Web Services account or an Amazon Web Servicesservice invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias. To invoke your function directly, use Invoke. 
To invoke your function in response to events in other Amazon Web Servicesservices, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions. + /// Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing. If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties. If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64. When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states. A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration. The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency). You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function. If another Amazon Web Services account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias. To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. 
For more information, see Invoking Lambda functions. /// /// Parameters: /// - architectures: The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64. @@ -636,7 +642,7 @@ public struct Lambda: AWSService { return try await self.deleteEventSourceMapping(input, logger: logger) } - /// Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias. To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Servicesservices and resources that invoke your function directly, delete the trigger in the service where you originally configured it. + /// Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias. To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services services and resources that invoke your function directly, delete the trigger in the service where you originally configured it. @Sendable @inlinable public func deleteFunction(_ input: DeleteFunctionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -649,7 +655,7 @@ public struct Lambda: AWSService { logger: logger ) } - /// Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias. To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Servicesservices and resources that invoke your function directly, delete the trigger in the service where you originally configured it. + /// Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias. To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services services and resources that invoke your function directly, delete the trigger in the service where you originally configured it. /// /// Parameters: /// - functionName: The name or ARN of the Lambda function or version. Name formats Function name – my-function (name-only), my-function:1 (with version). Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -1859,7 +1865,7 @@ public struct Lambda: AWSService { return try await self.listProvisionedConcurrencyConfigs(input, logger: logger) } - /// Returns a function's tags. You can also view tags with GetFunction. + /// Returns a function, event source mapping, or code signing configuration's tags. You can also view function tags with GetFunction. 
@Sendable @inlinable public func listTags(_ input: ListTagsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsResponse { @@ -1872,10 +1878,10 @@ public struct Lambda: AWSService { logger: logger ) } - /// Returns a function's tags. You can also view tags with GetFunction. + /// Returns a function, event source mapping, or code signing configuration's tags. You can also view function tags with GetFunction. /// /// Parameters: - /// - resource: The function's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to aliases or versions. + /// - resource: The resource's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to function aliases or versions. /// - logger: Logger use during operation @inlinable public func listTags( @@ -2253,7 +2259,7 @@ public struct Lambda: AWSService { return try await self.removeLayerVersionPermission(input, logger: logger) } - /// Revokes function-use permission from an Amazon Web Servicesservice or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy. + /// Revokes function-use permission from an Amazon Web Services service or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy. @Sendable @inlinable public func removePermission(_ input: RemovePermissionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -2266,7 +2272,7 @@ public struct Lambda: AWSService { logger: logger ) } - /// Revokes function-use permission from an Amazon Web Servicesservice or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy. + /// Revokes function-use permission from an Amazon Web Services service or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy. /// /// Parameters: /// - functionName: The name or ARN of the Lambda function, version, or alias. Name formats Function name – my-function (name-only), my-function:v1 (with alias). Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function. Partial ARN – 123456789012:function:my-function. You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length. @@ -2291,7 +2297,7 @@ public struct Lambda: AWSService { return try await self.removePermission(input, logger: logger) } - /// Adds tags to a function. + /// Adds tags to a function, event source mapping, or code signing configuration. @Sendable @inlinable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -2304,11 +2310,11 @@ public struct Lambda: AWSService { logger: logger ) } - /// Adds tags to a function. + /// Adds tags to a function, event source mapping, or code signing configuration. /// /// Parameters: - /// - resource: The function's Amazon Resource Name (ARN). - /// - tags: A list of tags to apply to the function. + /// - resource: The resource's Amazon Resource Name (ARN). + /// - tags: A list of tags to apply to the resource. /// - logger: Logger use during operation @inlinable public func tagResource( @@ -2323,7 +2329,7 @@ public struct Lambda: AWSService { return try await self.tagResource(input, logger: logger) } - /// Removes tags from a function. + /// Removes tags from a function, event source mapping, or code signing configuration. 
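// --- Usage sketch (editorial, not part of the diff): the broadened tagging operations.
// TagResource and ListTags now accept code signing configuration and event source
// mapping ARNs as well as function ARNs; the ARN below is a placeholder shaped to
// match the new validation pattern.
import SotoLambda

func tagCodeSigningConfig(lambda: Lambda) async throws {
    let arn = "arn:aws:lambda:us-east-1:123456789012:code-signing-config:csc-0123456789abcdef0"
    try await lambda.tagResource(resource: arn, tags: ["owner": "platform"])
    let listed = try await lambda.listTags(resource: arn)
    print(listed.tags ?? [:])
}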
@Sendable @inlinable public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -2336,11 +2342,11 @@ public struct Lambda: AWSService { logger: logger ) } - /// Removes tags from a function. + /// Removes tags from a function, event source mapping, or code signing configuration. /// /// Parameters: - /// - resource: The function's Amazon Resource Name (ARN). - /// - tagKeys: A list of tag keys to remove from the function. + /// - resource: The resource's Amazon Resource Name (ARN). + /// - tagKeys: A list of tag keys to remove from the resource. /// - logger: Logger use during operation @inlinable public func untagResource( @@ -2570,7 +2576,7 @@ public struct Lambda: AWSService { return try await self.updateFunctionCode(input, logger: logger) } - /// Modify the version-specific settings of a Lambda function. When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states. These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version. To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Servicesservice, use AddPermission. + /// Modify the version-specific settings of a Lambda function. When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states. These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version. To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Services service, use AddPermission. @Sendable @inlinable public func updateFunctionConfiguration(_ input: UpdateFunctionConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> FunctionConfiguration { @@ -2583,7 +2589,7 @@ public struct Lambda: AWSService { logger: logger ) } - /// Modify the version-specific settings of a Lambda function. When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. 
For more information, see Lambda function states. These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version. To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Servicesservice, use AddPermission. + /// Modify the version-specific settings of a Lambda function. When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states. These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version. To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Services service, use AddPermission. /// /// Parameters: /// - deadLetterConfig: A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues. diff --git a/Sources/Soto/Services/Lambda/Lambda_shapes.swift b/Sources/Soto/Services/Lambda/Lambda_shapes.swift index 8178e8cb15..716f2c1331 100644 --- a/Sources/Soto/Services/Lambda/Lambda_shapes.swift +++ b/Sources/Soto/Services/Lambda/Lambda_shapes.swift @@ -456,7 +456,7 @@ extension Lambda { public let functionName: String /// The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs. public let functionUrlAuthType: FunctionUrlAuthType? - /// The Amazon Web Servicesservice or Amazon Web Services account that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service. + /// The Amazon Web Services service, Amazon Web Services account, IAM user, or IAM role that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service. public let principal: String /// The identifier for your organization in Organizations. Use this to grant permissions to all the Amazon Web Services accounts under this organization. public let principalOrgID: String? @@ -464,9 +464,9 @@ extension Lambda { public let qualifier: String? /// Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it. public let revisionId: String? - /// For Amazon Web Servicesservice, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account. 
+ /// For Amazon Web Services service, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account. public let sourceAccount: String? - /// For Amazon Web Servicesservices, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic. Note that Lambda configures the comparison using the StringLike operator. + /// For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic. Note that Lambda configures the comparison using the StringLike operator. public let sourceArn: String? /// A statement identifier that differentiates the statement from others in the same policy. public let statementId: String @@ -833,12 +833,15 @@ extension Lambda { public let codeSigningPolicies: CodeSigningPolicies? /// Descriptive name for this code signing configuration. public let description: String? + /// A list of tags to add to the code signing configuration. + public let tags: [String: String]? @inlinable - public init(allowedPublishers: AllowedPublishers, codeSigningPolicies: CodeSigningPolicies? = nil, description: String? = nil) { + public init(allowedPublishers: AllowedPublishers, codeSigningPolicies: CodeSigningPolicies? = nil, description: String? = nil, tags: [String: String]? = nil) { self.allowedPublishers = allowedPublishers self.codeSigningPolicies = codeSigningPolicies self.description = description + self.tags = tags } public func validate(name: String) throws { @@ -850,6 +853,7 @@ extension Lambda { case allowedPublishers = "AllowedPublishers" case codeSigningPolicies = "CodeSigningPolicies" case description = "Description" + case tags = "Tags" } } @@ -912,13 +916,15 @@ extension Lambda { public let startingPosition: EventSourcePosition? /// With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future. public let startingPositionTimestamp: Date? + /// A list of tags to apply to the event source mapping. + public let tags: [String: String]? /// The name of the Kafka topic. public let topics: [String]? /// (Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window. public let tumblingWindowInSeconds: Int? @inlinable - public init(amazonManagedKafkaEventSourceConfig: AmazonManagedKafkaEventSourceConfig? = nil, batchSize: Int? = nil, bisectBatchOnFunctionError: Bool? = nil, destinationConfig: DestinationConfig? = nil, documentDBEventSourceConfig: DocumentDBEventSourceConfig? = nil, enabled: Bool? = nil, eventSourceArn: String? = nil, filterCriteria: FilterCriteria? = nil, functionName: String, functionResponseTypes: [FunctionResponseType]? = nil, kmsKeyArn: String? = nil, maximumBatchingWindowInSeconds: Int? = nil, maximumRecordAgeInSeconds: Int? = nil, maximumRetryAttempts: Int? = nil, parallelizationFactor: Int? = nil, queues: [String]? = nil, scalingConfig: ScalingConfig? = nil, selfManagedEventSource: SelfManagedEventSource? = nil, selfManagedKafkaEventSourceConfig: SelfManagedKafkaEventSourceConfig? = nil, sourceAccessConfigurations: [SourceAccessConfiguration]? = nil, startingPosition: EventSourcePosition? = nil, startingPositionTimestamp: Date? 
= nil, topics: [String]? = nil, tumblingWindowInSeconds: Int? = nil) { + public init(amazonManagedKafkaEventSourceConfig: AmazonManagedKafkaEventSourceConfig? = nil, batchSize: Int? = nil, bisectBatchOnFunctionError: Bool? = nil, destinationConfig: DestinationConfig? = nil, documentDBEventSourceConfig: DocumentDBEventSourceConfig? = nil, enabled: Bool? = nil, eventSourceArn: String? = nil, filterCriteria: FilterCriteria? = nil, functionName: String, functionResponseTypes: [FunctionResponseType]? = nil, kmsKeyArn: String? = nil, maximumBatchingWindowInSeconds: Int? = nil, maximumRecordAgeInSeconds: Int? = nil, maximumRetryAttempts: Int? = nil, parallelizationFactor: Int? = nil, queues: [String]? = nil, scalingConfig: ScalingConfig? = nil, selfManagedEventSource: SelfManagedEventSource? = nil, selfManagedKafkaEventSourceConfig: SelfManagedKafkaEventSourceConfig? = nil, sourceAccessConfigurations: [SourceAccessConfiguration]? = nil, startingPosition: EventSourcePosition? = nil, startingPositionTimestamp: Date? = nil, tags: [String: String]? = nil, topics: [String]? = nil, tumblingWindowInSeconds: Int? = nil) { self.amazonManagedKafkaEventSourceConfig = amazonManagedKafkaEventSourceConfig self.batchSize = batchSize self.bisectBatchOnFunctionError = bisectBatchOnFunctionError @@ -941,6 +947,7 @@ extension Lambda { self.sourceAccessConfigurations = sourceAccessConfigurations self.startingPosition = startingPosition self.startingPositionTimestamp = startingPositionTimestamp + self.tags = tags self.topics = topics self.tumblingWindowInSeconds = tumblingWindowInSeconds } @@ -1014,6 +1021,7 @@ extension Lambda { case sourceAccessConfigurations = "SourceAccessConfigurations" case startingPosition = "StartingPosition" case startingPositionTimestamp = "StartingPositionTimestamp" + case tags = "Tags" case topics = "Topics" case tumblingWindowInSeconds = "TumblingWindowInSeconds" } @@ -1685,6 +1693,8 @@ extension Lambda { public let documentDBEventSourceConfig: DocumentDBEventSourceConfig? /// The Amazon Resource Name (ARN) of the event source. public let eventSourceArn: String? + /// The Amazon Resource Name (ARN) of the event source mapping. + public let eventSourceMappingArn: String? /// An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering. If filter criteria is encrypted, this field shows up as null in the response of ListEventSourceMapping API calls. You can view this field in plaintext in the response of GetEventSourceMapping and DeleteEventSourceMapping calls if you have kms:Decrypt permissions for the correct KMS key. public let filterCriteria: FilterCriteria? /// An object that contains details about an error related to filter criteria encryption. @@ -1735,13 +1745,14 @@ extension Lambda { public let uuid: String? @inlinable - public init(amazonManagedKafkaEventSourceConfig: AmazonManagedKafkaEventSourceConfig? = nil, batchSize: Int? = nil, bisectBatchOnFunctionError: Bool? = nil, destinationConfig: DestinationConfig? = nil, documentDBEventSourceConfig: DocumentDBEventSourceConfig? = nil, eventSourceArn: String? = nil, filterCriteria: FilterCriteria? = nil, filterCriteriaError: FilterCriteriaError? = nil, functionArn: String? = nil, functionResponseTypes: [FunctionResponseType]? = nil, kmsKeyArn: String? = nil, lastModified: Date? = nil, lastProcessingResult: String? = nil, maximumBatchingWindowInSeconds: Int? = nil, maximumRecordAgeInSeconds: Int? = nil, maximumRetryAttempts: Int? 
= nil, parallelizationFactor: Int? = nil, queues: [String]? = nil, scalingConfig: ScalingConfig? = nil, selfManagedEventSource: SelfManagedEventSource? = nil, selfManagedKafkaEventSourceConfig: SelfManagedKafkaEventSourceConfig? = nil, sourceAccessConfigurations: [SourceAccessConfiguration]? = nil, startingPosition: EventSourcePosition? = nil, startingPositionTimestamp: Date? = nil, state: String? = nil, stateTransitionReason: String? = nil, topics: [String]? = nil, tumblingWindowInSeconds: Int? = nil, uuid: String? = nil) { + public init(amazonManagedKafkaEventSourceConfig: AmazonManagedKafkaEventSourceConfig? = nil, batchSize: Int? = nil, bisectBatchOnFunctionError: Bool? = nil, destinationConfig: DestinationConfig? = nil, documentDBEventSourceConfig: DocumentDBEventSourceConfig? = nil, eventSourceArn: String? = nil, eventSourceMappingArn: String? = nil, filterCriteria: FilterCriteria? = nil, filterCriteriaError: FilterCriteriaError? = nil, functionArn: String? = nil, functionResponseTypes: [FunctionResponseType]? = nil, kmsKeyArn: String? = nil, lastModified: Date? = nil, lastProcessingResult: String? = nil, maximumBatchingWindowInSeconds: Int? = nil, maximumRecordAgeInSeconds: Int? = nil, maximumRetryAttempts: Int? = nil, parallelizationFactor: Int? = nil, queues: [String]? = nil, scalingConfig: ScalingConfig? = nil, selfManagedEventSource: SelfManagedEventSource? = nil, selfManagedKafkaEventSourceConfig: SelfManagedKafkaEventSourceConfig? = nil, sourceAccessConfigurations: [SourceAccessConfiguration]? = nil, startingPosition: EventSourcePosition? = nil, startingPositionTimestamp: Date? = nil, state: String? = nil, stateTransitionReason: String? = nil, topics: [String]? = nil, tumblingWindowInSeconds: Int? = nil, uuid: String? = nil) { self.amazonManagedKafkaEventSourceConfig = amazonManagedKafkaEventSourceConfig self.batchSize = batchSize self.bisectBatchOnFunctionError = bisectBatchOnFunctionError self.destinationConfig = destinationConfig self.documentDBEventSourceConfig = documentDBEventSourceConfig self.eventSourceArn = eventSourceArn + self.eventSourceMappingArn = eventSourceMappingArn self.filterCriteria = filterCriteria self.filterCriteriaError = filterCriteriaError self.functionArn = functionArn @@ -1774,6 +1785,7 @@ extension Lambda { case destinationConfig = "DestinationConfig" case documentDBEventSourceConfig = "DocumentDBEventSourceConfig" case eventSourceArn = "EventSourceArn" + case eventSourceMappingArn = "EventSourceMappingArn" case filterCriteria = "FilterCriteria" case filterCriteriaError = "FilterCriteriaError" case functionArn = "FunctionArn" @@ -3893,7 +3905,7 @@ extension Lambda { } public struct ListTagsRequest: AWSEncodableShape { - /// The function's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to aliases or versions. + /// The resource's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to function aliases or versions. 
public let resource: String @inlinable @@ -3908,7 +3920,9 @@ extension Lambda { } public func validate(name: String) throws { - try self.validate(self.resource, name: "resource", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$") + try self.validate(self.resource, name: "resource", parent: name, max: 256) + try self.validate(self.resource, name: "resource", parent: name, min: 1) + try self.validate(self.resource, name: "resource", parent: name, pattern: "^arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})$") } private enum CodingKeys: CodingKey {} @@ -4784,9 +4798,9 @@ extension Lambda { } public struct TagResourceRequest: AWSEncodableShape { - /// The function's Amazon Resource Name (ARN). + /// The resource's Amazon Resource Name (ARN). public let resource: String - /// A list of tags to apply to the function. + /// A list of tags to apply to the resource. public let tags: [String: String] @inlinable @@ -4803,7 +4817,9 @@ extension Lambda { } public func validate(name: String) throws { - try self.validate(self.resource, name: "resource", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$") + try self.validate(self.resource, name: "resource", parent: name, max: 256) + try self.validate(self.resource, name: "resource", parent: name, min: 1) + try self.validate(self.resource, name: "resource", parent: name, pattern: "^arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})$") } private enum CodingKeys: String, CodingKey { @@ -4840,9 +4856,9 @@ extension Lambda { } public struct UntagResourceRequest: AWSEncodableShape { - /// The function's Amazon Resource Name (ARN). + /// The resource's Amazon Resource Name (ARN). public let resource: String - /// A list of tag keys to remove from the function. + /// A list of tag keys to remove from the resource. 
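// Illustrative usage only, not part of the diff: with the widened ARN pattern above, the Lambda
// tagging requests now accept event source mapping and code signing config ARNs in addition to
// function ARNs. A minimal sketch; `client` is an existing AWSClient and the mapping ARN,
// region, and tag values are placeholders.
import SotoLambda

let lambda = Lambda(client: client, region: .useast1)
let mappingArn = "arn:aws:lambda:us-east-1:123456789012:event-source-mapping:91eaeb7e-c976-1234-9451-8709db01f137"
// Tag the event source mapping, then read the tags back.
try await lambda.tagResource(Lambda.TagResourceRequest(resource: mappingArn, tags: ["team": "ingest"]))
let tagged = try await lambda.listTags(Lambda.ListTagsRequest(resource: mappingArn))
print(tagged.tags ?? [:])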
public let tagKeys: [String] @inlinable @@ -4859,7 +4875,9 @@ extension Lambda { } public func validate(name: String) throws { - try self.validate(self.resource, name: "resource", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$") + try self.validate(self.resource, name: "resource", parent: name, max: 256) + try self.validate(self.resource, name: "resource", parent: name, min: 1) + try self.validate(self.resource, name: "resource", parent: name, pattern: "^arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})$") } private enum CodingKeys: CodingKey {} diff --git a/Sources/Soto/Services/MailManager/MailManager_api.swift b/Sources/Soto/Services/MailManager/MailManager_api.swift index 0fbfb1f631..e50a6185bf 100644 --- a/Sources/Soto/Services/MailManager/MailManager_api.swift +++ b/Sources/Soto/Services/MailManager/MailManager_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS MailManager service. /// -/// AWS SES Mail Manager API AWS SES Mail Manager API contains operations and data types that comprise the Mail Manager feature of Amazon Simple Email Service. Mail Manager is a set of Amazon SES email gateway features designed to help you strengthen your organization's email infrastructure, simplify email workflow management, and streamline email compliance control. To learn more, see the Mail Manager chapter in the Amazon SES Developer Guide. +/// Amazon SES Mail Manager API The Amazon SES Mail Manager API contains operations and data types that comprise the Mail Manager feature of Amazon Simple Email Service (SES). Mail Manager is a set of Amazon SES email gateway features designed to help you strengthen your organization's email infrastructure, simplify email workflow management, and streamline email compliance control. To learn more, see the Mail Manager chapter in the Amazon SES Developer Guide. /// API Reference: https://w.amazon.com/bin/view/AWS/Border public struct MailManager: AWSService { // MARK: Member variables @@ -1260,6 +1260,7 @@ public struct MailManager: AWSService { /// - exportDestinationConfiguration: Details on where to deliver the exported email data. /// - filters: Criteria to filter which emails are included in the export. /// - fromTimestamp: The start of the timestamp range to include emails from. + /// - includeMetadata: Whether to include message metadata as JSON files in the export. /// - maxResults: The maximum number of email items to include in the export. /// - toTimestamp: The end of the timestamp range to include emails from. /// - logger: Logger use during operation @@ -1269,6 +1270,7 @@ public struct MailManager: AWSService { exportDestinationConfiguration: ExportDestinationConfiguration, filters: ArchiveFilters? = nil, fromTimestamp: Date, + includeMetadata: Bool? = nil, maxResults: Int? 
= nil, toTimestamp: Date, logger: Logger = AWSClient.loggingDisabled @@ -1278,6 +1280,7 @@ public struct MailManager: AWSService { exportDestinationConfiguration: exportDestinationConfiguration, filters: filters, fromTimestamp: fromTimestamp, + includeMetadata: includeMetadata, maxResults: maxResults, toTimestamp: toTimestamp ) @@ -1567,7 +1570,7 @@ public struct MailManager: AWSService { return try await self.updateRelay(input, logger: logger) } - /// >Update attributes of an already provisioned rule set. + /// Update attributes of an already provisioned rule set. @Sendable @inlinable public func updateRuleSet(_ input: UpdateRuleSetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateRuleSetResponse { @@ -1580,7 +1583,7 @@ public struct MailManager: AWSService { logger: logger ) } - /// >Update attributes of an already provisioned rule set. + /// Update attributes of an already provisioned rule set. /// /// Parameters: /// - rules: A new set of rules to replace the current rules of the rule set—these rules will override all the rules of the rule set. diff --git a/Sources/Soto/Services/MailManager/MailManager_shapes.swift b/Sources/Soto/Services/MailManager/MailManager_shapes.swift index 318c0c755c..c3b0a84aaf 100644 --- a/Sources/Soto/Services/MailManager/MailManager_shapes.swift +++ b/Sources/Soto/Services/MailManager/MailManager_shapes.swift @@ -57,6 +57,8 @@ extension MailManager { public enum ArchiveStringEmailAttribute: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cc = "CC" + case envelopeFrom = "ENVELOPE_FROM" + case envelopeTo = "ENVELOPE_TO" case from = "FROM" case subject = "SUBJECT" case to = "TO" @@ -675,6 +677,56 @@ extension MailManager { } } + public enum RuleStringToEvaluate: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The email attribute to evaluate in a string condition expression. + case attribute(RuleStringEmailAttribute) + /// The email MIME X-Header attribute to evaluate in a string condition expression. 
+ case mimeHeaderAttribute(String) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .attribute: + let value = try container.decode(RuleStringEmailAttribute.self, forKey: .attribute) + self = .attribute(value) + case .mimeHeaderAttribute: + let value = try container.decode(String.self, forKey: .mimeHeaderAttribute) + self = .mimeHeaderAttribute(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .attribute(let value): + try container.encode(value, forKey: .attribute) + case .mimeHeaderAttribute(let value): + try container.encode(value, forKey: .mimeHeaderAttribute) + } + } + + public func validate(name: String) throws { + switch self { + case .mimeHeaderAttribute(let value): + try self.validate(value, name: "mimeHeaderAttribute", parent: name, pattern: "^X-[a-zA-Z0-9-]{1,256}$") + default: + break + } + } + + private enum CodingKeys: String, CodingKey { + case attribute = "Attribute" + case mimeHeaderAttribute = "MimeHeaderAttribute" + } + } + public enum RuleVerdictToEvaluate: AWSEncodableShape & AWSDecodableShape, Sendable { /// The Add On ARN and its returned value to evaluate in a verdict condition expression. case analysis(Analysis) @@ -947,6 +999,10 @@ extension MailManager { } public func validate(name: String) throws { + try self.values.forEach { + try validate($0, name: "values[]", parent: name, max: 2048) + try validate($0, name: "values[]", parent: name, min: 1) + } try self.validate(self.values, name: "values", parent: name, max: 10) try self.validate(self.values, name: "values", parent: name, min: 1) } @@ -1562,6 +1618,28 @@ extension MailManager { public init() {} } + public struct Envelope: AWSDecodableShape { + /// The RCPT FROM given by the host from which the email was received. + public let from: String? + /// The HELO used by the host from which the email was received. + public let helo: String? + /// All SMTP TO entries given by the host from which the email was received. + public let to: [String]? + + @inlinable + public init(from: String? = nil, helo: String? = nil, to: [String]? = nil) { + self.from = from + self.helo = helo + self.to = to + } + + private enum CodingKeys: String, CodingKey { + case from = "From" + case helo = "Helo" + case to = "To" + } + } + public struct ExportStatus: AWSDecodableShape { /// The timestamp of when the export job completed (if finished). public let completionTimestamp: Date? @@ -1794,16 +1872,24 @@ extension MailManager { } public struct GetArchiveMessageResponse: AWSDecodableShape { + /// The SMTP envelope information of the email. + public let envelope: Envelope? /// A pre-signed URL to temporarily download the full message content. public let messageDownloadLink: String? + /// The metadata about the email. + public let metadata: Metadata? @inlinable - public init(messageDownloadLink: String? = nil) { + public init(envelope: Envelope? = nil, messageDownloadLink: String? = nil, metadata: Metadata? 
= nil) { + self.envelope = envelope self.messageDownloadLink = messageDownloadLink + self.metadata = metadata } private enum CodingKeys: String, CodingKey { + case envelope = "Envelope" case messageDownloadLink = "MessageDownloadLink" + case metadata = "Metadata" } } @@ -2847,6 +2933,48 @@ extension MailManager { } } + public struct Metadata: AWSDecodableShape { + /// The ID of the ingress endpoint through which the email was received. + public let ingressPointId: String? + /// The ID of the rule set that processed the email. + public let ruleSetId: String? + /// The name of the host from which the email was received. + public let senderHostname: String? + /// The IP address of the host from which the email was received. + public let senderIpAddress: String? + /// The timestamp of when the email was received. + public let timestamp: Date? + /// The TLS cipher suite used to communicate with the host from which the email was received. + public let tlsCipherSuite: String? + /// The TLS protocol used to communicate with the host from which the email was received. + public let tlsProtocol: String? + /// The ID of the traffic policy that was in effect when the email was received. + public let trafficPolicyId: String? + + @inlinable + public init(ingressPointId: String? = nil, ruleSetId: String? = nil, senderHostname: String? = nil, senderIpAddress: String? = nil, timestamp: Date? = nil, tlsCipherSuite: String? = nil, tlsProtocol: String? = nil, trafficPolicyId: String? = nil) { + self.ingressPointId = ingressPointId + self.ruleSetId = ruleSetId + self.senderHostname = senderHostname + self.senderIpAddress = senderIpAddress + self.timestamp = timestamp + self.tlsCipherSuite = tlsCipherSuite + self.tlsProtocol = tlsProtocol + self.trafficPolicyId = trafficPolicyId + } + + private enum CodingKeys: String, CodingKey { + case ingressPointId = "IngressPointId" + case ruleSetId = "RuleSetId" + case senderHostname = "SenderHostname" + case senderIpAddress = "SenderIpAddress" + case timestamp = "Timestamp" + case tlsCipherSuite = "TlsCipherSuite" + case tlsProtocol = "TlsProtocol" + case trafficPolicyId = "TrafficPolicyId" + } + } + public struct NoAuthentication: AWSEncodableShape & AWSDecodableShape { public init() {} } @@ -2956,10 +3084,14 @@ extension MailManager { public let cc: String? /// The date the email was sent. public let date: String? + /// The SMTP envelope information of the email. + public let envelope: Envelope? /// The email address of the sender. public let from: String? /// A flag indicating if the email has attachments. public let hasAttachments: Bool? + /// The ID of the ingress endpoint through which the email was received. + public let ingressPointId: String? /// The email message ID this is a reply to. public let inReplyTo: String? /// The unique message ID of the email. @@ -2968,6 +3100,10 @@ extension MailManager { public let receivedHeaders: [String]? /// The timestamp of when the email was received. public let receivedTimestamp: Date? + /// The name of the host from which the email was received. + public let senderHostname: String? + /// The IP address of the host from which the email was received. + public let senderIpAddress: String? /// The subject header value of the email. public let subject: String? /// The email addresses in the To header. @@ -2980,16 +3116,20 @@ extension MailManager { public let xPriority: String? @inlinable - public init(archivedMessageId: String? = nil, cc: String? = nil, date: String? = nil, from: String? = nil, hasAttachments: Bool? 
= nil, inReplyTo: String? = nil, messageId: String? = nil, receivedHeaders: [String]? = nil, receivedTimestamp: Date? = nil, subject: String? = nil, to: String? = nil, xMailer: String? = nil, xOriginalMailer: String? = nil, xPriority: String? = nil) { + public init(archivedMessageId: String? = nil, cc: String? = nil, date: String? = nil, envelope: Envelope? = nil, from: String? = nil, hasAttachments: Bool? = nil, ingressPointId: String? = nil, inReplyTo: String? = nil, messageId: String? = nil, receivedHeaders: [String]? = nil, receivedTimestamp: Date? = nil, senderHostname: String? = nil, senderIpAddress: String? = nil, subject: String? = nil, to: String? = nil, xMailer: String? = nil, xOriginalMailer: String? = nil, xPriority: String? = nil) { self.archivedMessageId = archivedMessageId self.cc = cc self.date = date + self.envelope = envelope self.from = from self.hasAttachments = hasAttachments + self.ingressPointId = ingressPointId self.inReplyTo = inReplyTo self.messageId = messageId self.receivedHeaders = receivedHeaders self.receivedTimestamp = receivedTimestamp + self.senderHostname = senderHostname + self.senderIpAddress = senderIpAddress self.subject = subject self.to = to self.xMailer = xMailer @@ -3001,12 +3141,16 @@ extension MailManager { case archivedMessageId = "ArchivedMessageId" case cc = "Cc" case date = "Date" + case envelope = "Envelope" case from = "From" case hasAttachments = "HasAttachments" + case ingressPointId = "IngressPointId" case inReplyTo = "InReplyTo" case messageId = "MessageId" case receivedHeaders = "ReceivedHeaders" case receivedTimestamp = "ReceivedTimestamp" + case senderHostname = "SenderHostname" + case senderIpAddress = "SenderIpAddress" case subject = "Subject" case to = "To" case xMailer = "XMailer" @@ -3193,6 +3337,7 @@ extension MailManager { } public func validate(name: String) throws { + try self.evaluate.validate(name: "\(name).evaluate") try self.values.forEach { try validate($0, name: "values[]", parent: name, max: 4096) try validate($0, name: "values[]", parent: name, min: 1) @@ -3376,17 +3521,20 @@ extension MailManager { public let filters: ArchiveFilters? /// The start of the timestamp range to include emails from. public let fromTimestamp: Date + /// Whether to include message metadata as JSON files in the export. + public let includeMetadata: Bool? /// The maximum number of email items to include in the export. public let maxResults: Int? /// The end of the timestamp range to include emails from. public let toTimestamp: Date @inlinable - public init(archiveId: String, exportDestinationConfiguration: ExportDestinationConfiguration, filters: ArchiveFilters? = nil, fromTimestamp: Date, maxResults: Int? = nil, toTimestamp: Date) { + public init(archiveId: String, exportDestinationConfiguration: ExportDestinationConfiguration, filters: ArchiveFilters? = nil, fromTimestamp: Date, includeMetadata: Bool? = nil, maxResults: Int? 
= nil, toTimestamp: Date) { self.archiveId = archiveId self.exportDestinationConfiguration = exportDestinationConfiguration self.filters = filters self.fromTimestamp = fromTimestamp + self.includeMetadata = includeMetadata self.maxResults = maxResults self.toTimestamp = toTimestamp } @@ -3404,6 +3552,7 @@ extension MailManager { case exportDestinationConfiguration = "ExportDestinationConfiguration" case filters = "Filters" case fromTimestamp = "FromTimestamp" + case includeMetadata = "IncludeMetadata" case maxResults = "MaxResults" case toTimestamp = "ToTimestamp" } @@ -4015,20 +4164,6 @@ extension MailManager { case attribute = "Attribute" } } - - public struct RuleStringToEvaluate: AWSEncodableShape & AWSDecodableShape { - /// The email attribute to evaluate in a string condition expression. - public let attribute: RuleStringEmailAttribute? - - @inlinable - public init(attribute: RuleStringEmailAttribute? = nil) { - self.attribute = attribute - } - - private enum CodingKeys: String, CodingKey { - case attribute = "Attribute" - } - } } // MARK: - Errors diff --git a/Sources/Soto/Services/MarketplaceReporting/MarketplaceReporting_api.swift b/Sources/Soto/Services/MarketplaceReporting/MarketplaceReporting_api.swift new file mode 100644 index 0000000000..a19b744904 --- /dev/null +++ b/Sources/Soto/Services/MarketplaceReporting/MarketplaceReporting_api.swift @@ -0,0 +1,123 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_exported import SotoCore + +/// Service object for interacting with AWS MarketplaceReporting service. +/// +/// The Amazon Web Services Marketplace GetBuyerDashboard API enables you to get a procurement insights dashboard programmatically. The API gets the agreement and cost analysis dashboards with data for all of the Amazon Web Services accounts in your Amazon Web Services Organization. To use the Amazon Web Services Marketplace Reporting API, you must complete the following prerequisites: Enable all features for your organization. For more information, see Enabling all features for an organization with Organizations, in the Organizations User Guide. Call the service as the Organizations management account or an account registered as a delegated administrator for the procurement insights service. For more information about management accounts, see Tutorial: Creating and configuring an organization and Managing the management account with Organizations, both in the Organizations User Guide. For more information about delegated administrators, see Using delegated administrators, in the Amazon Web Services Marketplace Buyer Guide. Create an IAM policy that enables the aws-marketplace:GetBuyerDashboard and organizations:DescribeOrganization permissions. 
In addition, the management account requires the organizations:EnableAWSServiceAccess and iam:CreateServiceLinkedRole permissions to create. For more information about creating the policy, see Policies and permissions in Identity and Access Management, in the IAM User Guide. Access can be shared only by registering the desired linked account as a delegated administrator. That requires organizations:RegisterDelegatedAdministrator organizations:ListDelegatedAdministrators and organizations:DeregisterDelegatedAdministrator permissions. Use the Amazon Web Services Marketplace console to create the AWSServiceRoleForProcurementInsightsPolicy service-linked role. The role enables Amazon Web Services Marketplace procurement visibility integration. The management account requires an IAM policy with the organizations:EnableAWSServiceAccess and iam:CreateServiceLinkedRole permissions to create the service-linked role and enable the service access. For more information, see Granting access to Organizations and Service-linked role to share procurement data in the Amazon Web Services Marketplace Buyer Guide. After creating the service-linked role, you must enable trusted access that grants Amazon Web Services Marketplace permission to access data from your Organizations. For more information, see Granting access to Organizations in the Amazon Web Services Marketplace Buyer Guide. +public struct MarketplaceReporting: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the MarketplaceReporting client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? partition, + serviceName: "MarketplaceReporting", + serviceIdentifier: "reporting-marketplace", + signingName: "aws-marketplace", + serviceProtocol: .restjson, + apiVersion: "2018-05-10", + endpoint: endpoint, + errorType: MarketplaceReportingErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// Generates an embedding URL for an Amazon QuickSight dashboard for an anonymous user. This API is available only to Amazon Web Services Organization management accounts or delegated administrators registered for the procurement insights (procurement-insights.marketplace.amazonaws.com) feature. 
The following rules apply to a generated URL: It contains a temporary bearer token, valid for 5 minutes after it is generated. Once redeemed within that period, it cannot be re-used again. It has a session lifetime of one hour. The 5-minute validity period runs separately from the session lifetime. + @Sendable + @inlinable + public func getBuyerDashboard(_ input: GetBuyerDashboardInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBuyerDashboardOutput { + try await self.client.execute( + operation: "GetBuyerDashboard", + path: "/getBuyerDashboard", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Generates an embedding URL for an Amazon QuickSight dashboard for an anonymous user. This API is available only to Amazon Web Services Organization management accounts or delegated administrators registered for the procurement insights (procurement-insights.marketplace.amazonaws.com) feature. The following rules apply to a generated URL: It contains a temporary bearer token, valid for 5 minutes after it is generated. Once redeemed within that period, it cannot be re-used again. It has a session lifetime of one hour. The 5-minute validity period runs separately from the session lifetime. + /// + /// Parameters: + /// - dashboardIdentifier: The ARN of the requested dashboard. + /// - embeddingDomains: Fully qualified domains that you add to the allow list for access to the generated URL that is then embedded. You can list up to two domains or subdomains in each API call. To include all subdomains under a specific domain, use *. For example, https://*.amazon.com includes all subdomains under https://aws.amazon.com. + /// - logger: Logger use during operation + @inlinable + public func getBuyerDashboard( + dashboardIdentifier: String, + embeddingDomains: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetBuyerDashboardOutput { + let input = GetBuyerDashboardInput( + dashboardIdentifier: dashboardIdentifier, + embeddingDomains: embeddingDomains + ) + return try await self.getBuyerDashboard(input, logger: logger) + } +} + +extension MarketplaceReporting { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. 
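// Illustrative usage only, not part of the generated file: a minimal sketch of calling the new
// GetBuyerDashboard convenience method shown above. `client` is an existing AWSClient; the
// account ID in the dashboard ARN and the embedding domain are placeholders.
import SotoMarketplaceReporting

let reporting = MarketplaceReporting(client: client)
let dashboard = try await reporting.getBuyerDashboard(
    dashboardIdentifier: "arn:aws:aws-marketplace::123456789012:AWSMarketplace/ReportingData/Agreement_V1/Dashboard/AgreementSummary_V1",
    embeddingDomains: ["https://example.com"]
)
// The returned URL embeds the QuickSight dashboard; its bearer token is valid for 5 minutes.
print(dashboard.embedUrl)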
+ public init(from: MarketplaceReporting, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} diff --git a/Sources/Soto/Services/MarketplaceReporting/MarketplaceReporting_shapes.swift b/Sources/Soto/Services/MarketplaceReporting/MarketplaceReporting_shapes.swift new file mode 100644 index 0000000000..2b776b956b --- /dev/null +++ b/Sources/Soto/Services/MarketplaceReporting/MarketplaceReporting_shapes.swift @@ -0,0 +1,134 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension MarketplaceReporting { + // MARK: Enums + + // MARK: Shapes + + public struct GetBuyerDashboardInput: AWSEncodableShape { + /// The ARN of the requested dashboard. + public let dashboardIdentifier: String + /// Fully qualified domains that you add to the allow list for access to the generated URL that is then embedded. You can list up to two domains or subdomains in each API call. To include all subdomains under a specific domain, use *. For example, https://*.amazon.com includes all subdomains under https://aws.amazon.com. + public let embeddingDomains: [String] + + @inlinable + public init(dashboardIdentifier: String, embeddingDomains: [String]) { + self.dashboardIdentifier = dashboardIdentifier + self.embeddingDomains = embeddingDomains + } + + public func validate(name: String) throws { + try self.validate(self.dashboardIdentifier, name: "dashboardIdentifier", parent: name, max: 1023) + try self.validate(self.dashboardIdentifier, name: "dashboardIdentifier", parent: name, min: 1) + try self.validate(self.dashboardIdentifier, name: "dashboardIdentifier", parent: name, pattern: "^arn:aws:aws-marketplace::[0-9]{12}:AWSMarketplace/ReportingData/(Agreement_V1/Dashboard/AgreementSummary_V1|BillingEvent_V1/Dashboard/CostAnalysis_V1)$") + try self.embeddingDomains.forEach { + try validate($0, name: "embeddingDomains[]", parent: name, max: 2000) + try validate($0, name: "embeddingDomains[]", parent: name, min: 1) + try validate($0, name: "embeddingDomains[]", parent: name, pattern: "^(https://[a-zA-Z\\.\\*0-9\\-_]+[\\.]{1}[a-zA-Z]{1,}[a-zA-Z0-9&?/-_=]*[a-zA-Z\\*0-9/]+|http[s]*://localhost(:[0-9]{1,5})?)$") + } + try self.validate(self.embeddingDomains, name: "embeddingDomains", parent: name, max: 2) + try self.validate(self.embeddingDomains, name: "embeddingDomains", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case dashboardIdentifier = "dashboardIdentifier" + case embeddingDomains = "embeddingDomains" + } + } + + public struct GetBuyerDashboardOutput: AWSDecodableShape { + /// The ARN of the returned dashboard. + public let dashboardIdentifier: String + /// The fully qualified domains specified in the request. 
The domains enable access to the generated URL that is then embedded. You can list up to two domains or subdomains in each API call. To include all subdomains under a specific domain, use *. For example, https://*.amazon.com includes all subdomains under https://aws.amazon.com. + public let embeddingDomains: [String] + /// The dashboard's embedding URL. + public let embedUrl: String + + @inlinable + public init(dashboardIdentifier: String, embeddingDomains: [String], embedUrl: String) { + self.dashboardIdentifier = dashboardIdentifier + self.embeddingDomains = embeddingDomains + self.embedUrl = embedUrl + } + + private enum CodingKeys: String, CodingKey { + case dashboardIdentifier = "dashboardIdentifier" + case embeddingDomains = "embeddingDomains" + case embedUrl = "embedUrl" + } + } +} + +// MARK: - Errors + +/// Error enum for MarketplaceReporting +public struct MarketplaceReportingErrorType: AWSErrorType { + enum Code: String { + case accessDeniedException = "AccessDeniedException" + case badRequestException = "BadRequestException" + case internalServerException = "InternalServerException" + case unauthorizedException = "UnauthorizedException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize MarketplaceReporting + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// You do not have sufficient access to perform this action. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// The request is malformed, or it contains an error such as an invalid parameter. Ensure the request has all required parameters. + public static var badRequestException: Self { .init(.badRequestException) } + /// The operation failed due to a server error. + public static var internalServerException: Self { .init(.internalServerException) } + /// You do not have permission to perform this action. + public static var unauthorizedException: Self { .init(.unauthorizedException) } +} + +extension MarketplaceReportingErrorType: Equatable { + public static func == (lhs: MarketplaceReportingErrorType, rhs: MarketplaceReportingErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension MarketplaceReportingErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? 
"")" + } +} diff --git a/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift b/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift index 087c20ed73..e99d846b97 100644 --- a/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift +++ b/Sources/Soto/Services/MediaConvert/MediaConvert_shapes.swift @@ -2586,6 +2586,29 @@ extension MediaConvert { public var description: String { return self.rawValue } } + public enum PresetSpeke20Audio: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case presetAudio1 = "PRESET_AUDIO_1" + case presetAudio2 = "PRESET_AUDIO_2" + case presetAudio3 = "PRESET_AUDIO_3" + case shared = "SHARED" + case unencrypted = "UNENCRYPTED" + public var description: String { return self.rawValue } + } + + public enum PresetSpeke20Video: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case presetVideo1 = "PRESET_VIDEO_1" + case presetVideo2 = "PRESET_VIDEO_2" + case presetVideo3 = "PRESET_VIDEO_3" + case presetVideo4 = "PRESET_VIDEO_4" + case presetVideo5 = "PRESET_VIDEO_5" + case presetVideo6 = "PRESET_VIDEO_6" + case presetVideo7 = "PRESET_VIDEO_7" + case presetVideo8 = "PRESET_VIDEO_8" + case shared = "SHARED" + case unencrypted = "UNENCRYPTED" + public var description: String { return self.rawValue } + } + public enum PricingPlan: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case onDemand = "ON_DEMAND" case reserved = "RESERVED" @@ -6212,6 +6235,24 @@ extension MediaConvert { } } + public struct EncryptionContractConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Specify which SPEKE version 2.0 audio preset MediaConvert uses to request content keys from your SPEKE server. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/drm-content-speke-v2-presets.html To encrypt to your audio outputs, choose from the following: Audio preset 1, Audio preset 2, or Audio preset 3. To encrypt your audio outputs, using the same content key for both your audio and video outputs: Choose Shared. When you do, you must also set SPEKE v2.0 video preset to Shared. To not encrypt your audio outputs: Choose Unencrypted. When you do, to encrypt your video outputs, you must also specify a SPEKE v2.0 video preset (other than Shared or Unencrypted). + public let spekeAudioPreset: PresetSpeke20Audio? + /// Specify which SPEKE version 2.0 video preset MediaConvert uses to request content keys from your SPEKE server. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/drm-content-speke-v2-presets.html To encrypt to your video outputs, choose from the following: Video preset 1, Video preset 2, Video preset 3, Video preset 4, Video preset 5, Video preset 6, Video preset 7, or Video preset 8. To encrypt your video outputs, using the same content key for both your video and audio outputs: Choose Shared. When you do, you must also set SPEKE v2.0 audio preset to Shared. To not encrypt your video outputs: Choose Unencrypted. When you do, to encrypt your audio outputs, you must also specify a SPEKE v2.0 audio preset (other than Shared or Unencrypted). + public let spekeVideoPreset: PresetSpeke20Video? + + @inlinable + public init(spekeAudioPreset: PresetSpeke20Audio? = nil, spekeVideoPreset: PresetSpeke20Video? 
= nil) { + self.spekeAudioPreset = spekeAudioPreset + self.spekeVideoPreset = spekeVideoPreset + } + + private enum CodingKeys: String, CodingKey { + case spekeAudioPreset = "spekeAudioPreset" + case spekeVideoPreset = "spekeVideoPreset" + } + } + public struct Endpoint: AWSDecodableShape { /// URL of endpoint public let url: String? @@ -11078,6 +11119,8 @@ extension MediaConvert { public struct SpekeKeyProvider: AWSEncodableShape & AWSDecodableShape { /// If you want your key provider to encrypt the content keys that it provides to MediaConvert, set up a certificate with a master key using AWS Certificate Manager. Specify the certificate's Amazon Resource Name (ARN) here. public let certificateArn: String? + /// Specify the SPEKE version, either v1.0 or v2.0, that MediaConvert uses when encrypting your output. For more information, see: https://docs.aws.amazon.com/speke/latest/documentation/speke-api-specification.html To use SPEKE v1.0: Leave blank. To use SPEKE v2.0: Specify a SPEKE v2.0 video preset and a SPEKE v2.0 audio preset. + public let encryptionContractConfiguration: EncryptionContractConfiguration? /// Specify the resource ID that your SPEKE-compliant key provider uses to identify this content. public let resourceId: String? /// Relates to SPEKE implementation. DRM system identifiers. DASH output groups support a max of two system ids. Other group types support one system id. See https://dashif.org/identifiers/content_protection/ for more details. @@ -11086,8 +11129,9 @@ extension MediaConvert { public let url: String? @inlinable - public init(certificateArn: String? = nil, resourceId: String? = nil, systemIds: [String]? = nil, url: String? = nil) { + public init(certificateArn: String? = nil, encryptionContractConfiguration: EncryptionContractConfiguration? = nil, resourceId: String? = nil, systemIds: [String]? = nil, url: String? = nil) { self.certificateArn = certificateArn + self.encryptionContractConfiguration = encryptionContractConfiguration self.resourceId = resourceId self.systemIds = systemIds self.url = url @@ -11103,6 +11147,7 @@ extension MediaConvert { private enum CodingKeys: String, CodingKey { case certificateArn = "certificateArn" + case encryptionContractConfiguration = "encryptionContractConfiguration" case resourceId = "resourceId" case systemIds = "systemIds" case url = "url" @@ -11114,6 +11159,8 @@ extension MediaConvert { public let certificateArn: String? /// Specify the DRM system IDs that you want signaled in the DASH manifest that MediaConvert creates as part of this CMAF package. The DASH manifest can currently signal up to three system IDs. For more information, see https://dashif.org/identifiers/content_protection/. public let dashSignaledSystemIds: [String]? + /// Specify the SPEKE version, either v1.0 or v2.0, that MediaConvert uses when encrypting your output. For more information, see: https://docs.aws.amazon.com/speke/latest/documentation/speke-api-specification.html To use SPEKE v1.0: Leave blank. To use SPEKE v2.0: Specify a SPEKE v2.0 video preset and a SPEKE v2.0 audio preset. + public let encryptionContractConfiguration: EncryptionContractConfiguration? /// Specify the DRM system ID that you want signaled in the HLS manifest that MediaConvert creates as part of this CMAF package. The HLS manifest can currently signal only one system ID. For more information, see https://dashif.org/identifiers/content_protection/. public let hlsSignaledSystemIds: [String]? 
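// Illustrative only, not part of the diff: constructing the new SPEKE v2.0 encryption contract
// on a SpekeKeyProvider. The resource ID, system ID, and key-server URL are placeholders; use
// the values from your own DRM provider.
import SotoMediaConvert

let contract = MediaConvert.EncryptionContractConfiguration(
    spekeAudioPreset: .presetAudio1,
    spekeVideoPreset: .presetVideo1
)
let keyProvider = MediaConvert.SpekeKeyProvider(
    encryptionContractConfiguration: contract,
    resourceId: "my-content-id",
    systemIds: ["edef8ba9-79d6-4ace-a3c8-27dcd51d21ed"], // example DRM system ID
    url: "https://keyserver.example.com/speke/v2.0/copyProtection"
)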
/// Specify the resource ID that your SPEKE-compliant key provider uses to identify this content. @@ -11122,9 +11169,10 @@ extension MediaConvert { public let url: String? @inlinable - public init(certificateArn: String? = nil, dashSignaledSystemIds: [String]? = nil, hlsSignaledSystemIds: [String]? = nil, resourceId: String? = nil, url: String? = nil) { + public init(certificateArn: String? = nil, dashSignaledSystemIds: [String]? = nil, encryptionContractConfiguration: EncryptionContractConfiguration? = nil, hlsSignaledSystemIds: [String]? = nil, resourceId: String? = nil, url: String? = nil) { self.certificateArn = certificateArn self.dashSignaledSystemIds = dashSignaledSystemIds + self.encryptionContractConfiguration = encryptionContractConfiguration self.hlsSignaledSystemIds = hlsSignaledSystemIds self.resourceId = resourceId self.url = url @@ -11149,6 +11197,7 @@ extension MediaConvert { private enum CodingKeys: String, CodingKey { case certificateArn = "certificateArn" case dashSignaledSystemIds = "dashSignaledSystemIds" + case encryptionContractConfiguration = "encryptionContractConfiguration" case hlsSignaledSystemIds = "hlsSignaledSystemIds" case resourceId = "resourceId" case url = "url" diff --git a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift index cfed66d670..e79c899a78 100644 --- a/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift +++ b/Sources/Soto/Services/MediaLive/MediaLive_shapes.swift @@ -244,6 +244,23 @@ extension MediaLive { public var description: String { return self.rawValue } } + public enum BandwidthReductionFilterStrength: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case auto = "AUTO" + case strength1 = "STRENGTH_1" + case strength2 = "STRENGTH_2" + case strength3 = "STRENGTH_3" + case strength4 = "STRENGTH_4" + public var description: String { return self.rawValue } + } + + public enum BandwidthReductionPostFilterSharpening: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case sharpening1 = "SHARPENING_1" + case sharpening2 = "SHARPENING_2" + case sharpening3 = "SHARPENING_3" + public var description: String { return self.rawValue } + } + public enum BlackoutSlateNetworkEndBlackout: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "DISABLED" case enabled = "ENABLED" @@ -1417,7 +1434,6 @@ extension MediaLive { public enum InputNetworkLocation: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case aws = "AWS" - case onPremise = "ON_PREMISE" case onPremises = "ON_PREMISES" public var description: String { return self.rawValue } } @@ -3187,6 +3203,28 @@ extension MediaLive { } } + public struct BandwidthReductionFilterSettings: AWSEncodableShape & AWSDecodableShape { + /// Configures the sharpening control, which is available when the bandwidth reduction filter is enabled. This + /// control sharpens edges and contours, which produces a specific artistic effect that you might want. + /// We recommend that you test each of the values (including DISABLED) to observe the sharpening effect on the + /// content. + public let postFilterSharpening: BandwidthReductionPostFilterSharpening? + /// Enables the bandwidth reduction filter. The filter strengths range from 1 to 4. We recommend that you always + /// enable this filter and use AUTO, to let MediaLive apply the optimum filtering for the context. 
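// Illustrative only, not part of the diff: enabling the new MediaLive bandwidth reduction filter
// on an H.264 encode via the extended H264FilterSettings shown in this hunk. AUTO lets MediaLive
// choose the filter strength; the surrounding channel/video settings are assumed to exist.
import SotoMediaLive

let filterSettings = MediaLive.H264FilterSettings(
    bandwidthReductionFilterSettings: MediaLive.BandwidthReductionFilterSettings(
        postFilterSharpening: .disabled,
        strength: .auto
    )
)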
+ public let strength: BandwidthReductionFilterStrength? + + @inlinable + public init(postFilterSharpening: BandwidthReductionPostFilterSharpening? = nil, strength: BandwidthReductionFilterStrength? = nil) { + self.postFilterSharpening = postFilterSharpening + self.strength = strength + } + + private enum CodingKeys: String, CodingKey { + case postFilterSharpening = "postFilterSharpening" + case strength = "strength" + } + } + public struct BatchDeleteRequest: AWSEncodableShape { /// List of channel IDs public let channelIds: [String]? @@ -8903,14 +8941,17 @@ extension MediaLive { } public struct H264FilterSettings: AWSEncodableShape & AWSDecodableShape { + public let bandwidthReductionFilterSettings: BandwidthReductionFilterSettings? public let temporalFilterSettings: TemporalFilterSettings? @inlinable - public init(temporalFilterSettings: TemporalFilterSettings? = nil) { + public init(bandwidthReductionFilterSettings: BandwidthReductionFilterSettings? = nil, temporalFilterSettings: TemporalFilterSettings? = nil) { + self.bandwidthReductionFilterSettings = bandwidthReductionFilterSettings self.temporalFilterSettings = temporalFilterSettings } private enum CodingKeys: String, CodingKey { + case bandwidthReductionFilterSettings = "bandwidthReductionFilterSettings" case temporalFilterSettings = "temporalFilterSettings" } } @@ -9186,14 +9227,17 @@ extension MediaLive { } public struct H265FilterSettings: AWSEncodableShape & AWSDecodableShape { + public let bandwidthReductionFilterSettings: BandwidthReductionFilterSettings? public let temporalFilterSettings: TemporalFilterSettings? @inlinable - public init(temporalFilterSettings: TemporalFilterSettings? = nil) { + public init(bandwidthReductionFilterSettings: BandwidthReductionFilterSettings? = nil, temporalFilterSettings: TemporalFilterSettings? = nil) { + self.bandwidthReductionFilterSettings = bandwidthReductionFilterSettings self.temporalFilterSettings = temporalFilterSettings } private enum CodingKeys: String, CodingKey { + case bandwidthReductionFilterSettings = "bandwidthReductionFilterSettings" case temporalFilterSettings = "temporalFilterSettings" } } @@ -13058,10 +13102,99 @@ extension MediaLive { } } + public struct MultiplexContainerSettings: AWSEncodableShape & AWSDecodableShape { + public let multiplexM2tsSettings: MultiplexM2tsSettings? + + @inlinable + public init(multiplexM2tsSettings: MultiplexM2tsSettings? = nil) { + self.multiplexM2tsSettings = multiplexM2tsSettings + } + + public func validate(name: String) throws { + try self.multiplexM2tsSettings?.validate(name: "\(name).multiplexM2tsSettings") + } + + private enum CodingKeys: String, CodingKey { + case multiplexM2tsSettings = "multiplexM2tsSettings" + } + } + public struct MultiplexGroupSettings: AWSEncodableShape & AWSDecodableShape { public init() {} } + public struct MultiplexM2tsSettings: AWSEncodableShape & AWSDecodableShape { + /// When set to drop, output audio streams will be removed from the program if the selected input audio stream is removed from the input. This allows the output audio configuration to dynamically change based on input configuration. If this is set to encodeSilence, all output audio streams will output encoded silence when not connected to an active input stream. + public let absentInputAudioBehavior: M2tsAbsentInputAudioBehavior? + /// When set to enabled, uses ARIB-compliant field muxing and removes video descriptor. + public let arib: M2tsArib? + /// When set to dvb, uses DVB buffer model for Dolby Digital audio. 
When set to atsc, the ATSC model is used. + public let audioBufferModel: M2tsAudioBufferModel? + /// The number of audio frames to insert for each PES packet. + public let audioFramesPerPes: Int? + /// When set to atsc, uses stream type = 0x81 for AC3 and stream type = 0x87 for EAC3. When set to dvb, uses stream type = 0x06. + public let audioStreamType: M2tsAudioStreamType? + /// When set to enabled, generates captionServiceDescriptor in PMT. + public let ccDescriptor: M2tsCcDescriptor? + /// If set to passthrough, passes any EBIF data from the input source to this output. + public let ebif: M2tsEbifControl? + /// Include or exclude the ES Rate field in the PES header. + public let esRateInPes: M2tsEsRateInPes? + /// If set to passthrough, passes any KLV data from the input source to this output. + public let klv: M2tsKlv? + /// If set to passthrough, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output. + public let nielsenId3Behavior: M2tsNielsenId3Behavior? + /// When set to pcrEveryPesPacket, a Program Clock Reference value is inserted for every Packetized Elementary Stream (PES) header. This parameter is effective only when the PCR PID is the same as the video or audio elementary stream. + public let pcrControl: M2tsPcrControl? + /// Maximum time in milliseconds between Program Clock Reference (PCRs) inserted into the transport stream. + public let pcrPeriod: Int? + /// Optionally pass SCTE-35 signals from the input source to this output. + public let scte35Control: M2tsScte35Control? + /// Defines the amount SCTE-35 preroll will be increased (in milliseconds) on the output. Preroll is the amount of time between the presence of a SCTE-35 indication in a transport stream and the PTS of the video frame it references. Zero means don't add pullup (it doesn't mean set the preroll to zero). Negative pullup is not supported, which means that you can't make the preroll shorter. Be aware that latency in the output will increase by the pullup amount. + public let scte35PrerollPullupMilliseconds: Double? + + @inlinable + public init(absentInputAudioBehavior: M2tsAbsentInputAudioBehavior? = nil, arib: M2tsArib? = nil, audioBufferModel: M2tsAudioBufferModel? = nil, audioFramesPerPes: Int? = nil, audioStreamType: M2tsAudioStreamType? = nil, ccDescriptor: M2tsCcDescriptor? = nil, ebif: M2tsEbifControl? = nil, esRateInPes: M2tsEsRateInPes? = nil, klv: M2tsKlv? = nil, nielsenId3Behavior: M2tsNielsenId3Behavior? = nil, pcrControl: M2tsPcrControl? = nil, pcrPeriod: Int? = nil, scte35Control: M2tsScte35Control? = nil, scte35PrerollPullupMilliseconds: Double? 
= nil) { + self.absentInputAudioBehavior = absentInputAudioBehavior + self.arib = arib + self.audioBufferModel = audioBufferModel + self.audioFramesPerPes = audioFramesPerPes + self.audioStreamType = audioStreamType + self.ccDescriptor = ccDescriptor + self.ebif = ebif + self.esRateInPes = esRateInPes + self.klv = klv + self.nielsenId3Behavior = nielsenId3Behavior + self.pcrControl = pcrControl + self.pcrPeriod = pcrPeriod + self.scte35Control = scte35Control + self.scte35PrerollPullupMilliseconds = scte35PrerollPullupMilliseconds + } + + public func validate(name: String) throws { + try self.validate(self.audioFramesPerPes, name: "audioFramesPerPes", parent: name, min: 0) + try self.validate(self.pcrPeriod, name: "pcrPeriod", parent: name, max: 500) + try self.validate(self.pcrPeriod, name: "pcrPeriod", parent: name, min: 0) + } + + private enum CodingKeys: String, CodingKey { + case absentInputAudioBehavior = "absentInputAudioBehavior" + case arib = "arib" + case audioBufferModel = "audioBufferModel" + case audioFramesPerPes = "audioFramesPerPes" + case audioStreamType = "audioStreamType" + case ccDescriptor = "ccDescriptor" + case ebif = "ebif" + case esRateInPes = "esRateInPes" + case klv = "klv" + case nielsenId3Behavior = "nielsenId3Behavior" + case pcrControl = "pcrControl" + case pcrPeriod = "pcrPeriod" + case scte35Control = "scte35Control" + case scte35PrerollPullupMilliseconds = "scte35PrerollPullupMilliseconds" + } + } + public struct MultiplexMediaConnectOutputDestinationSettings: AWSDecodableShape { /// The MediaConnect entitlement ARN available as a Flow source. public let entitlementArn: String? @@ -13091,15 +13224,22 @@ extension MediaLive { } public struct MultiplexOutputSettings: AWSEncodableShape & AWSDecodableShape { + public let containerSettings: MultiplexContainerSettings? /// Destination is a Multiplex. public let destination: OutputLocationRef? @inlinable - public init(destination: OutputLocationRef? = nil) { + public init(containerSettings: MultiplexContainerSettings? = nil, destination: OutputLocationRef? = nil) { + self.containerSettings = containerSettings self.destination = destination } + public func validate(name: String) throws { + try self.containerSettings?.validate(name: "\(name).containerSettings") + } + private enum CodingKeys: String, CodingKey { + case containerSettings = "containerSettings" case destination = "destination" } } @@ -13927,6 +14067,7 @@ extension MediaLive { public func validate(name: String) throws { try self.archiveOutputSettings?.validate(name: "\(name).archiveOutputSettings") try self.hlsOutputSettings?.validate(name: "\(name).hlsOutputSettings") + try self.multiplexOutputSettings?.validate(name: "\(name).multiplexOutputSettings") try self.rtmpOutputSettings?.validate(name: "\(name).rtmpOutputSettings") try self.srtOutputSettings?.validate(name: "\(name).srtOutputSettings") try self.udpOutputSettings?.validate(name: "\(name).udpOutputSettings") diff --git a/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_api.swift b/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_api.swift index 0667c12953..b7e1af45d4 100644 --- a/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_api.swift +++ b/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS MediaPackageV2 service. /// -/// This guide is intended for creating AWS Elemental MediaPackage resources in MediaPackage Version 2 (v2) starting from May 2023. 
To get started with MediaPackage v2, create your MediaPackage resources. There isn't an automated process to migrate your resources from MediaPackage v1 to MediaPackage v2. The names of the entities that you use to access this API, like URLs and ARNs, all have the versioning information added, like "v2", to distinguish from the prior version. If you used MediaPackage prior to this release, you can't use the MediaPackage v2 CLI or the MediaPackage v2 API to access any MediaPackage v1 resources. If you created resources in MediaPackage v1, use video on demand (VOD) workflows, and aren't looking to migrate to MediaPackage v2 yet, see the MediaPackage v1 Live API Reference. This is the AWS Elemental MediaPackage v2 Live REST API Reference. It describes all the MediaPackage API operations for live content in detail, and provides sample requests, responses, and errors for the supported web services protocols. We assume that you have the IAM permissions that you need to use MediaPackage via the REST API. We also assume that you are familiar with the features and operations of MediaPackage, as described in the AWS Elemental MediaPackage User Guide. +/// This guide is intended for creating AWS Elemental MediaPackage resources in MediaPackage Version 2 (v2) starting from May 2023. To get started with MediaPackage v2, create your MediaPackage resources. There isn't an automated process to migrate your resources from MediaPackage v1 to MediaPackage v2. The names of the entities that you use to access this API, like URLs and ARNs, all have the versioning information added, like "v2", to distinguish from the prior version. If you used MediaPackage prior to this release, you can't use the MediaPackage v2 CLI or the MediaPackage v2 API to access any MediaPackage v1 resources. If you created resources in MediaPackage v1, use video on demand (VOD) workflows, and aren't looking to migrate to MediaPackage v2 yet, see the MediaPackage v1 Live API Reference. This is the AWS Elemental MediaPackage v2 Live REST API Reference. It describes all the MediaPackage API operations for live content in detail, and provides sample requests, responses, and errors for the supported web services protocols. We assume that you have the IAM permissions that you need to use MediaPackage via the REST API. We also assume that you are familiar with the features and operations of MediaPackage, as described in the AWS Elemental MediaPackage User Guide. public struct MediaPackageV2: AWSService { // MARK: Member variables @@ -759,7 +759,7 @@ public struct MediaPackageV2: AWSService { return try await self.putOriginEndpointPolicy(input, logger: logger) } - /// Assigns one of more tags (key-value pairs) to the specified MediaPackage resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. + /// Assigns one of more tags (key-value pairs) to the specified MediaPackage resource. Tags can help you organize and categorize your resources. 
You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. @Sendable @inlinable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -772,7 +772,7 @@ public struct MediaPackageV2: AWSService { logger: logger ) } - /// Assigns one of more tags (key-value pairs) to the specified MediaPackage resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. + /// Assigns one of more tags (key-value pairs) to the specified MediaPackage resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource operation with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. /// /// Parameters: /// - resourceArn: The ARN of the MediaPackage resource that you're adding tags to. diff --git a/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift b/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift index a639527b5b..9fe4c48350 100644 --- a/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift +++ b/Sources/Soto/Services/MediaPackageV2/MediaPackageV2_shapes.swift @@ -471,18 +471,20 @@ extension MediaPackageV2 { public let manifestName: String /// The total duration (in seconds) of the manifest's content. public let manifestWindowSeconds: Int? - /// Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output. + /// Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. 
Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output. public let programDateTimeIntervalSeconds: Int? public let scteHls: ScteHls? + public let startTag: StartTag? @inlinable - public init(childManifestName: String? = nil, filterConfiguration: FilterConfiguration? = nil, manifestName: String, manifestWindowSeconds: Int? = nil, programDateTimeIntervalSeconds: Int? = nil, scteHls: ScteHls? = nil) { + public init(childManifestName: String? = nil, filterConfiguration: FilterConfiguration? = nil, manifestName: String, manifestWindowSeconds: Int? = nil, programDateTimeIntervalSeconds: Int? = nil, scteHls: ScteHls? = nil, startTag: StartTag? = nil) { self.childManifestName = childManifestName self.filterConfiguration = filterConfiguration self.manifestName = manifestName self.manifestWindowSeconds = manifestWindowSeconds self.programDateTimeIntervalSeconds = programDateTimeIntervalSeconds self.scteHls = scteHls + self.startTag = startTag } public func validate(name: String) throws { @@ -501,6 +503,7 @@ extension MediaPackageV2 { case manifestWindowSeconds = "ManifestWindowSeconds" case programDateTimeIntervalSeconds = "ProgramDateTimeIntervalSeconds" case scteHls = "ScteHls" + case startTag = "StartTag" } } @@ -512,18 +515,20 @@ extension MediaPackageV2 { public let manifestName: String /// The total duration (in seconds) of the manifest's content. public let manifestWindowSeconds: Int? - /// Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output. + /// Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output. public let programDateTimeIntervalSeconds: Int? public let scteHls: ScteHls? + public let startTag: StartTag? @inlinable - public init(childManifestName: String? = nil, filterConfiguration: FilterConfiguration? = nil, manifestName: String, manifestWindowSeconds: Int? = nil, programDateTimeIntervalSeconds: Int? = nil, scteHls: ScteHls? = nil) { + public init(childManifestName: String? = nil, filterConfiguration: FilterConfiguration? = nil, manifestName: String, manifestWindowSeconds: Int? = nil, programDateTimeIntervalSeconds: Int? = nil, scteHls: ScteHls? = nil, startTag: StartTag? 
= nil) { self.childManifestName = childManifestName self.filterConfiguration = filterConfiguration self.manifestName = manifestName self.manifestWindowSeconds = manifestWindowSeconds self.programDateTimeIntervalSeconds = programDateTimeIntervalSeconds self.scteHls = scteHls + self.startTag = startTag } public func validate(name: String) throws { @@ -542,6 +547,7 @@ extension MediaPackageV2 { case manifestWindowSeconds = "ManifestWindowSeconds" case programDateTimeIntervalSeconds = "ProgramDateTimeIntervalSeconds" case scteHls = "ScteHls" + case startTag = "StartTag" } } @@ -985,6 +991,8 @@ extension MediaPackageV2 { } public struct FilterConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Optionally specify the clip start time for all of your manifest egress requests. When you include clip start time, note that you cannot use clip start time query parameters for this manifest's endpoint URL. + public let clipStartTime: Date? /// Optionally specify the end time for all of your manifest egress requests. When you include end time, note that you cannot use end time query parameters for this manifest's endpoint URL. public let end: Date? /// Optionally specify one or more manifest filters for all of your manifest egress requests. When you include a manifest filter, note that you cannot use an identical manifest filter query parameter for this manifest's endpoint URL. @@ -995,7 +1003,8 @@ extension MediaPackageV2 { public let timeDelaySeconds: Int? @inlinable - public init(end: Date? = nil, manifestFilter: String? = nil, start: Date? = nil, timeDelaySeconds: Int? = nil) { + public init(clipStartTime: Date? = nil, end: Date? = nil, manifestFilter: String? = nil, start: Date? = nil, timeDelaySeconds: Int? = nil) { + self.clipStartTime = clipStartTime self.end = end self.manifestFilter = manifestFilter self.start = start @@ -1003,6 +1012,7 @@ extension MediaPackageV2 { } private enum CodingKeys: String, CodingKey { + case clipStartTime = "ClipStartTime" case end = "End" case manifestFilter = "ManifestFilter" case start = "Start" @@ -1288,20 +1298,22 @@ extension MediaPackageV2 { public let manifestName: String /// The total duration (in seconds) of the manifest's content. public let manifestWindowSeconds: Int? - /// Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output. + /// Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output. public let programDateTimeIntervalSeconds: Int? public let scteHls: ScteHls? + public let startTag: StartTag? /// The egress domain URL for stream delivery from MediaPackage. public let url: String @inlinable - public init(childManifestName: String? 
= nil, filterConfiguration: FilterConfiguration? = nil, manifestName: String, manifestWindowSeconds: Int? = nil, programDateTimeIntervalSeconds: Int? = nil, scteHls: ScteHls? = nil, url: String) { + public init(childManifestName: String? = nil, filterConfiguration: FilterConfiguration? = nil, manifestName: String, manifestWindowSeconds: Int? = nil, programDateTimeIntervalSeconds: Int? = nil, scteHls: ScteHls? = nil, startTag: StartTag? = nil, url: String) { self.childManifestName = childManifestName self.filterConfiguration = filterConfiguration self.manifestName = manifestName self.manifestWindowSeconds = manifestWindowSeconds self.programDateTimeIntervalSeconds = programDateTimeIntervalSeconds self.scteHls = scteHls + self.startTag = startTag self.url = url } @@ -1312,6 +1324,7 @@ extension MediaPackageV2 { case manifestWindowSeconds = "ManifestWindowSeconds" case programDateTimeIntervalSeconds = "ProgramDateTimeIntervalSeconds" case scteHls = "ScteHls" + case startTag = "StartTag" case url = "Url" } } @@ -1324,20 +1337,22 @@ extension MediaPackageV2 { public let manifestName: String /// The total duration (in seconds) of the manifest's content. public let manifestWindowSeconds: Int? - /// Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output. + /// Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output. public let programDateTimeIntervalSeconds: Int? public let scteHls: ScteHls? + public let startTag: StartTag? /// The egress domain URL for stream delivery from MediaPackage. public let url: String @inlinable - public init(childManifestName: String? = nil, filterConfiguration: FilterConfiguration? = nil, manifestName: String, manifestWindowSeconds: Int? = nil, programDateTimeIntervalSeconds: Int? = nil, scteHls: ScteHls? = nil, url: String) { + public init(childManifestName: String? = nil, filterConfiguration: FilterConfiguration? = nil, manifestName: String, manifestWindowSeconds: Int? = nil, programDateTimeIntervalSeconds: Int? = nil, scteHls: ScteHls? = nil, startTag: StartTag? 
= nil, url: String) { self.childManifestName = childManifestName self.filterConfiguration = filterConfiguration self.manifestName = manifestName self.manifestWindowSeconds = manifestWindowSeconds self.programDateTimeIntervalSeconds = programDateTimeIntervalSeconds self.scteHls = scteHls + self.startTag = startTag self.url = url } @@ -1348,6 +1363,7 @@ extension MediaPackageV2 { case manifestWindowSeconds = "ManifestWindowSeconds" case programDateTimeIntervalSeconds = "ProgramDateTimeIntervalSeconds" case scteHls = "ScteHls" + case startTag = "StartTag" case url = "Url" } } @@ -2061,6 +2077,24 @@ extension MediaPackageV2 { } } + public struct StartTag: AWSEncodableShape & AWSDecodableShape { + /// Specify the value for PRECISE within your EXT-X-START tag. Leave blank, or choose false, to use the default value NO. Choose yes to use the value YES. + public let precise: Bool? + /// Specify the value for TIME-OFFSET within your EXT-X-START tag. Enter a signed floating point value which, if positive, must be less than the configured manifest duration minus three times the configured segment target duration. If negative, the absolute value must be larger than three times the configured segment target duration, and the absolute value must be smaller than the configured manifest duration. + public let timeOffset: Float + + @inlinable + public init(precise: Bool? = nil, timeOffset: Float) { + self.precise = precise + self.timeOffset = timeOffset + } + + private enum CodingKeys: String, CodingKey { + case precise = "Precise" + case timeOffset = "TimeOffset" + } + } + public struct TagResourceRequest: AWSEncodableShape { /// The ARN of the MediaPackage resource that you're adding tags to. public let resourceArn: String diff --git a/Sources/Soto/Services/MemoryDB/MemoryDB_api.swift b/Sources/Soto/Services/MemoryDB/MemoryDB_api.swift index 58cdf41db0..fa28572bb4 100644 --- a/Sources/Soto/Services/MemoryDB/MemoryDB_api.swift +++ b/Sources/Soto/Services/MemoryDB/MemoryDB_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS MemoryDB service. /// -/// MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS’ flexible and friendly data structures, APIs, and commands. +/// MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures. MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis’ flexible and friendly data structures, APIs, and commands. public struct MemoryDB: AWSService { // MARK: Member variables @@ -216,7 +216,8 @@ public struct MemoryDB: AWSService { /// - clusterName: The name of the cluster. This value must be unique as it also serves as the cluster identifier. /// - dataTiering: Enables data tiering. Data tiering is only supported for clusters using the r6gd node type. This parameter must be set when using r6gd nodes. For more information, see Data tiering. /// - description: An optional description of the cluster. 
- /// - engineVersion: The version number of the Redis OSS engine to be used for the cluster. + /// - engine: The name of the engine to be used for the nodes in this cluster. The value must be set to either Redis or Valkey. + /// - engineVersion: The version number of the engine to be used for the cluster. /// - kmsKeyId: The ID of the KMS key used to encrypt the cluster. /// - maintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 /// - nodeType: The compute and memory capacity of the nodes in the cluster. @@ -241,6 +242,7 @@ public struct MemoryDB: AWSService { clusterName: String, dataTiering: Bool? = nil, description: String? = nil, + engine: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, maintenanceWindow: String? = nil, @@ -266,6 +268,7 @@ public struct MemoryDB: AWSService { clusterName: clusterName, dataTiering: dataTiering, description: description, + engine: engine, engineVersion: engineVersion, kmsKeyId: kmsKeyId, maintenanceWindow: maintenanceWindow, @@ -468,7 +471,7 @@ public struct MemoryDB: AWSService { return try await self.deleteACL(input, logger: logger) } - /// Deletes a cluster. It also deletes all associated nodes and node endpoints CreateSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception. + /// Deletes a cluster. It also deletes all associated nodes and node endpoints @Sendable @inlinable public func deleteCluster(_ input: DeleteClusterRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteClusterResponse { @@ -481,7 +484,7 @@ public struct MemoryDB: AWSService { logger: logger ) } - /// Deletes a cluster. It also deletes all associated nodes and node endpoints CreateSnapshot permission is required to create a final snapshot. Without this permission, the API call will fail with an Access Denied exception. + /// Deletes a cluster. It also deletes all associated nodes and node endpoints /// /// Parameters: /// - clusterName: The name of the cluster to be deleted @@ -689,7 +692,7 @@ public struct MemoryDB: AWSService { return try await self.describeClusters(input, logger: logger) } - /// Returns a list of the available Redis OSS engine versions. + /// Returns a list of the available engine versions. @Sendable @inlinable public func describeEngineVersions(_ input: DescribeEngineVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeEngineVersionsResponse { @@ -702,11 +705,12 @@ public struct MemoryDB: AWSService { logger: logger ) } - /// Returns a list of the available Redis OSS engine versions. + /// Returns a list of the available engine versions. /// /// Parameters: /// - defaultOnly: If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. - /// - engineVersion: The Redis OSS engine version + /// - engine: The engine version to return. Valid values are either valkey or redis. + /// - engineVersion: The engine version. /// - maxResults: The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. 
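// Sketch of the new `engine` parameter added in this hunk (not part of the upstream diff), which
// selects between Redis OSS and Valkey. The string values follow the parameter documentation
// above; the ACL name, cluster name, and node type are illustrative.
import SotoMemoryDB

func createValkeyCluster(using memoryDB: MemoryDB) async throws {
    // List the engine versions available for Valkey before creating the cluster.
    let versions = try await memoryDB.describeEngineVersions(engine: "valkey")
    print(versions)

    // Create a cluster that runs Valkey instead of the default Redis OSS engine.
    _ = try await memoryDB.createCluster(
        aclName: "open-access",           // illustrative ACL name
        clusterName: "my-valkey-cluster",
        engine: "Valkey",                 // "Redis" or "Valkey", per the docs above
        nodeType: "db.r6g.large"
    )
}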
/// - nextToken: An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. /// - parameterGroupFamily: The name of a specific parameter group family to return details for. @@ -714,6 +718,7 @@ public struct MemoryDB: AWSService { @inlinable public func describeEngineVersions( defaultOnly: Bool? = nil, + engine: String? = nil, engineVersion: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, @@ -722,6 +727,7 @@ public struct MemoryDB: AWSService { ) async throws -> DescribeEngineVersionsResponse { let input = DescribeEngineVersionsRequest( defaultOnly: defaultOnly, + engine: engine, engineVersion: engineVersion, maxResults: maxResults, nextToken: nextToken, @@ -1381,6 +1387,7 @@ public struct MemoryDB: AWSService { /// - aclName: The Access Control List that is associated with the cluster /// - clusterName: The name of the cluster to update /// - description: The description of the cluster to update + /// - engine: The name of the engine to be used for the nodes in this cluster. The value must be set to either Redis or Valkey. /// - engineVersion: The upgraded version of the engine to be run on the nodes. You can upgrade to a newer engine version, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. /// - maintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 /// - nodeType: A valid node type that you want to scale this cluster up or down to. @@ -1398,6 +1405,7 @@ public struct MemoryDB: AWSService { aclName: String? = nil, clusterName: String, description: String? = nil, + engine: String? = nil, engineVersion: String? = nil, maintenanceWindow: String? = nil, nodeType: String? = nil, @@ -1415,6 +1423,7 @@ public struct MemoryDB: AWSService { aclName: aclName, clusterName: clusterName, description: description, + engine: engine, engineVersion: engineVersion, maintenanceWindow: maintenanceWindow, nodeType: nodeType, @@ -1645,13 +1654,15 @@ extension MemoryDB { /// /// - Parameters: /// - defaultOnly: If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. - /// - engineVersion: The Redis OSS engine version + /// - engine: The engine version to return. Valid values are either valkey or redis. + /// - engineVersion: The engine version. /// - maxResults: The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. /// - parameterGroupFamily: The name of a specific parameter group family to return details for. /// - logger: Logger used for logging @inlinable public func describeEngineVersionsPaginator( defaultOnly: Bool? = nil, + engine: String? = nil, engineVersion: String? = nil, maxResults: Int? = nil, parameterGroupFamily: String? 
= nil, @@ -1659,6 +1670,7 @@ extension MemoryDB { ) -> AWSClient.PaginatorSequence { let input = DescribeEngineVersionsRequest( defaultOnly: defaultOnly, + engine: engine, engineVersion: engineVersion, maxResults: maxResults, parameterGroupFamily: parameterGroupFamily @@ -2079,6 +2091,7 @@ extension MemoryDB.DescribeEngineVersionsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> MemoryDB.DescribeEngineVersionsRequest { return .init( defaultOnly: self.defaultOnly, + engine: self.engine, engineVersion: self.engineVersion, maxResults: self.maxResults, nextToken: token, diff --git a/Sources/Soto/Services/MemoryDB/MemoryDB_shapes.swift b/Sources/Soto/Services/MemoryDB/MemoryDB_shapes.swift index 798a5c2630..a5a2fa39ea 100644 --- a/Sources/Soto/Services/MemoryDB/MemoryDB_shapes.swift +++ b/Sources/Soto/Services/MemoryDB/MemoryDB_shapes.swift @@ -255,9 +255,11 @@ extension MemoryDB { public let dataTiering: DataTieringStatus? /// A description of the cluster public let description: String? - /// The Redis OSS engine patch version used by the cluster + /// The Redis OSS or Valkey engine used by the cluster. + public let engine: String? + /// The engine patch version used by the cluster public let enginePatchVersion: String? - /// The Redis OSS engine version used by the cluster + /// The Redis engine version used by the cluster public let engineVersion: String? /// The ID of the KMS key used to encrypt the cluster public let kmsKeyId: String? @@ -295,7 +297,7 @@ extension MemoryDB { public let tlsEnabled: Bool? @inlinable - public init(aclName: String? = nil, arn: String? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityMode: AZStatus? = nil, clusterEndpoint: Endpoint? = nil, dataTiering: DataTieringStatus? = nil, description: String? = nil, enginePatchVersion: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, maintenanceWindow: String? = nil, name: String? = nil, nodeType: String? = nil, numberOfShards: Int? = nil, parameterGroupName: String? = nil, parameterGroupStatus: String? = nil, pendingUpdates: ClusterPendingUpdates? = nil, securityGroups: [SecurityGroupMembership]? = nil, shards: [Shard]? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil, snsTopicArn: String? = nil, snsTopicStatus: String? = nil, status: String? = nil, subnetGroupName: String? = nil, tlsEnabled: Bool? = nil) { + public init(aclName: String? = nil, arn: String? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityMode: AZStatus? = nil, clusterEndpoint: Endpoint? = nil, dataTiering: DataTieringStatus? = nil, description: String? = nil, engine: String? = nil, enginePatchVersion: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, maintenanceWindow: String? = nil, name: String? = nil, nodeType: String? = nil, numberOfShards: Int? = nil, parameterGroupName: String? = nil, parameterGroupStatus: String? = nil, pendingUpdates: ClusterPendingUpdates? = nil, securityGroups: [SecurityGroupMembership]? = nil, shards: [Shard]? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil, snsTopicArn: String? = nil, snsTopicStatus: String? = nil, status: String? = nil, subnetGroupName: String? = nil, tlsEnabled: Bool? 
= nil) { self.aclName = aclName self.arn = arn self.autoMinorVersionUpgrade = autoMinorVersionUpgrade @@ -303,6 +305,7 @@ extension MemoryDB { self.clusterEndpoint = clusterEndpoint self.dataTiering = dataTiering self.description = description + self.engine = engine self.enginePatchVersion = enginePatchVersion self.engineVersion = engineVersion self.kmsKeyId = kmsKeyId @@ -332,6 +335,7 @@ extension MemoryDB { case clusterEndpoint = "ClusterEndpoint" case dataTiering = "DataTiering" case description = "Description" + case engine = "Engine" case enginePatchVersion = "EnginePatchVersion" case engineVersion = "EngineVersion" case kmsKeyId = "KmsKeyId" @@ -357,7 +361,9 @@ extension MemoryDB { public struct ClusterConfiguration: AWSDecodableShape { /// The description of the cluster configuration public let description: String? - /// The Redis OSS engine version used by the cluster + /// The configuration for the Redis OSS or Valkey engine used by the cluster. + public let engine: String? + /// The engine version used by the cluster public let engineVersion: String? /// The specified maintenance window for the cluster public let maintenanceWindow: String? @@ -385,8 +391,9 @@ extension MemoryDB { public let vpcId: String? @inlinable - public init(description: String? = nil, engineVersion: String? = nil, maintenanceWindow: String? = nil, name: String? = nil, nodeType: String? = nil, numShards: Int? = nil, parameterGroupName: String? = nil, port: Int? = nil, shards: [ShardDetail]? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil, subnetGroupName: String? = nil, topicArn: String? = nil, vpcId: String? = nil) { + public init(description: String? = nil, engine: String? = nil, engineVersion: String? = nil, maintenanceWindow: String? = nil, name: String? = nil, nodeType: String? = nil, numShards: Int? = nil, parameterGroupName: String? = nil, port: Int? = nil, shards: [ShardDetail]? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil, subnetGroupName: String? = nil, topicArn: String? = nil, vpcId: String? = nil) { self.description = description + self.engine = engine self.engineVersion = engineVersion self.maintenanceWindow = maintenanceWindow self.name = name @@ -404,6 +411,7 @@ extension MemoryDB { private enum CodingKeys: String, CodingKey { case description = "Description" + case engine = "Engine" case engineVersion = "EngineVersion" case maintenanceWindow = "MaintenanceWindow" case name = "Name" @@ -549,7 +557,9 @@ extension MemoryDB { public let dataTiering: Bool? /// An optional description of the cluster. public let description: String? - /// The version number of the Redis OSS engine to be used for the cluster. + /// The name of the engine to be used for the nodes in this cluster. The value must be set to either Redis or Valkey. + public let engine: String? + /// The version number of the engine to be used for the cluster. public let engineVersion: String? /// The ID of the KMS key used to encrypt the cluster. public let kmsKeyId: String? @@ -585,12 +595,13 @@ extension MemoryDB { public let tlsEnabled: Bool? @inlinable - public init(aclName: String, autoMinorVersionUpgrade: Bool? = nil, clusterName: String, dataTiering: Bool? = nil, description: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, maintenanceWindow: String? = nil, nodeType: String, numReplicasPerShard: Int? = nil, numShards: Int? = nil, parameterGroupName: String? = nil, port: Int? = nil, securityGroupIds: [String]? = nil, snapshotArns: [String]? 
= nil, snapshotName: String? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil, snsTopicArn: String? = nil, subnetGroupName: String? = nil, tags: [Tag]? = nil, tlsEnabled: Bool? = nil) { + public init(aclName: String, autoMinorVersionUpgrade: Bool? = nil, clusterName: String, dataTiering: Bool? = nil, description: String? = nil, engine: String? = nil, engineVersion: String? = nil, kmsKeyId: String? = nil, maintenanceWindow: String? = nil, nodeType: String, numReplicasPerShard: Int? = nil, numShards: Int? = nil, parameterGroupName: String? = nil, port: Int? = nil, securityGroupIds: [String]? = nil, snapshotArns: [String]? = nil, snapshotName: String? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil, snsTopicArn: String? = nil, subnetGroupName: String? = nil, tags: [Tag]? = nil, tlsEnabled: Bool? = nil) { self.aclName = aclName self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.clusterName = clusterName self.dataTiering = dataTiering self.description = description + self.engine = engine self.engineVersion = engineVersion self.kmsKeyId = kmsKeyId self.maintenanceWindow = maintenanceWindow @@ -622,6 +633,7 @@ extension MemoryDB { case clusterName = "ClusterName" case dataTiering = "DataTiering" case description = "Description" + case engine = "Engine" case engineVersion = "EngineVersion" case kmsKeyId = "KmsKeyId" case maintenanceWindow = "MaintenanceWindow" @@ -1100,7 +1112,9 @@ extension MemoryDB { public struct DescribeEngineVersionsRequest: AWSEncodableShape { /// If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned. public let defaultOnly: Bool? - /// The Redis OSS engine version + /// The engine version to return. Valid values are either valkey or redis. + public let engine: String? + /// The engine version. public let engineVersion: String? /// The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved. public let maxResults: Int? @@ -1110,8 +1124,9 @@ extension MemoryDB { public let parameterGroupFamily: String? @inlinable - public init(defaultOnly: Bool? = nil, engineVersion: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, parameterGroupFamily: String? = nil) { + public init(defaultOnly: Bool? = nil, engine: String? = nil, engineVersion: String? = nil, maxResults: Int? = nil, nextToken: String? = nil, parameterGroupFamily: String? = nil) { self.defaultOnly = defaultOnly + self.engine = engine self.engineVersion = engineVersion self.maxResults = maxResults self.nextToken = nextToken @@ -1120,6 +1135,7 @@ extension MemoryDB { private enum CodingKeys: String, CodingKey { case defaultOnly = "DefaultOnly" + case engine = "Engine" case engineVersion = "EngineVersion" case maxResults = "MaxResults" case nextToken = "NextToken" @@ -1605,6 +1621,8 @@ extension MemoryDB { } public struct EngineVersionInfo: AWSDecodableShape { + /// The version of the Redis OSS or Valkey engine used by the cluster. + public let engine: String? /// The patched engine version public let enginePatchVersion: String? /// The engine version @@ -1613,13 +1631,15 @@ extension MemoryDB { public let parameterGroupFamily: String? @inlinable - public init(enginePatchVersion: String? = nil, engineVersion: String? = nil, parameterGroupFamily: String? = nil) { + public init(engine: String? = nil, enginePatchVersion: String? 
= nil, engineVersion: String? = nil, parameterGroupFamily: String? = nil) { + self.engine = engine self.enginePatchVersion = enginePatchVersion self.engineVersion = engineVersion self.parameterGroupFamily = parameterGroupFamily } private enum CodingKeys: String, CodingKey { + case engine = "Engine" case enginePatchVersion = "EnginePatchVersion" case engineVersion = "EngineVersion" case parameterGroupFamily = "ParameterGroupFamily" @@ -2135,6 +2155,8 @@ extension MemoryDB { public let clusterName: String? /// Provides details of the service update public let description: String? + /// The MemoryDB engine to which the update applies. The values are either Redis or Valkey. + public let engine: String? /// A list of nodes updated by the service update public let nodesUpdated: String? /// The date when the service update is initially available @@ -2147,10 +2169,11 @@ extension MemoryDB { public let type: ServiceUpdateType? @inlinable - public init(autoUpdateStartDate: Date? = nil, clusterName: String? = nil, description: String? = nil, nodesUpdated: String? = nil, releaseDate: Date? = nil, serviceUpdateName: String? = nil, status: ServiceUpdateStatus? = nil, type: ServiceUpdateType? = nil) { + public init(autoUpdateStartDate: Date? = nil, clusterName: String? = nil, description: String? = nil, engine: String? = nil, nodesUpdated: String? = nil, releaseDate: Date? = nil, serviceUpdateName: String? = nil, status: ServiceUpdateStatus? = nil, type: ServiceUpdateType? = nil) { self.autoUpdateStartDate = autoUpdateStartDate self.clusterName = clusterName self.description = description + self.engine = engine self.nodesUpdated = nodesUpdated self.releaseDate = releaseDate self.serviceUpdateName = serviceUpdateName @@ -2162,6 +2185,7 @@ extension MemoryDB { case autoUpdateStartDate = "AutoUpdateStartDate" case clusterName = "ClusterName" case description = "Description" + case engine = "Engine" case nodesUpdated = "NodesUpdated" case releaseDate = "ReleaseDate" case serviceUpdateName = "ServiceUpdateName" @@ -2536,6 +2560,8 @@ extension MemoryDB { public let clusterName: String /// The description of the cluster to update public let description: String? + /// The name of the engine to be used for the nodes in this cluster. The value must be set to either Redis or Valkey. + public let engine: String? /// The upgraded version of the engine to be run on the nodes. You can upgrade to a newer engine version, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. public let engineVersion: String? /// Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are: sun mon tue wed thu fri sat Example: sun:23:00-mon:01:30 @@ -2560,10 +2586,11 @@ extension MemoryDB { public let snsTopicStatus: String? @inlinable - public init(aclName: String? = nil, clusterName: String, description: String? = nil, engineVersion: String? = nil, maintenanceWindow: String? = nil, nodeType: String? = nil, parameterGroupName: String? = nil, replicaConfiguration: ReplicaConfigurationRequest? = nil, securityGroupIds: [String]? = nil, shardConfiguration: ShardConfigurationRequest? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil, snsTopicArn: String? = nil, snsTopicStatus: String? 
= nil) { + public init(aclName: String? = nil, clusterName: String, description: String? = nil, engine: String? = nil, engineVersion: String? = nil, maintenanceWindow: String? = nil, nodeType: String? = nil, parameterGroupName: String? = nil, replicaConfiguration: ReplicaConfigurationRequest? = nil, securityGroupIds: [String]? = nil, shardConfiguration: ShardConfigurationRequest? = nil, snapshotRetentionLimit: Int? = nil, snapshotWindow: String? = nil, snsTopicArn: String? = nil, snsTopicStatus: String? = nil) { self.aclName = aclName self.clusterName = clusterName self.description = description + self.engine = engine self.engineVersion = engineVersion self.maintenanceWindow = maintenanceWindow self.nodeType = nodeType @@ -2586,6 +2613,7 @@ extension MemoryDB { case aclName = "ACLName" case clusterName = "ClusterName" case description = "Description" + case engine = "Engine" case engineVersion = "EngineVersion" case maintenanceWindow = "MaintenanceWindow" case nodeType = "NodeType" diff --git a/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_api.swift b/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_api.swift index 485fd4132e..a76f37ecca 100644 --- a/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_api.swift +++ b/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_api.swift @@ -245,7 +245,7 @@ public struct NeptuneGraph: AWSService { /// - blankNodeHandling: The method to handle blank nodes in the dataset. Currently, only convertToIri is supported, meaning blank nodes are converted to unique IRIs at load time. Must be provided when format is ntriples. For more information, see Handling RDF values. /// - deletionProtection: Indicates whether or not to enable deletion protection on the graph. The graph can’t be deleted when deletion protection is enabled. (true or false). /// - failOnError: If set to true, the task halts when an import error is encountered. If set to false, the task skips the data that caused the error and continues if possible. - /// - format: Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or OPENCYPHER, which identies the openCypher load format. + /// - format: Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format, OPEN_CYPHER, which identifies the openCypher load format, or ntriples, which identifies the RDF n-triples format. /// - graphName: A name for the new Neptune Analytics graph to be created. The name must contain from 1 to 63 letters, numbers, or hyphens, and its first character must be a letter. It cannot end with a hyphen or contain two consecutive hyphens. /// - importOptions: Contains options for controlling the import process. For example, if the failOnError key is set to false, the import skips problem data and attempts to continue (whereas if set to true, the default, or if omitted, the import operation halts immediately when an error is encountered. /// - kmsKeyIdentifier: Specifies a KMS key to use to encrypt data imported into the new graph. 
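// Sketch of the relaxed provisionedMemory validation in the NeptuneGraph shapes below (not part
// of the upstream diff): the minimum provisioned memory drops from 32 to 16 m-NCUs. The
// `createGraph(graphName:provisionedMemory:)` labels are assumptions — that convenience method
// is not shown in this section — and the graph name is illustrative. The format values for
// import tasks (CSV, OPEN_CYPHER, ntriples) are documented in the hunk above.
import SotoNeptuneGraph

func createSmallGraph(using neptuneGraph: NeptuneGraph) async throws {
    // 16 m-NCUs is now accepted by the request validation (previously the floor was 32).
    _ = try await neptuneGraph.createGraph(
        graphName: "example-graph",
        provisionedMemory: 16
    )
}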
diff --git a/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_shapes.swift b/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_shapes.swift index 268eb0a422..4a8dfab2f5 100644 --- a/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_shapes.swift +++ b/Sources/Soto/Services/NeptuneGraph/NeptuneGraph_shapes.swift @@ -242,7 +242,7 @@ extension NeptuneGraph { try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, min: 1) try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, max: 24576) - try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 32) + try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 16) try self.validate(self.replicaCount, name: "replicaCount", parent: name, max: 2) try self.validate(self.replicaCount, name: "replicaCount", parent: name, min: 0) try self.tags?.forEach { @@ -418,7 +418,7 @@ extension NeptuneGraph { public let deletionProtection: Bool? /// If set to true, the task halts when an import error is encountered. If set to false, the task skips the data that caused the error and continues if possible. public let failOnError: Bool? - /// Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format or OPENCYPHER, which identies the openCypher load format. + /// Specifies the format of S3 data to be imported. Valid values are CSV, which identifies the Gremlin CSV format, OPEN_CYPHER, which identifies the openCypher load format, or ntriples, which identifies the RDF n-triples format. public let format: Format? /// A name for the new Neptune Analytics graph to be created. The name must contain from 1 to 63 letters, numbers, or hyphens, and its first character must be a letter. It cannot end with a hyphen or contain two consecutive hyphens. 
public let graphName: String @@ -470,9 +470,9 @@ extension NeptuneGraph { try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, min: 1) try self.validate(self.kmsKeyIdentifier, name: "kmsKeyIdentifier", parent: name, pattern: "^arn:aws(|-cn|-us-gov):kms:[a-zA-Z0-9-]*:[0-9]{12}:key/[a-zA-Z0-9-]{36}$") try self.validate(self.maxProvisionedMemory, name: "maxProvisionedMemory", parent: name, max: 24576) - try self.validate(self.maxProvisionedMemory, name: "maxProvisionedMemory", parent: name, min: 32) + try self.validate(self.maxProvisionedMemory, name: "maxProvisionedMemory", parent: name, min: 16) try self.validate(self.minProvisionedMemory, name: "minProvisionedMemory", parent: name, max: 24576) - try self.validate(self.minProvisionedMemory, name: "minProvisionedMemory", parent: name, min: 32) + try self.validate(self.minProvisionedMemory, name: "minProvisionedMemory", parent: name, min: 16) try self.validate(self.replicaCount, name: "replicaCount", parent: name, max: 2) try self.validate(self.replicaCount, name: "replicaCount", parent: name, min: 0) try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:aws[^:]*:iam::\\d{12}:(role|role/service-role)/[\\w+=,.@-]*$") @@ -2054,7 +2054,7 @@ extension NeptuneGraph { try self.validate(self.graphName, name: "graphName", parent: name, min: 1) try self.validate(self.graphName, name: "graphName", parent: name, pattern: "^(?!g-)[a-z][a-z0-9]*(-[a-z0-9]+)*$") try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, max: 24576) - try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 32) + try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 16) try self.validate(self.replicaCount, name: "replicaCount", parent: name, max: 2) try self.validate(self.replicaCount, name: "replicaCount", parent: name, min: 0) try self.validate(self.snapshotIdentifier, name: "snapshotIdentifier", parent: name, pattern: "^gs-[a-z0-9]{10}$") @@ -2345,7 +2345,7 @@ extension NeptuneGraph { public func validate(name: String) throws { try self.validate(self.graphIdentifier, name: "graphIdentifier", parent: name, pattern: "^g-[a-z0-9]{10}$") try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, max: 24576) - try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 32) + try self.validate(self.provisionedMemory, name: "provisionedMemory", parent: name, min: 16) } private enum CodingKeys: String, CodingKey { diff --git a/Sources/Soto/Services/Organizations/Organizations_api.swift b/Sources/Soto/Services/Organizations/Organizations_api.swift index 15416e5f60..c26c06e47d 100644 --- a/Sources/Soto/Services/Organizations/Organizations_api.swift +++ b/Sources/Soto/Services/Organizations/Organizations_api.swift @@ -130,7 +130,7 @@ public struct Organizations: AWSService { return try await self.acceptHandshake(input, logger: logger) } - /// Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY This operation can be called only from the organization's + /// Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. 
Refer to the Organizations User Guide for information about each policy type: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY This operation can be called only from the organization's /// management account or by a member account that is a delegated administrator for an Amazon Web Services service. @Sendable @inlinable @@ -144,7 +144,7 @@ public struct Organizations: AWSService { logger: logger ) } - /// Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY This operation can be called only from the organization's + /// Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY This operation can be called only from the organization's /// management account or by a member account that is a delegated administrator for an Amazon Web Services service. /// /// Parameters: @@ -193,7 +193,7 @@ public struct Organizations: AWSService { return try await self.cancelHandshake(input, logger: logger) } - /// Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following: Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation. While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED. Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizationsin the Organizations User Guide. To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status. If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide. + /// Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. 
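// Sketch of attaching one of the policy types listed above — here a service control policy — to
// an organizational unit (not part of the upstream diff). The `policyId:` and `targetId:` labels
// are assumptions; the convenience parameters for AttachPolicy are elided from this hunk.
import SotoOrganizations

func attachSCP(using organizations: Organizations) async throws {
    try await organizations.attachPolicy(
        policyId: "p-examplepolicyid",   // illustrative SERVICE_CONTROL_POLICY id
        targetId: "ou-exampleouid"       // a root, OU, or account id
    )
}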
This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following: Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation. While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED. Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can't close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizations in the Organizations User Guide. To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status. If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide. @Sendable @inlinable public func closeAccount(_ input: CloseAccountRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -206,7 +206,7 @@ public struct Organizations: AWSService { logger: logger ) } - /// Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following: Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation. While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED. Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizationsin the Organizations User Guide. To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status. 
If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide. + /// Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following: Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation. While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED. Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can't close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizations in the Organizations User Guide. To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status. If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide. /// /// Parameters: /// - accountId: Retrieves the Amazon Web Services account Id for the current CloseAccount API request. @@ -222,7 +222,7 @@ public struct Organizations: AWSService { return try await self.closeAccount(input, logger: logger) } - /// Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following: Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation. Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. The user who calls the API to create an account must have the organizations:CreateAccount permission. 
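// Sketch of the CloseAccount flow documented above (not part of the upstream diff): request the
// closure, then check DescribeAccount until the status moves from PENDING_CLOSURE to SUSPENDED.
// The `describeAccount(accountId:)` label mirrors the same naming convention and is an
// assumption here, since that method is not shown in this hunk.
import SotoOrganizations

func closeMemberAccount(_ accountId: String, using organizations: Organizations) async throws {
    try await organizations.closeAccount(accountId: accountId)

    // The request is asynchronous; poll the account status afterwards.
    let response = try await organizations.describeAccount(accountId: accountId)
    print("Account status:", String(describing: response.account?.status))
}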
If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide. If the request includes tags, then the requester must have the organizations:TagResource permission. Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account. This operation can be called only from the organization's management account. For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support. If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support. Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide. When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools. + /// Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following: Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation. Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. 
The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide. If the request includes tags, then the requester must have the organizations:TagResource permission. Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account. This operation can be called only from the organization's management account. For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support. If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support. It isn't recommended to use CreateAccount to create multiple temporary accounts, and using the CreateAccount API to close accounts is subject to a 30-day usage quota. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide. When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools. @Sendable @inlinable public func createAccount(_ input: CreateAccountRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAccountResponse { @@ -235,7 +235,7 @@ public struct Organizations: AWSService { logger: logger ) } - /// Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. 
To check the status of the request, do one of the following: Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation. Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide. If the request includes tags, then the requester must have the organizations:TagResource permission. Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account. This operation can be called only from the organization's management account. For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support. If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support. Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide. When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools. + /// Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. 
You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following: Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation. Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide. The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide. If the request includes tags, then the requester must have the organizations:TagResource permission. Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account. This operation can be called only from the organization's management account. For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support. If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support. It isn't recommended to use CreateAccount to create multiple temporary accounts, and using the CreateAccount API to close accounts is subject to a 30-day usage quota. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide. When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools. /// /// Parameters: /// - accountName: The friendly name of the member account. @@ -390,7 +390,7 @@ public struct Organizations: AWSService { /// - description: An optional description to assign to the policy. /// - name: The friendly name to assign to the policy. 
The regex pattern that is used to validate this parameter is a string of any of the characters in the ASCII character range. /// - tags: A list of tags that you want to attach to the newly created policy. For each tag in the list, you must specify both a tag key and a value. You can set the value to an empty string, but you can't set it to null. For more information about tagging, see Tagging Organizations resources in the Organizations User Guide. If any one of the tags is not valid or if you exceed the allowed number of tags for a policy, then the entire request fails and the policy is not created. - /// - type: The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// - type: The type of policy to create. You can specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY /// - logger: Logger use during operation @inlinable public func createPolicy( @@ -636,7 +636,7 @@ public struct Organizations: AWSService { /// Returns the contents of the effective policy for specified policy type and account. The effective policy is the aggregation of any policies of the specified type that the account inherits, plus any policy of that type that is directly attached to the account. This operation applies only to policy types other than service control policies (SCPs). For more information about policy inheritance, see Understanding management policy inheritance in the Organizations User Guide. This operation can be called from any account in the organization. /// /// Parameters: - /// - policyType: The type of policy that you want information about. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY TAG_POLICY + /// - policyType: The type of policy that you want information about. You can specify one of the following values: BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY /// - targetId: When you're signed in as the management account, specify the ID of the account that you want details about. Specifying an organization root or organizational unit (OU) as the target is not supported. /// - logger: Logger use during operation @inlinable @@ -851,7 +851,7 @@ public struct Organizations: AWSService { /// management account or by a member account that is a delegated administrator for an Amazon Web Services service. To view the status of available policy types in the organization, use DescribeOrganization. /// /// Parameters: - /// - policyType: The policy type that you want to disable in this root. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// - policyType: The policy type that you want to disable in this root. You can specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY /// - rootId: The unique identifier (ID) of the root in which you want to disable a policy type. You can get the ID from the ListRoots operation. The regex pattern for a root ID string requires "r-" followed by from 4 to 32 lowercase letters or digits. /// - logger: Logger use during operation @inlinable @@ -867,7 +867,7 @@ public struct Organizations: AWSService { return try await self.disablePolicyType(input, logger: logger) } - /// Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. 
When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts. We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service. For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide. You can only call this operation from the organization's management account and only if the organization has enabled all features. + /// Provides an Amazon Web Services service (the service that is specified by ServicePrincipal) with permissions to view the structure of an organization, create a service-linked role in all the accounts in the organization, and allow the service to perform operations on behalf of the organization and its accounts. Establishing these permissions can be a first step in enabling the integration of an Amazon Web Services service with Organizations. We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service. For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide. You can only call this operation from the organization's management account and only if the organization has enabled all features. @Sendable @inlinable public func enableAWSServiceAccess(_ input: EnableAWSServiceAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -880,7 +880,7 @@ public struct Organizations: AWSService { logger: logger ) } - /// Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts. We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service. For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide. 
You can only call this operation from the organization's management account and only if the organization has enabled all features. + /// Provides an Amazon Web Services service (the service that is specified by ServicePrincipal) with permissions to view the structure of an organization, create a service-linked role in all the accounts in the organization, and allow the service to perform operations on behalf of the organization and its accounts. Establishing these permissions can be a first step in enabling the integration of an Amazon Web Services service with Organizations. We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service. For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide. You can only call this operation from the organization's management account and only if the organization has enabled all features. /// /// Parameters: /// - servicePrincipal: The service principal name of the Amazon Web Services service for which you want to enable integration with your organization. This is typically in the form of a URL, such as service-abbreviation.amazonaws.com. @@ -940,7 +940,7 @@ public struct Organizations: AWSService { /// management account or by a member account that is a delegated administrator for an Amazon Web Services service. You can enable a policy type in a root only if that policy type is available in the organization. To view the status of available policy types in the organization, use DescribeOrganization. /// /// Parameters: - /// - policyType: The policy type that you want to enable. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// - policyType: The policy type that you want to enable. You can specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY /// - rootId: The unique identifier (ID) of the root in which you want to enable a policy type. You can get the ID from the ListRoots operation. The regex pattern for a root ID string requires "r-" followed by from 4 to 32 lowercase letters or digits. /// - logger: Logger use during operation @inlinable @@ -991,7 +991,7 @@ public struct Organizations: AWSService { return try await self.inviteAccountToOrganization(input, logger: logger) } - /// Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead. This operation can be called only from a member account in the organization. The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization. 
You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear. Choose a support plan Provide and verify the required contact information Provide a current payment method Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization. You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide. After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags. A newly created account has a waiting period before it can be removed from its organization. If you get an error that indicates that a wait period is required, then try again in a few days. If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization. + /// Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead. This operation can be called only from a member account in the organization. The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization. You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear. Choose a support plan Provide and verify the required contact information Provide a current payment method Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. 
The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization. You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide. After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags. A newly created account has a waiting period before it can be removed from its organization. You must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period. If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization. @Sendable @inlinable public func leaveOrganization(logger: Logger = AWSClient.loggingDisabled) async throws { @@ -1496,7 +1496,7 @@ public struct Organizations: AWSService { /// management account or by a member account that is a delegated administrator for an Amazon Web Services service. /// /// Parameters: - /// - filter: Specifies the type of policy that you want to include in the response. You must specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// - filter: Specifies the type of policy that you want to include in the response. You must specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY /// - maxResults: The total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. /// - nextToken: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from. /// - logger: Logger use during operation @@ -1541,7 +1541,7 @@ public struct Organizations: AWSService { /// management account or by a member account that is a delegated administrator for an Amazon Web Services service. /// /// Parameters: - /// - filter: The type of policy that you want to include in the returned list. You must specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// - filter: The type of policy that you want to include in the returned list. 
You must specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY /// - maxResults: The total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. /// - nextToken: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from. /// - targetId: The unique identifier (ID) of the root, organizational unit, or account whose policies you want to list. The regex pattern for a target ID string requires one of the following: Root - A string that begins with "r-" followed by from 4 to 32 lowercase letters or digits. Account - A string that consists of exactly 12 digits. Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lowercase letters or digits (the ID of the root that the OU is in). This string is followed by a second "-" dash and from 8 to 32 additional lowercase letters or digits. @@ -2391,7 +2391,7 @@ extension Organizations { /// Return PaginatorSequence for operation ``listPolicies(_:logger:)``. /// /// - Parameters: - /// - filter: Specifies the type of policy that you want to include in the response. You must specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// - filter: Specifies the type of policy that you want to include in the response. You must specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY /// - maxResults: The total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. /// - logger: Logger used for logging @inlinable @@ -2428,7 +2428,7 @@ extension Organizations { /// Return PaginatorSequence for operation ``listPoliciesForTarget(_:logger:)``. /// /// - Parameters: - /// - filter: The type of policy that you want to include in the returned list. You must specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// - filter: The type of policy that you want to include in the returned list. 
You must specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY /// - maxResults: The total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. /// - targetId: The unique identifier (ID) of the root, organizational unit, or account whose policies you want to list. The regex pattern for a target ID string requires one of the following: Root - A string that begins with "r-" followed by from 4 to 32 lowercase letters or digits. Account - A string that consists of exactly 12 digits. Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lowercase letters or digits (the ID of the root that the OU is in). This string is followed by a second "-" dash and from 8 to 32 additional lowercase letters or digits. /// - logger: Logger used for logging diff --git a/Sources/Soto/Services/Organizations/Organizations_shapes.swift b/Sources/Soto/Services/Organizations/Organizations_shapes.swift index 3be4c198d5..da69bdbd8a 100644 --- a/Sources/Soto/Services/Organizations/Organizations_shapes.swift +++ b/Sources/Soto/Services/Organizations/Organizations_shapes.swift @@ -82,6 +82,7 @@ extension Organizations { public enum EffectivePolicyType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case aiservicesOptOutPolicy = "AISERVICES_OPT_OUT_POLICY" case backupPolicy = "BACKUP_POLICY" + case chatbotPolicy = "CHATBOT_POLICY" case tagPolicy = "TAG_POLICY" public var description: String { return self.rawValue } } @@ -136,6 +137,7 @@ extension Organizations { public enum PolicyType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case aiservicesOptOutPolicy = "AISERVICES_OPT_OUT_POLICY" case backupPolicy = "BACKUP_POLICY" + case chatbotPolicy = "CHATBOT_POLICY" case serviceControlPolicy = "SERVICE_CONTROL_POLICY" case tagPolicy = "TAG_POLICY" public var description: String { return self.rawValue } @@ -564,7 +566,7 @@ extension Organizations { public let name: String /// A list of tags that you want to attach to the newly created policy. For each tag in the list, you must specify both a tag key and a value. You can set the value to an empty string, but you can't set it to null. For more information about tagging, see Tagging Organizations resources in the Organizations User Guide. If any one of the tags is not valid or if you exceed the allowed number of tags for a policy, then the entire request fails and the policy is not created. public let tags: [Tag]? - /// The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// The type of policy to create. 
You can specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY public let type: PolicyType @inlinable @@ -836,7 +838,7 @@ extension Organizations { } public struct DescribeEffectivePolicyRequest: AWSEncodableShape { - /// The type of policy that you want information about. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY TAG_POLICY + /// The type of policy that you want information about. You can specify one of the following values: BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY public let policyType: EffectivePolicyType /// When you're signed in as the management account, specify the ID of the account that you want details about. Specifying an organization root or organizational unit (OU) as the target is not supported. public let targetId: String? @@ -906,7 +908,7 @@ extension Organizations { } public struct DescribeOrganizationResponse: AWSDecodableShape { - /// A structure that contains information about the organization. The AvailablePolicyTypes part of the response is deprecated, and you shouldn't use it in your apps. It doesn't include any policy type supported by Organizations other than SCPs. To determine which policy types are enabled in your organization, use the ListRoots operation. + /// A structure that contains information about the organization. The AvailablePolicyTypes part of the response is deprecated, and you shouldn't use it in your apps. It doesn't include any policy type supported by Organizations other than SCPs. In the China (Ningxia) Region, no policy type is included. To determine which policy types are enabled in your organization, use the ListRoots operation. public let organization: Organization? @inlinable @@ -1045,7 +1047,7 @@ extension Organizations { } public struct DisablePolicyTypeRequest: AWSEncodableShape { - /// The policy type that you want to disable in this root. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// The policy type that you want to disable in this root. You can specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY public let policyType: PolicyType /// The unique identifier (ID) of the root in which you want to disable a policy type. You can get the ID from the ListRoots operation. The regex pattern for a root ID string requires "r-" followed by from 4 to 32 lowercase letters or digits. public let rootId: String @@ -1146,7 +1148,7 @@ extension Organizations { } public struct EnablePolicyTypeRequest: AWSEncodableShape { - /// The policy type that you want to enable. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// The policy type that you want to enable. You can specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY public let policyType: PolicyType /// The unique identifier (ID) of the root in which you want to enable a policy type. You can get the ID from the ListRoots operation. The regex pattern for a root ID string requires "r-" followed by from 4 to 32 lowercase letters or digits. public let rootId: String @@ -1885,7 +1887,7 @@ extension Organizations { } public struct ListPoliciesForTargetRequest: AWSEncodableShape { - /// The type of policy that you want to include in the returned list. 
You must specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// The type of policy that you want to include in the returned list. You must specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY public let filter: PolicyType /// The total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. public let maxResults: Int? @@ -1938,7 +1940,7 @@ extension Organizations { } public struct ListPoliciesRequest: AWSEncodableShape { - /// Specifies the type of policy that you want to include in the response. You must specify one of the following values: AISERVICES_OPT_OUT_POLICY BACKUP_POLICY SERVICE_CONTROL_POLICY TAG_POLICY + /// Specifies the type of policy that you want to include in the response. You must specify one of the following values: SERVICE_CONTROL_POLICY BACKUP_POLICY TAG_POLICY CHATBOT_POLICY AISERVICES_OPT_OUT_POLICY public let filter: PolicyType /// The total number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value that is specific to the operation. If additional items exist beyond the maximum you specify, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. Note that Organizations might return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. public let maxResults: Int? @@ -2741,7 +2743,7 @@ public struct OrganizationsErrorType: AWSErrorType { public static var concurrentModificationException: Self { .init(.concurrentModificationException) } /// The request failed because it conflicts with the current state of the specified resource. public static var conflictException: Self { .init(.conflictException) } - /// Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit: Some of the reasons in the following list might not be applicable to this specific API or operation. ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself. ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. 
Follow the steps at Removing a member account from your organization in the Organizations User Guide. ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day. ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization. ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit. Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts. Deleted and closed accounts still count toward your limit. If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support. CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator. CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator. CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​ CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator. CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days. CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. ​ CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode. DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service. EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verfication code. HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day. INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments. 
MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace. MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support. MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again. MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide. MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal. MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time. MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource. MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required. ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation. OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep. OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization. POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size. POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization. SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first. TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account. WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, there is a waiting period before you can remove it from the organization. If you get an error that indicates that a wait period is required, try again in a few days. + /// Performing this operation violates a minimum or maximum value limit. 
For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit: Some of the reasons in the following list might not be applicable to this specific API or operation. ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself. ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide. ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day. ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization. ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit. Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts. Deleted and closed accounts still count toward your limit. If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support. CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator. CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator. CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.​ CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator. CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days. CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time. ​ CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode. DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. 
To complete this operation, you must first deregister any existing delegated administrators for this service. EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code. HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day. INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments. MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace. MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support. MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again. MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide. MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal. MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time. MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource. MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide. MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required. ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation. OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep. OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization. POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size. POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.
SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first. TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account. WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, you must wait until at least seven days after the account was created before you can remove it from the organization. Invited accounts aren't subject to this waiting period. public static var constraintViolationException: Self { .init(.constraintViolationException) } /// We can't find an create account request with the CreateAccountRequestId that you specified. public static var createAccountStatusNotFoundException: Self { .init(.createAccountStatusNotFoundException) } diff --git a/Sources/Soto/Services/Outposts/Outposts_shapes.swift b/Sources/Soto/Services/Outposts/Outposts_shapes.swift index 617f37395c..5649379fca 100644 --- a/Sources/Soto/Services/Outposts/Outposts_shapes.swift +++ b/Sources/Soto/Services/Outposts/Outposts_shapes.swift @@ -125,6 +125,7 @@ extension Outposts { public enum OrderStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cancelled = "CANCELLED" case completed = "COMPLETED" + case delivered = "DELIVERED" case error = "ERROR" case fulfilled = "FULFILLED" case inProgress = "IN_PROGRESS" @@ -1850,7 +1851,7 @@ extension Outposts { public let paymentOption: PaymentOption? /// The payment term. public let paymentTerm: PaymentTerm? - /// The status of the order. PREPARING - Order is received and being prepared. IN_PROGRESS - Order is either being built, shipped, or installed. To get more details, see the line item status. COMPLETED - Order is complete. CANCELLED - Order is cancelled. ERROR - Customer should contact support. The following status are deprecated: RECEIVED, PENDING, PROCESSING, INSTALLING, and FULFILLED. + /// The status of the order. PREPARING - Order is received and being prepared. IN_PROGRESS - Order is either being built or shipped. To get more details, see the line item status. DELIVERED - Order was delivered to the Outpost site. COMPLETED - Order is complete. CANCELLED - Order is cancelled. ERROR - Customer should contact support. The following statuses are deprecated: RECEIVED, PENDING, PROCESSING, INSTALLING, and FULFILLED. public let status: OrderStatus? @inlinable diff --git a/Sources/Soto/Services/PCS/PCS_api.swift b/Sources/Soto/Services/PCS/PCS_api.swift index f3d757ab69..92b3cce5b8 100644 --- a/Sources/Soto/Services/PCS/PCS_api.swift +++ b/Sources/Soto/Services/PCS/PCS_api.swift @@ -148,7 +148,7 @@ public struct PCS: AWSService { /// - clusterIdentifier: The name or ID of the cluster to create a compute node group in. /// - computeNodeGroupName: A name to identify the cluster. Example: MyCluster /// - customLaunchTemplate: - /// - iamInstanceProfileArn: The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached in order to provision instances correctly. The resource identifier of the ARN must start with AWSPCS. For example, arn:aws:iam:123456789012:instance-profile/AWSPCSMyComputeNodeInstanceProfile. + /// - iamInstanceProfileArn: The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances.
The role contained in your instance profile must have the pcs:RegisterComputeNodeGroupInstance permission. The resource identifier of the ARN must start with AWSPCS or it must have /aws-pcs/ in its path. Examples arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1 arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2 /// - instanceConfigs: A list of EC2 instance configurations that Amazon Web Services PCS can provision in the compute node group. /// - purchaseOption: Specifies how EC2 instances are purchased on your behalf. Amazon Web Services PCS supports On-Demand and Spot instances. For more information, see Instance purchasing options in the Amazon Elastic Compute Cloud User Guide. If you don't provide this option, it defaults to On-Demand. /// - scalingConfiguration: Specifies the boundaries of the compute node group auto scaling. @@ -676,7 +676,7 @@ public struct PCS: AWSService { /// - clusterIdentifier: The name or ID of the cluster of the compute node group. /// - computeNodeGroupIdentifier: The name or ID of the compute node group. /// - customLaunchTemplate: - /// - iamInstanceProfileArn: The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached to provision instances correctly. + /// - iamInstanceProfileArn: The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have the pcs:RegisterComputeNodeGroupInstance permission. The resource identifier of the ARN must start with AWSPCS or it must have /aws-pcs/ in its path. Examples arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1 arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2 /// - purchaseOption: Specifies how EC2 instances are purchased on your behalf. Amazon Web Services PCS supports On-Demand and Spot instances. For more information, see Instance purchasing options in the Amazon Elastic Compute Cloud User Guide. If you don't provide this option, it defaults to On-Demand. /// - scalingConfiguration: Specifies the boundaries of the compute node group auto scaling. /// - slurmConfiguration: Additional options related to the Slurm scheduler. diff --git a/Sources/Soto/Services/PCS/PCS_shapes.swift b/Sources/Soto/Services/PCS/PCS_shapes.swift index 7bb117ccd4..7c3429c024 100644 --- a/Sources/Soto/Services/PCS/PCS_shapes.swift +++ b/Sources/Soto/Services/PCS/PCS_shapes.swift @@ -235,7 +235,7 @@ extension PCS { public let customLaunchTemplate: CustomLaunchTemplate /// The list of errors that occurred during compute node group provisioning. public let errorInfo: [ErrorInfo]? - /// The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached to provision instances correctly. + /// The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have the pcs:RegisterComputeNodeGroupInstance permission. The resource identifier of the ARN must start with AWSPCS or it must have /aws-pcs/ in its path. 
Examples arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1 arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2 public let iamInstanceProfileArn: String /// The generated unique ID of the compute node group. public let id: String @@ -455,7 +455,7 @@ extension PCS { /// A name to identify the cluster. Example: MyCluster public let computeNodeGroupName: String public let customLaunchTemplate: CustomLaunchTemplate - /// The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached in order to provision instances correctly. The resource identifier of the ARN must start with AWSPCS. For example, arn:aws:iam:123456789012:instance-profile/AWSPCSMyComputeNodeInstanceProfile. + /// The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have the pcs:RegisterComputeNodeGroupInstance permission. The resource identifier of the ARN must start with AWSPCS or it must have /aws-pcs/ in its path. Examples arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1 arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2 public let iamInstanceProfileArn: String /// A list of EC2 instance configurations that Amazon Web Services PCS can provision in the compute node group. public let instanceConfigs: [InstanceConfig] @@ -1308,7 +1308,7 @@ extension PCS { } public struct SlurmCustomSetting: AWSEncodableShape & AWSDecodableShape { - /// Amazon Web Services PCS supports configuration of the following Slurm parameters: Prolog , Epilog , and SelectTypeParameters . + /// Amazon Web Services PCS supports configuration of the following Slurm parameters: For clusters Prolog Epilog SelectTypeParameters For compute node groups Weight RealMemory public let parameterName: String /// The values for the configured Slurm settings. public let parameterValue: String @@ -1416,7 +1416,7 @@ extension PCS { /// The name or ID of the compute node group. public let computeNodeGroupIdentifier: String public let customLaunchTemplate: CustomLaunchTemplate? - /// The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached to provision instances correctly. + /// The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have the pcs:RegisterComputeNodeGroupInstance permission. The resource identifier of the ARN must start with AWSPCS or it must have /aws-pcs/ in its path. Examples arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1 arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2 public let iamInstanceProfileArn: String? /// Specifies how EC2 instances are purchased on your behalf. Amazon Web Services PCS supports On-Demand and Spot instances. For more information, see Instance purchasing options in the Amazon Elastic Compute Cloud User Guide. If you don't provide this option, it defaults to On-Demand. public let purchaseOption: PurchaseOption? 
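The PCS documentation changes above tighten the instance-profile requirement: the ARN's resource identifier must either start with AWSPCS or contain /aws-pcs/ in its path, and Slurm custom settings now cover Weight and RealMemory for compute node groups in addition to the cluster-level parameters. As a minimal, standalone sketch of that naming rule (a hypothetical helper written for illustration, not part of the generated Soto client), the check can be expressed directly in Swift:

import Foundation

// Hypothetical helper mirroring the documented rule: the resource identifier of the
// IAM instance profile ARN must start with "AWSPCS" or the ARN must contain "/aws-pcs/".
func isValidPCSInstanceProfileArn(_ arn: String) -> Bool {
    // ARN format: arn:aws:iam::<account-id>:instance-profile/<path-and-name>
    guard let range = arn.range(of: ":instance-profile/") else { return false }
    let resourceId = String(arn[range.upperBound...])
    return resourceId.hasPrefix("AWSPCS") || arn.contains("/aws-pcs/")
}

// Both examples from the updated documentation satisfy the rule.
assert(isValidPCSInstanceProfileArn("arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1"))
assert(isValidPCSInstanceProfileArn("arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2"))

An ARN that fails this check would be rejected by CreateComputeNodeGroup and UpdateComputeNodeGroup per the doc comments above.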
diff --git a/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_api.swift b/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_api.swift index fc28cfa35b..210d1cd629 100644 --- a/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_api.swift +++ b/Sources/Soto/Services/PcaConnectorScep/PcaConnectorScep_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS PcaConnectorScep service. /// -/// Connector for SCEP (Preview) is in preview release for Amazon Web Services Private Certificate Authority and is subject to change. Connector for SCEP (Preview) creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide. +/// Connector for SCEP creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide. public struct PcaConnectorScep: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_api.swift b/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_api.swift index dd49b93ef3..748410aab0 100644 --- a/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_api.swift +++ b/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_api.swift @@ -111,8 +111,8 @@ public struct PinpointSMSVoiceV2: AWSService { /// Parameters: /// - clientToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you don't specify a client token, a randomly generated token is used for the request to ensure idempotency. /// - isoCountryCode: The new two-character code, in ISO 3166-1 alpha-2 format, for the country or region of the origination identity. - /// - originationIdentity: The origination identity to use, such as PhoneNumberId, PhoneNumberArn, SenderId, or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. - /// - poolId: The pool to update with the new Identity. This value can be either the PoolId or PoolArn, and you can find these values using DescribePools. + /// - originationIdentity: The origination identity to use, such as PhoneNumberId, PhoneNumberArn, SenderId, or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). + /// - poolId: The pool to update with the new Identity. This value can be either the PoolId or PoolArn, and you can find these values using DescribePools. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func associateOriginationIdentity( @@ -300,7 +300,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - deletionProtectionEnabled: By default this is set to false. When set to true the pool can't be deleted. You can change this value using the UpdatePool action. /// - isoCountryCode: The new two-character code, in ISO 3166-1 alpha-2 format, for the country or region of the new pool. /// - messageType: The type of message. 
Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. After the pool is created the MessageType can't be changed. - /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity. + /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - tags: An array of tags (key and value pairs) associated with the pool. /// - logger: Logger use during operation @inlinable @@ -690,7 +690,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// /// Parameters: /// - keyword: The keyword to delete. - /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, PoolId or PoolArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn and DescribePools to find the values of PoolId and PoolArn. + /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, PoolId or PoolArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn and DescribePools to find the values of PoolId and PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func deleteKeyword( @@ -747,7 +747,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// Deletes an existing opt-out list. All opted out phone numbers in the opt-out list are deleted. If the specified opt-out list name doesn't exist or is in-use by an origination phone number or pool, an error is returned. /// /// Parameters: - /// - optOutListName: The OptOutListName or OptOutListArn of the OptOutList to delete. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn. + /// - optOutListName: The OptOutListName or OptOutListArn of the OptOutList to delete. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func deleteOptOutList( @@ -777,7 +777,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// /// Parameters: /// - optedOutNumber: The phone number, in E.164 format, to remove from the OptOutList. - /// - optOutListName: The OptOutListName or OptOutListArn to remove the phone number from. + /// - optOutListName: The OptOutListName or OptOutListArn to remove the phone number from. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). 
/// - logger: Logger use during operation @inlinable public func deleteOptedOutNumber( @@ -808,7 +808,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// Deletes an existing pool. Deleting a pool disassociates all origination identities from that pool. If the pool status isn't active or if deletion protection is enabled, an error is returned. A pool is a collection of phone numbers and SenderIds. A pool can include one or more phone numbers and SenderIds that are associated with your Amazon Web Services account. /// /// Parameters: - /// - poolId: The PoolId or PoolArn of the pool to delete. You can use DescribePools to find the values for PoolId and PoolArn . + /// - poolId: The PoolId or PoolArn of the pool to delete. You can use DescribePools to find the values for PoolId and PoolArn . If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func deletePool( @@ -940,6 +940,35 @@ public struct PinpointSMSVoiceV2: AWSService { return try await self.deleteRegistrationFieldValue(input, logger: logger) } + /// Deletes the resource-based policy document attached to the AWS End User Messaging SMS and Voice resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. + @Sendable + @inlinable + public func deleteResourcePolicy(_ input: DeleteResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteResourcePolicyResult { + try await self.client.execute( + operation: "DeleteResourcePolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes the resource-based policy document attached to the AWS End User Messaging SMS and Voice resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource you're deleting the resource-based policy from. + /// - logger: Logger use during operation + @inlinable + public func deleteResourcePolicy( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteResourcePolicyResult { + let input = DeleteResourcePolicyRequest( + resourceArn: resourceArn + ) + return try await self.deleteResourcePolicy(input, logger: logger) + } + /// Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas in the AWS End User Messaging SMS User Guide. @Sendable @inlinable @@ -1143,7 +1172,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - keywords: An array of keywords to search for. /// - maxResults: The maximum number of results to return per each request. /// - nextToken: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. - /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. + /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. 
You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func describeKeywords( @@ -1182,19 +1211,22 @@ public struct PinpointSMSVoiceV2: AWSService { /// Parameters: /// - maxResults: The maximum number of results to return per each request. /// - nextToken: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. - /// - optOutListNames: The OptOutLists to show the details of. This is an array of strings that can be either the OptOutListName or OptOutListArn. + /// - optOutListNames: The OptOutLists to show the details of. This is an array of strings that can be either the OptOutListName or OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). + /// - owner: Use SELF to filter the list of Opt-Out List to ones your account owns or use SHARED to filter on Opt-Out List shared with your account. The Owner and OptOutListNames parameters can't be used at the same time. /// - logger: Logger use during operation @inlinable public func describeOptOutLists( maxResults: Int? = nil, nextToken: String? = nil, optOutListNames: [String]? = nil, + owner: Owner? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> DescribeOptOutListsResult { let input = DescribeOptOutListsRequest( maxResults: maxResults, nextToken: nextToken, - optOutListNames: optOutListNames + optOutListNames: optOutListNames, + owner: owner ) return try await self.describeOptOutLists(input, logger: logger) } @@ -1219,7 +1251,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - maxResults: The maximum number of results to return per each request. /// - nextToken: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. /// - optedOutNumbers: An array of phone numbers to search for in the OptOutList. - /// - optOutListName: The OptOutListName or OptOutListArn of the OptOutList. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn. + /// - optOutListName: The OptOutListName or OptOutListArn of the OptOutList. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func describeOptedOutNumbers( @@ -1259,13 +1291,15 @@ public struct PinpointSMSVoiceV2: AWSService { /// - filters: An array of PhoneNumberFilter objects to filter the results. /// - maxResults: The maximum number of results to return per each request. /// - nextToken: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. - /// - phoneNumberIds: The unique identifier of phone numbers to find information about. This is an array of strings that can be either the PhoneNumberId or PhoneNumberArn. + /// - owner: Use SELF to filter the list of phone numbers to ones your account owns or use SHARED to filter on phone numbers shared with your account. 
The Owner and PhoneNumberIds parameters can't be used at the same time. + /// - phoneNumberIds: The unique identifier of phone numbers to find information about. This is an array of strings that can be either the PhoneNumberId or PhoneNumberArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func describePhoneNumbers( filters: [PhoneNumberFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, + owner: Owner? = nil, phoneNumberIds: [String]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> DescribePhoneNumbersResult { @@ -1273,6 +1307,7 @@ public struct PinpointSMSVoiceV2: AWSService { filters: filters, maxResults: maxResults, nextToken: nextToken, + owner: owner, phoneNumberIds: phoneNumberIds ) return try await self.describePhoneNumbers(input, logger: logger) @@ -1297,13 +1332,15 @@ public struct PinpointSMSVoiceV2: AWSService { /// - filters: An array of PoolFilter objects to filter the results. /// - maxResults: The maximum number of results to return per each request. /// - nextToken: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. - /// - poolIds: The unique identifier of pools to find. This is an array of strings that can be either the PoolId or PoolArn. + /// - owner: Use SELF to filter the list of Pools to ones your account owns or use SHARED to filter on Pools shared with your account. The Owner and PoolIds parameters can't be used at the same time. + /// - poolIds: The unique identifier of pools to find. This is an array of strings that can be either the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func describePools( filters: [PoolFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, + owner: Owner? = nil, poolIds: [String]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> DescribePoolsResult { @@ -1311,6 +1348,7 @@ public struct PinpointSMSVoiceV2: AWSService { filters: filters, maxResults: maxResults, nextToken: nextToken, + owner: owner, poolIds: poolIds ) return try await self.describePools(input, logger: logger) @@ -1651,13 +1689,15 @@ public struct PinpointSMSVoiceV2: AWSService { /// - filters: An array of SenderIdFilter objects to filter the results. /// - maxResults: The maximum number of results to return per each request. /// - nextToken: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. - /// - senderIds: An array of SenderIdAndCountry objects to search for. + /// - owner: Use SELF to filter the list of Sender Ids to ones your account owns or use SHARED to filter on Sender Ids shared with your account. The Owner and SenderIds parameters can't be used at the same time. + /// - senderIds: An array of SenderIdAndCountry objects to search for. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func describeSenderIds( filters: [SenderIdFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, + owner: Owner? = nil, senderIds: [SenderIdAndCountry]? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> DescribeSenderIdsResult { @@ -1665,6 +1705,7 @@ public struct PinpointSMSVoiceV2: AWSService { filters: filters, maxResults: maxResults, nextToken: nextToken, + owner: owner, senderIds: senderIds ) return try await self.describeSenderIds(input, logger: logger) @@ -1761,8 +1802,8 @@ public struct PinpointSMSVoiceV2: AWSService { /// Parameters: /// - clientToken: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you don't specify a client token, a randomly generated token is used for the request to ensure idempotency. /// - isoCountryCode: The two-character code, in ISO 3166-1 alpha-2 format, for the country or region. - /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers find the values for PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the values for SenderId and SenderIdArn. - /// - poolId: The unique identifier for the pool to disassociate with the origination identity. This value can be either the PoolId or PoolArn. + /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers find the values for PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the values for SenderId and SenderIdArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). + /// - poolId: The unique identifier for the pool to disassociate with the origination identity. This value can be either the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func disassociateOriginationIdentity( @@ -1874,6 +1915,35 @@ public struct PinpointSMSVoiceV2: AWSService { return try await self.getProtectConfigurationCountryRuleSet(input, logger: logger) } + /// Retrieves the JSON text of the resource-based policy document attached to the AWS End User Messaging SMS and Voice resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. + @Sendable + @inlinable + public func getResourcePolicy(_ input: GetResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetResourcePolicyResult { + try await self.client.execute( + operation: "GetResourcePolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Retrieves the JSON text of the resource-based policy document attached to the AWS End User Messaging SMS and Voice resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource attached to the resource-based policy. + /// - logger: Logger use during operation + @inlinable + public func getResourcePolicy( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetResourcePolicyResult { + let input = GetResourcePolicyRequest( + resourceArn: resourceArn + ) + return try await self.getResourcePolicy(input, logger: logger) + } + /// Lists all associated origination identities in your pool. 
If you specify filters, the output includes information for only those origination identities that meet the filter criteria. @Sendable @inlinable @@ -1893,7 +1963,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - filters: An array of PoolOriginationIdentitiesFilter objects to filter the results.. /// - maxResults: The maximum number of results to return per each request. /// - nextToken: The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. - /// - poolId: The unique identifier for the pool. This value can be either the PoolId or PoolArn. + /// - poolId: The unique identifier for the pool. This value can be either the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func listPoolOriginationIdentities( @@ -1998,7 +2068,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - keyword: The new keyword to add. /// - keywordAction: The action to perform for the new keyword when it is received. AUTOMATIC_RESPONSE: A message is sent to the recipient. OPT_OUT: Keeps the recipient from receiving future messages. OPT_IN: The recipient wants to receive future messages. /// - keywordMessage: The message associated with the keyword. - /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers get the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. + /// - originationIdentity: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers get the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func putKeyword( @@ -2034,7 +2104,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// /// Parameters: /// - optedOutNumber: The phone number to add to the OptOutList in E.164 format. - /// - optOutListName: The OptOutListName or OptOutListArn to add the phone number to. + /// - optOutListName: The OptOutListName or OptOutListArn to add the phone number to. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func putOptedOutNumber( @@ -2090,6 +2160,38 @@ public struct PinpointSMSVoiceV2: AWSService { return try await self.putRegistrationFieldValue(input, logger: logger) } + /// Attaches a resource-based policy to a AWS End User Messaging SMS and Voice resource(phone number, sender Id, phone poll, or opt-out list) that is used for sharing the resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. For more information about resource-based policies, see Working with shared resources in the AWS End User Messaging SMS User Guide. 
+ @Sendable + @inlinable + public func putResourcePolicy(_ input: PutResourcePolicyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutResourcePolicyResult { + try await self.client.execute( + operation: "PutResourcePolicy", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Attaches a resource-based policy to a AWS End User Messaging SMS and Voice resource(phone number, sender Id, phone poll, or opt-out list) that is used for sharing the resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. For more information about resource-based policies, see Working with shared resources in the AWS End User Messaging SMS User Guide. + /// + /// Parameters: + /// - policy: The JSON formatted resource-based policy to attach. + /// - resourceArn: The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource to attach the resource-based policy to. + /// - logger: Logger use during operation + @inlinable + public func putResourcePolicy( + policy: String, + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> PutResourcePolicyResult { + let input = PutResourcePolicyRequest( + policy: policy, + resourceArn: resourceArn + ) + return try await self.putResourcePolicy(input, logger: logger) + } + /// Releases an existing origination phone number in your account. Once released, a phone number is no longer available for sending messages. If the origination phone number has deletion protection enabled or is associated with a pool, an error is returned. @Sendable @inlinable @@ -2106,7 +2208,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// Releases an existing origination phone number in your account. Once released, a phone number is no longer available for sending messages. If the origination phone number has deletion protection enabled or is associated with a pool, an error is returned. /// /// Parameters: - /// - phoneNumberId: The PhoneNumberId or PhoneNumberArn of the phone number to release. You can use DescribePhoneNumbers to get the values for PhoneNumberId and PhoneNumberArn. + /// - phoneNumberId: The PhoneNumberId or PhoneNumberArn of the phone number to release. You can use DescribePhoneNumbers to get the values for PhoneNumberId and PhoneNumberArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - logger: Logger use during operation @inlinable public func releasePhoneNumber( @@ -2173,8 +2275,8 @@ public struct PinpointSMSVoiceV2: AWSService { /// - messageType: The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. /// - numberCapabilities: Indicates if the phone number will be used for text messages, voice messages, or both. /// - numberType: The type of phone number to request. - /// - optOutListName: The name of the OptOutList to associate with the phone number. You can use the OptOutListName or OptOutListArn. - /// - poolId: The pool to associated with the phone number. You can use the PoolId or PoolArn. + /// - optOutListName: The name of the OptOutList to associate with the phone number. You can use the OptOutListName or OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). + /// - poolId: The pool to associated with the phone number. 
You can use the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - registrationId: Use this field to attach your phone number for an external registration process. /// - tags: An array of tags (key and value pairs) associate with the requested phone number. /// - logger: Logger use during operation @@ -2271,7 +2373,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - context: You can specify custom data in this field. If you do, that data is logged to the event destination. /// - destinationCountryParameters: This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. For more information see Special requirements for sending SMS messages to recipients in India. /// - languageCode: Choose the language to use for the message. - /// - originationIdentity: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. + /// - originationIdentity: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - verificationChannel: Choose to send the verification code as an SMS or voice message. /// - verifiedDestinationNumberId: The unique identifier for the verified destination phone number. /// - logger: Logger use during operation @@ -2321,7 +2423,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - maxPrice: The maximum amount that you want to spend, in US dollars, per each MMS message. /// - mediaUrls: An array of URLs to each media file to send. The media files have to be stored in a publicly available S3 bucket. Supported media file formats are listed in MMS file types, size and character limits. For more information on creating an S3 bucket and managing objects, see Creating a bucket and Uploading objects in the S3 user guide. /// - messageBody: The text body of the message. - /// - originationIdentity: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. + /// - originationIdentity: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - protectConfigurationId: The unique identifier of the protect configuration to use. /// - timeToLive: How long the text message is valid for. By default this is 72 hours. /// - logger: Logger use during operation @@ -2379,7 +2481,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - maxPrice: The maximum amount that you want to spend, in US dollars, per each text message. If the calculated amount to send the text message is greater than MaxPrice, the message is not sent and an error is returned. /// - messageBody: The body of the text message. /// - messageType: The type of message. Valid values are for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. - /// - originationIdentity: The origination identity of the message. 
This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. + /// - originationIdentity: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - protectConfigurationId: The unique identifier for the protect configuration. /// - timeToLive: How long the text message is valid for, in seconds. By default this is 72 hours. If the messages isn't handed off before the TTL expires we stop attempting to hand off the message and return TTL_EXPIRED event. /// - logger: Logger use during operation @@ -2439,7 +2541,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// - maxPricePerMinute: The maximum amount to spend per voice message, in US dollars. /// - messageBody: The text to convert to a voice message. /// - messageBodyTextType: Specifies if the MessageBody field contains text or speech synthesis markup language (SSML). TEXT: This is the default value. When used the maximum character limit is 3000. SSML: When used the maximum character limit is 6000 including SSML tagging. - /// - originationIdentity: The origination identity to use for the voice call. This can be the PhoneNumber, PhoneNumberId, PhoneNumberArn, PoolId, or PoolArn. + /// - originationIdentity: The origination identity to use for the voice call. This can be the PhoneNumber, PhoneNumberId, PhoneNumberArn, PoolId, or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - protectConfigurationId: The unique identifier for the protect configuration. /// - timeToLive: How long the voice message is valid for. By default this is 72 hours. /// - voiceId: The voice for the Amazon Polly service to use. By default this is set to "MATTHEW". @@ -2813,7 +2915,7 @@ public struct PinpointSMSVoiceV2: AWSService { /// Parameters: /// - deletionProtectionEnabled: By default this is set to false. When set to true the phone number can't be deleted. /// - optOutListName: The OptOutList to add the phone number to. Valid values for this field can be either the OutOutListName or OutOutListArn. - /// - phoneNumberId: The unique identifier of the phone number. Valid values for this field can be either the PhoneNumberId or PhoneNumberArn. + /// - phoneNumberId: The unique identifier of the phone number. Valid values for this field can be either the PhoneNumberId or PhoneNumberArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - selfManagedOptOutsEnabled: By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. /// - twoWayChannelArn: The Amazon Resource Name (ARN) of the two way channel. /// - twoWayChannelRole: An optional IAM Role Arn for a service to assume, to be able to post inbound SMS messages. @@ -2859,8 +2961,8 @@ public struct PinpointSMSVoiceV2: AWSService { /// /// Parameters: /// - deletionProtectionEnabled: When set to true the pool can't be deleted. 
- /// - optOutListName: The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn. - /// - poolId: The unique identifier of the pool to update. Valid values are either the PoolId or PoolArn. + /// - optOutListName: The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). + /// - poolId: The unique identifier of the pool to update. Valid values are either the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). /// - selfManagedOptOutsEnabled: By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. /// - sharedRoutesEnabled: Indicates whether shared routes are enabled for the pool. /// - twoWayChannelArn: The Amazon Resource Name (ARN) of the two way channel. diff --git a/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_shapes.swift b/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_shapes.swift index 6bc7a809e6..21060d9ea9 100644 --- a/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_shapes.swift +++ b/Sources/Soto/Services/PinpointSMSVoiceV2/PinpointSMSVoiceV2_shapes.swift @@ -197,6 +197,12 @@ extension PinpointSMSVoiceV2 { public var description: String { return self.rawValue } } + public enum Owner: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case _self = "SELF" + case shared = "SHARED" + public var description: String { return self.rawValue } + } + public enum PhoneNumberFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case deletionProtectionEnabled = "deletion-protection-enabled" case isoCountryCode = "iso-country-code" @@ -471,9 +477,9 @@ extension PinpointSMSVoiceV2 { public let clientToken: String? /// The new two-character code, in ISO 3166-1 alpha-2 format, for the country or region of the origination identity. public let isoCountryCode: String - /// The origination identity to use, such as PhoneNumberId, PhoneNumberArn, SenderId, or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. + /// The origination identity to use, such as PhoneNumberId, PhoneNumberArn, SenderId, or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String - /// The pool to update with the new Identity. This value can be either the PoolId or PoolArn, and you can find these values using DescribePools. + /// The pool to update with the new Identity. This value can be either the PoolId or PoolArn, and you can find these values using DescribePools. 
If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let poolId: String @inlinable @@ -891,7 +897,7 @@ extension PinpointSMSVoiceV2 { public let isoCountryCode: String /// The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. After the pool is created the MessageType can't be changed. public let messageType: MessageType - /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity. + /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String /// An array of tags (key and value pairs) associated with the pool. public let tags: [Tag]? @@ -1608,7 +1614,7 @@ extension PinpointSMSVoiceV2 { public struct DeleteKeywordRequest: AWSEncodableShape { /// The keyword to delete. public let keyword: String - /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, PoolId or PoolArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn and DescribePools to find the values of PoolId and PoolArn. + /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, PoolId or PoolArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn and DescribePools to find the values of PoolId and PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String @inlinable @@ -1681,7 +1687,7 @@ extension PinpointSMSVoiceV2 { } public struct DeleteOptOutListRequest: AWSEncodableShape { - /// The OptOutListName or OptOutListArn of the OptOutList to delete. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn. + /// The OptOutListName or OptOutListArn of the OptOutList to delete. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let optOutListName: String @inlinable @@ -1725,7 +1731,7 @@ extension PinpointSMSVoiceV2 { public struct DeleteOptedOutNumberRequest: AWSEncodableShape { /// The phone number, in E.164 format, to remove from the OptOutList. public let optedOutNumber: String - /// The OptOutListName or OptOutListArn to remove the phone number from. + /// The OptOutListName or OptOutListArn to remove the phone number from. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). 
public let optOutListName: String @inlinable @@ -1780,7 +1786,7 @@ extension PinpointSMSVoiceV2 { } public struct DeletePoolRequest: AWSEncodableShape { - /// The PoolId or PoolArn of the pool to delete. You can use DescribePools to find the values for PoolId and PoolArn . + /// The PoolId or PoolArn of the pool to delete. You can use DescribePools to find the values for PoolId and PoolArn . If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let poolId: String @inlinable @@ -2084,6 +2090,48 @@ extension PinpointSMSVoiceV2 { } } + public struct DeleteResourcePolicyRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource you're deleting the resource-based policy from. + public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 256) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[A-Za-z0-9_:/-]+$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + } + } + + public struct DeleteResourcePolicyResult: AWSDecodableShape { + /// The time when the resource-based policy was created, in UNIX epoch time format. + public let createdTimestamp: Date? + /// The JSON formatted resource-based policy that was deleted. + public let policy: String? + /// The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource that the resource-based policy was deleted from. + public let resourceArn: String? + + @inlinable + public init(createdTimestamp: Date? = nil, policy: String? = nil, resourceArn: String? = nil) { + self.createdTimestamp = createdTimestamp + self.policy = policy + self.resourceArn = resourceArn + } + + private enum CodingKeys: String, CodingKey { + case createdTimestamp = "CreatedTimestamp" + case policy = "Policy" + case resourceArn = "ResourceArn" + } + } + public struct DeleteTextMessageSpendLimitOverrideRequest: AWSEncodableShape { public init() {} } @@ -2325,7 +2373,7 @@ extension PinpointSMSVoiceV2 { public let maxResults: Int? /// The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. public let nextToken: String? - /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. + /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String @inlinable @@ -2398,14 +2446,17 @@ extension PinpointSMSVoiceV2 { public let maxResults: Int? /// The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. public let nextToken: String? 
- /// The OptOutLists to show the details of. This is an array of strings that can be either the OptOutListName or OptOutListArn. + /// The OptOutLists to show the details of. This is an array of strings that can be either the OptOutListName or OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let optOutListNames: [String]? + /// Use SELF to filter the list of Opt-Out List to ones your account owns or use SHARED to filter on Opt-Out List shared with your account. The Owner and OptOutListNames parameters can't be used at the same time. + public let owner: Owner? @inlinable - public init(maxResults: Int? = nil, nextToken: String? = nil, optOutListNames: [String]? = nil) { + public init(maxResults: Int? = nil, nextToken: String? = nil, optOutListNames: [String]? = nil, owner: Owner? = nil) { self.maxResults = maxResults self.nextToken = nextToken self.optOutListNames = optOutListNames + self.owner = owner } public func validate(name: String) throws { @@ -2426,6 +2477,7 @@ extension PinpointSMSVoiceV2 { case maxResults = "MaxResults" case nextToken = "NextToken" case optOutListNames = "OptOutListNames" + case owner = "Owner" } } @@ -2456,7 +2508,7 @@ extension PinpointSMSVoiceV2 { public let nextToken: String? /// An array of phone numbers to search for in the OptOutList. public let optedOutNumbers: [String]? - /// The OptOutListName or OptOutListArn of the OptOutList. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn. + /// The OptOutListName or OptOutListArn of the OptOutList. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let optOutListName: String @inlinable @@ -2531,14 +2583,17 @@ extension PinpointSMSVoiceV2 { public let maxResults: Int? /// The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. public let nextToken: String? - /// The unique identifier of phone numbers to find information about. This is an array of strings that can be either the PhoneNumberId or PhoneNumberArn. + /// Use SELF to filter the list of phone numbers to ones your account owns or use SHARED to filter on phone numbers shared with your account. The Owner and PhoneNumberIds parameters can't be used at the same time. + public let owner: Owner? + /// The unique identifier of phone numbers to find information about. This is an array of strings that can be either the PhoneNumberId or PhoneNumberArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let phoneNumberIds: [String]? @inlinable - public init(filters: [PhoneNumberFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, phoneNumberIds: [String]? = nil) { + public init(filters: [PhoneNumberFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, owner: Owner? = nil, phoneNumberIds: [String]? 
= nil) { self.filters = filters self.maxResults = maxResults self.nextToken = nextToken + self.owner = owner self.phoneNumberIds = phoneNumberIds } @@ -2564,6 +2619,7 @@ extension PinpointSMSVoiceV2 { case filters = "Filters" case maxResults = "MaxResults" case nextToken = "NextToken" + case owner = "Owner" case phoneNumberIds = "PhoneNumberIds" } } @@ -2593,14 +2649,17 @@ extension PinpointSMSVoiceV2 { public let maxResults: Int? /// The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. public let nextToken: String? - /// The unique identifier of pools to find. This is an array of strings that can be either the PoolId or PoolArn. + /// Use SELF to filter the list of Pools to ones your account owns or use SHARED to filter on Pools shared with your account. The Owner and PoolIds parameters can't be used at the same time. + public let owner: Owner? + /// The unique identifier of pools to find. This is an array of strings that can be either the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let poolIds: [String]? @inlinable - public init(filters: [PoolFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, poolIds: [String]? = nil) { + public init(filters: [PoolFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, owner: Owner? = nil, poolIds: [String]? = nil) { self.filters = filters self.maxResults = maxResults self.nextToken = nextToken + self.owner = owner self.poolIds = poolIds } @@ -2626,6 +2685,7 @@ extension PinpointSMSVoiceV2 { case filters = "Filters" case maxResults = "MaxResults" case nextToken = "NextToken" + case owner = "Owner" case poolIds = "PoolIds" } } @@ -3202,14 +3262,17 @@ extension PinpointSMSVoiceV2 { public let maxResults: Int? /// The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. public let nextToken: String? - /// An array of SenderIdAndCountry objects to search for. + /// Use SELF to filter the list of Sender Ids to ones your account owns or use SHARED to filter on Sender Ids shared with your account. The Owner and SenderIds parameters can't be used at the same time. + public let owner: Owner? + /// An array of SenderIdAndCountry objects to search for. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let senderIds: [SenderIdAndCountry]? @inlinable - public init(filters: [SenderIdFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, senderIds: [SenderIdAndCountry]? = nil) { + public init(filters: [SenderIdFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil, owner: Owner? = nil, senderIds: [SenderIdAndCountry]? = nil) { self.filters = filters self.maxResults = maxResults self.nextToken = nextToken + self.owner = owner self.senderIds = senderIds } @@ -3233,6 +3296,7 @@ extension PinpointSMSVoiceV2 { case filters = "Filters" case maxResults = "MaxResults" case nextToken = "NextToken" + case owner = "Owner" case senderIds = "SenderIds" } } @@ -3376,9 +3440,9 @@ extension PinpointSMSVoiceV2 { public let clientToken: String? /// The two-character code, in ISO 3166-1 alpha-2 format, for the country or region. public let isoCountryCode: String - /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. 
You can use DescribePhoneNumbers find the values for PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the values for SenderId and SenderIdArn. + /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers find the values for PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the values for SenderId and SenderIdArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String - /// The unique identifier for the pool to disassociate with the origination identity. This value can be either the PoolId or PoolArn. + /// The unique identifier for the pool to disassociate with the origination identity. This value can be either the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let poolId: String @inlinable @@ -3629,6 +3693,48 @@ extension PinpointSMSVoiceV2 { } } + public struct GetResourcePolicyRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource attached to the resource-based policy. + public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 256) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[A-Za-z0-9_:/-]+$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "ResourceArn" + } + } + + public struct GetResourcePolicyResult: AWSDecodableShape { + /// The time when the resource-based policy was created, in UNIX epoch time format. + public let createdTimestamp: Date? + /// The JSON formatted string that contains the resource-based policy attached to the AWS End User Messaging SMS and Voice resource. + public let policy: String? + /// The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource attached to the resource-based policy. + public let resourceArn: String? + + @inlinable + public init(createdTimestamp: Date? = nil, policy: String? = nil, resourceArn: String? = nil) { + self.createdTimestamp = createdTimestamp + self.policy = policy + self.resourceArn = resourceArn + } + + private enum CodingKeys: String, CodingKey { + case createdTimestamp = "CreatedTimestamp" + case policy = "Policy" + case resourceArn = "ResourceArn" + } + } + public struct KeywordFilter: AWSEncodableShape { /// The name of the attribute to filter on. public let name: KeywordFilterName @@ -3713,7 +3819,7 @@ extension PinpointSMSVoiceV2 { public let maxResults: Int? /// The token to be used for the next set of paginated results. You don't need to supply a value for this field in the initial request. public let nextToken: String? - /// The unique identifier for the pool. This value can be either the PoolId or PoolArn. + /// The unique identifier for the pool. This value can be either the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let poolId: String @inlinable @@ -4287,7 +4393,7 @@ extension PinpointSMSVoiceV2 { public let keywordAction: KeywordAction? 
/// The message associated with the keyword. public let keywordMessage: String - /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers get the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. + /// The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers get the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String @inlinable @@ -4351,7 +4457,7 @@ extension PinpointSMSVoiceV2 { public struct PutOptedOutNumberRequest: AWSEncodableShape { /// The phone number to add to the OptOutList in E.164 format. public let optedOutNumber: String - /// The OptOutListName or OptOutListArn to add the phone number to. + /// The OptOutListName or OptOutListArn to add the phone number to. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let optOutListName: String @inlinable @@ -4492,6 +4598,54 @@ extension PinpointSMSVoiceV2 { } } + public struct PutResourcePolicyRequest: AWSEncodableShape { + /// The JSON formatted resource-based policy to attach. + public let policy: String + /// The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource to attach the resource-based policy to. + public let resourceArn: String + + @inlinable + public init(policy: String, resourceArn: String) { + self.policy = policy + self.resourceArn = resourceArn + } + + public func validate(name: String) throws { + try self.validate(self.policy, name: "policy", parent: name, max: 10000) + try self.validate(self.policy, name: "policy", parent: name, min: 1) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 256) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[A-Za-z0-9_:/-]+$") + } + + private enum CodingKeys: String, CodingKey { + case policy = "Policy" + case resourceArn = "ResourceArn" + } + } + + public struct PutResourcePolicyResult: AWSDecodableShape { + /// The time when the resource-based policy was created, in UNIX epoch time format. + public let createdTimestamp: Date? + /// The JSON formatted Resource Policy. + public let policy: String? + /// The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource attached to the resource-based policy. + public let resourceArn: String? + + @inlinable + public init(createdTimestamp: Date? = nil, policy: String? = nil, resourceArn: String? = nil) { + self.createdTimestamp = createdTimestamp + self.policy = policy + self.resourceArn = resourceArn + } + + private enum CodingKeys: String, CodingKey { + case createdTimestamp = "CreatedTimestamp" + case policy = "Policy" + case resourceArn = "ResourceArn" + } + } + public struct RegistrationAssociationFilter: AWSEncodableShape { /// The name of the attribute to filter on. 
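// Usage sketch (illustrative; not part of the generated sources) for the resource-based policy
// shapes above. The client value, account IDs, ARN, and policy document are placeholders, and the
// putResourcePolicy/getResourcePolicy client operations are assumed to be generated in
// PinpointSMSVoiceV2_api.swift alongside these shapes. Sharing a resource this way is what the new
// Owner filter (SELF vs. SHARED) on the Describe* requests lets the receiving account discover.
import SotoPinpointSMSVoiceV2

func shareOriginationNumber(client: PinpointSMSVoiceV2) async throws {
    // Illustrative policy granting another account permission to send with this phone number.
    let policy = #"{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111122223333:root"},"Action":"sms-voice:SendTextMessage","Resource":"*"}]}"#
    let put = PinpointSMSVoiceV2.PutResourcePolicyRequest(
        policy: policy,
        resourceArn: "arn:aws:sms-voice:us-east-1:444455556666:phone-number/phone-abc0123456789"
    )
    // Client-side checks defined above: policy length 1...10000, ARN length 20...256 and pattern.
    try put.validate(name: "PutResourcePolicyRequest")
    _ = try await client.putResourcePolicy(put) // assumed generated operation

    let fetched = try await client.getResourcePolicy(.init(resourceArn: put.resourceArn)) // assumed generated operation
    print(fetched.policy ?? "no policy attached")
}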
public let name: RegistrationAssociationFilterName @@ -5047,7 +5201,7 @@ extension PinpointSMSVoiceV2 { } public struct ReleasePhoneNumberRequest: AWSEncodableShape { - /// The PhoneNumberId or PhoneNumberArn of the phone number to release. You can use DescribePhoneNumbers to get the values for PhoneNumberId and PhoneNumberArn. + /// The PhoneNumberId or PhoneNumberArn of the phone number to release. You can use DescribePhoneNumbers to get the values for PhoneNumberId and PhoneNumberArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let phoneNumberId: String @inlinable @@ -5218,9 +5372,9 @@ extension PinpointSMSVoiceV2 { public let numberCapabilities: [NumberCapability] /// The type of phone number to request. public let numberType: RequestableNumberType - /// The name of the OptOutList to associate with the phone number. You can use the OptOutListName or OptOutListArn. + /// The name of the OptOutList to associate with the phone number. You can use the OptOutListName or OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let optOutListName: String? - /// The pool to associated with the phone number. You can use the PoolId or PoolArn. + /// The pool to associated with the phone number. You can use the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let poolId: String? /// Use this field to attach your phone number for an external registration process. public let registrationId: String? @@ -5511,7 +5665,7 @@ extension PinpointSMSVoiceV2 { public let destinationCountryParameters: [DestinationCountryParameterKey: String]? /// Choose the language to use for the message. public let languageCode: LanguageCode? - /// The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. + /// The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String? /// Choose to send the verification code as an SMS or voice message. public let verificationChannel: VerificationChannel @@ -5596,7 +5750,7 @@ extension PinpointSMSVoiceV2 { public let mediaUrls: [String]? /// The text body of the message. public let messageBody: String? - /// The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. + /// The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String /// The unique identifier of the protect configuration to use. public let protectConfigurationId: String? @@ -5703,7 +5857,7 @@ extension PinpointSMSVoiceV2 { public let messageBody: String? /// The type of message. Valid values are for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. public let messageType: MessageType? 
- /// The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. + /// The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String? /// The unique identifier for the protect configuration. public let protectConfigurationId: String? @@ -5812,7 +5966,7 @@ extension PinpointSMSVoiceV2 { public let messageBody: String? /// Specifies if the MessageBody field contains text or speech synthesis markup language (SSML). TEXT: This is the default value. When used the maximum character limit is 3000. SSML: When used the maximum character limit is 6000 including SSML tagging. public let messageBodyTextType: VoiceMessageBodyTextType? - /// The origination identity to use for the voice call. This can be the PhoneNumber, PhoneNumberId, PhoneNumberArn, PoolId, or PoolArn. + /// The origination identity to use for the voice call. This can be the PhoneNumber, PhoneNumberId, PhoneNumberArn, PoolId, or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let originationIdentity: String /// The unique identifier for the protect configuration. public let protectConfigurationId: String? @@ -6543,7 +6697,7 @@ extension PinpointSMSVoiceV2 { public let deletionProtectionEnabled: Bool? /// The OptOutList to add the phone number to. Valid values for this field can be either the OutOutListName or OutOutListArn. public let optOutListName: String? - /// The unique identifier of the phone number. Valid values for this field can be either the PhoneNumberId or PhoneNumberArn. + /// The unique identifier of the phone number. Valid values for this field can be either the PhoneNumberId or PhoneNumberArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let phoneNumberId: String /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? @@ -6672,9 +6826,9 @@ extension PinpointSMSVoiceV2 { public struct UpdatePoolRequest: AWSEncodableShape { /// When set to true the pool can't be deleted. public let deletionProtectionEnabled: Bool? - /// The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn. + /// The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). public let optOutListName: String? - /// The unique identifier of the pool to update. Valid values are either the PoolId or PoolArn. + /// The unique identifier of the pool to update. Valid values are either the PoolId or PoolArn. If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN). 
public let poolId: String /// By default this is set to false. When an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End User Messaging SMS and Voice automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. public let selfManagedOptOutsEnabled: Bool? diff --git a/Sources/Soto/Services/Pricing/Pricing_api.swift b/Sources/Soto/Services/Pricing/Pricing_api.swift index a757b0a1d5..c4b27e9749 100644 --- a/Sources/Soto/Services/Pricing/Pricing_api.swift +++ b/Sources/Soto/Services/Pricing/Pricing_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS Pricing service. /// -/// The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following: Build cost control and scenario planning tools Reconcile billing data Forecast future spend for budgeting purposes Provide cost benefit analysis that compare your internal workloads with Amazon Web Services Use GetServices without a service code to retrieve the service codes for all Amazon Web Services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType. For more information, see Using the Amazon Web Services Price List API in the Billing User Guide. +/// The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following: Build cost control and scenario planning tools Reconcile billing data Forecast future spend for budgeting purposes Provide cost benefit analysis that compare your internal workloads with Amazon Web Services Use GetServices without a service code to retrieve the service codes for all Amazon Web Services services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType. For more information, see Using the Amazon Web Services Price List API in the Billing User Guide. 
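// Usage sketch (illustrative; not part of the generated sources) for the query flow described in
// the comment above. It assumes the generated client exposes the Price List Query API operations
// (the service-code listing described as GetServices is generated as describeServices) and that the
// Filter/FilterType shapes and the attributeValues/priceList response fields mirror the AWS API.
import SotoPricing

func exploreEC2Pricing(pricing: Pricing) async throws {
    // 1. Retrieve the attribute names for a service code (omit serviceCode to list all services).
    let services = try await pricing.describeServices(serviceCode: "AmazonEC2")
    print(services.services?.first?.attributeNames ?? [])

    // 2. See which values an attribute can take.
    let values = try await pricing.getAttributeValues(attributeName: "volumeType", serviceCode: "AmazonEC2")
    print(values.attributeValues?.compactMap(\.value) ?? [])

    // 3. Fetch matching products; each entry in priceList is a JSON string at SKU level.
    let products = try await pricing.getProducts(
        filters: [.init(field: "volumeType", type: .termMatch, value: "Provisioned IOPS")],
        serviceCode: "AmazonEC2"
    )
    print(products.priceList?.count ?? 0)
}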
public struct Pricing: AWSService { // MARK: Member variables diff --git a/Sources/Soto/Services/QBusiness/QBusiness_api.swift b/Sources/Soto/Services/QBusiness/QBusiness_api.swift index 545eed2508..55a3aa7037 100644 --- a/Sources/Soto/Services/QBusiness/QBusiness_api.swift +++ b/Sources/Soto/Services/QBusiness/QBusiness_api.swift @@ -257,7 +257,7 @@ public struct QBusiness: AWSService { /// - conversationId: The identifier of the Amazon Q Business conversation. /// - inputStream: The streaming input for the Chat API. /// - parentMessageId: The identifier used to associate a user message with a AI generated response. - /// - userGroups: The groups that a user associated with the chat input belongs to. + /// - userGroups: The group names that a user associated with the chat input belongs to. /// - userId: The identifier of the user attached to the chat input. /// - logger: Logger use during operation @inlinable @@ -309,7 +309,7 @@ public struct QBusiness: AWSService { /// - clientToken: A token that you provide to identify a chat request. /// - conversationId: The identifier of the Amazon Q Business conversation. /// - parentMessageId: The identifier of the previous system message in a conversation. - /// - userGroups: The groups that a user associated with the chat input belongs to. + /// - userGroups: The group names that a user associated with the chat input belongs to. /// - userId: The identifier of the user attached to the chat input. /// - userMessage: A end user message in a conversation. /// - logger: Logger use during operation @@ -375,7 +375,7 @@ public struct QBusiness: AWSService { /// - identityType: The authentication type being used by a Amazon Q Business application. /// - personalizationConfiguration: Configuration information about chat response personalization. For more information, see Personalizing chat responses /// - qAppsConfiguration: An option to allow end users to create and use Amazon Q Apps in the web experience. - /// - roleArn: The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. + /// - roleArn: The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. If this property is not specified, Amazon Q Business will create a service linked role (SLR) and use it as the application's role. /// - tags: A list of key-value pairs that identify or categorize your Amazon Q Business application. You can also use tags to help control access to the application. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @. /// - logger: Logger use during operation @inlinable @@ -673,6 +673,7 @@ public struct QBusiness: AWSService { /// - applicationId: The identifier of the Amazon Q Business web experience. /// - clientToken: A token you provide to identify a request to create an Amazon Q Business web experience. /// - identityProviderConfiguration: Information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience. + /// - origins: Sets the website domain origins that are allowed to embed the Amazon Q Business web experience. The domain origin refers to the base URL for accessing a website including the protocol (http/https), the domain name, and the port number (if specified). You must only submit a base URL and not a full path. For example, https://docs.aws.amazon.com. 
/// - roleArn: The Amazon Resource Name (ARN) of the service role attached to your web experience. You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value. /// - samplePromptsControlMode: Determines whether sample prompts are enabled in the web experience for an end user. /// - subtitle: A subtitle to personalize your Amazon Q Business web experience. @@ -685,6 +686,7 @@ public struct QBusiness: AWSService { applicationId: String, clientToken: String? = CreateWebExperienceRequest.idempotencyToken(), identityProviderConfiguration: IdentityProviderConfiguration? = nil, + origins: [String]? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, subtitle: String? = nil, @@ -697,6 +699,7 @@ public struct QBusiness: AWSService { applicationId: applicationId, clientToken: clientToken, identityProviderConfiguration: identityProviderConfiguration, + origins: origins, roleArn: roleArn, samplePromptsControlMode: samplePromptsControlMode, subtitle: subtitle, @@ -2349,6 +2352,7 @@ public struct QBusiness: AWSService { /// Parameters: /// - applicationId: The identifier of the Amazon Q Business application attached to the web experience. /// - identityProviderConfiguration: Information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience. + /// - origins: Updates the website domain origins that are allowed to embed the Amazon Q Business web experience. The domain origin refers to the base URL for accessing a website including the protocol (http/https), the domain name, and the port number (if specified). Any values except null submitted as part of this update will replace all previous values. You must only submit a base URL and not a full path. For example, https://docs.aws.amazon.com. /// - roleArn: The Amazon Resource Name (ARN) of the role with permission to access the Amazon Q Business web experience and required resources. /// - samplePromptsControlMode: Determines whether sample prompts are enabled in the web experience for an end user. /// - subtitle: The subtitle of the Amazon Q Business web experience. @@ -2360,6 +2364,7 @@ public struct QBusiness: AWSService { public func updateWebExperience( applicationId: String, identityProviderConfiguration: IdentityProviderConfiguration? = nil, + origins: [String]? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, subtitle: String? = nil, @@ -2371,6 +2376,7 @@ public struct QBusiness: AWSService { let input = UpdateWebExperienceRequest( applicationId: applicationId, identityProviderConfiguration: identityProviderConfiguration, + origins: origins, roleArn: roleArn, samplePromptsControlMode: samplePromptsControlMode, subtitle: subtitle, diff --git a/Sources/Soto/Services/QBusiness/QBusiness_shapes.swift b/Sources/Soto/Services/QBusiness/QBusiness_shapes.swift index 4f3cf20908..5d2f1a2eb2 100644 --- a/Sources/Soto/Services/QBusiness/QBusiness_shapes.swift +++ b/Sources/Soto/Services/QBusiness/QBusiness_shapes.swift @@ -1701,7 +1701,7 @@ extension QBusiness { public let inputStream: AWSEventStream? /// The identifier used to associate a user message with a AI generated response. public let parentMessageId: String? - /// The groups that a user associated with the chat input belongs to. 
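// Usage sketch (illustrative; not part of the generated sources) for the new origins parameter on
// createWebExperience/updateWebExperience. The client value and identifiers are placeholders. Each
// origin must be a bare origin (scheme, host, optional port, no path) per the validation pattern
// added in QBusiness_shapes.swift below, at most 10 origins may be supplied, and an update replaces
// any previously configured origins.
import SotoQBusiness

func allowEmbedding(qbusiness: QBusiness) async throws {
    _ = try await qbusiness.updateWebExperience(
        applicationId: "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",
        origins: ["https://docs.aws.amazon.com", "https://intranet.example.com:8443"],
        webExperienceId: "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222"
    )
}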
+ /// The group names that a user associated with the chat input belongs to. public let userGroups: [String]? /// The identifier of the user attached to the chat input. public let userId: String? @@ -1792,7 +1792,7 @@ extension QBusiness { public let conversationId: String? /// The identifier of the previous system message in a conversation. public let parentMessageId: String? - /// The groups that a user associated with the chat input belongs to. + /// The group names that a user associated with the chat input belongs to. public let userGroups: [String]? /// The identifier of the user attached to the chat input. public let userId: String? @@ -2031,7 +2031,7 @@ extension QBusiness { public let personalizationConfiguration: PersonalizationConfiguration? /// An option to allow end users to create and use Amazon Q Apps in the web experience. public let qAppsConfiguration: QAppsConfiguration? - /// The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. + /// The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. If this property is not specified, Amazon Q Business will create a service linked role (SLR) and use it as the application's role. public let roleArn: String? /// A list of key-value pairs that identify or categorize your Amazon Q Business application. You can also use tags to help control access to the application. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @. public let tags: [Tag]? @@ -2548,6 +2548,8 @@ extension QBusiness { public let clientToken: String? /// Information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience. public let identityProviderConfiguration: IdentityProviderConfiguration? + /// Sets the website domain origins that are allowed to embed the Amazon Q Business web experience. The domain origin refers to the base URL for accessing a website including the protocol (http/https), the domain name, and the port number (if specified). You must only submit a base URL and not a full path. For example, https://docs.aws.amazon.com. + public let origins: [String]? /// The Amazon Resource Name (ARN) of the service role attached to your web experience. You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value. public let roleArn: String? /// Determines whether sample prompts are enabled in the web experience for an end user. @@ -2562,10 +2564,11 @@ extension QBusiness { public let welcomeMessage: String? @inlinable - public init(applicationId: String, clientToken: String? = CreateWebExperienceRequest.idempotencyToken(), identityProviderConfiguration: IdentityProviderConfiguration? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, subtitle: String? = nil, tags: [Tag]? = nil, title: String? = nil, welcomeMessage: String? = nil) { + public init(applicationId: String, clientToken: String? = CreateWebExperienceRequest.idempotencyToken(), identityProviderConfiguration: IdentityProviderConfiguration? = nil, origins: [String]? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, subtitle: String? = nil, tags: [Tag]? = nil, title: String? = nil, welcomeMessage: String? 
= nil) { self.applicationId = applicationId self.clientToken = clientToken self.identityProviderConfiguration = identityProviderConfiguration + self.origins = origins self.roleArn = roleArn self.samplePromptsControlMode = samplePromptsControlMode self.subtitle = subtitle @@ -2580,6 +2583,7 @@ extension QBusiness { request.encodePath(self.applicationId, key: "applicationId") try container.encodeIfPresent(self.clientToken, forKey: .clientToken) try container.encodeIfPresent(self.identityProviderConfiguration, forKey: .identityProviderConfiguration) + try container.encodeIfPresent(self.origins, forKey: .origins) try container.encodeIfPresent(self.roleArn, forKey: .roleArn) try container.encodeIfPresent(self.samplePromptsControlMode, forKey: .samplePromptsControlMode) try container.encodeIfPresent(self.subtitle, forKey: .subtitle) @@ -2595,6 +2599,12 @@ extension QBusiness { try self.validate(self.clientToken, name: "clientToken", parent: name, max: 100) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) try self.identityProviderConfiguration?.validate(name: "\(name).identityProviderConfiguration") + try self.origins?.forEach { + try validate($0, name: "origins[]", parent: name, max: 256) + try validate($0, name: "origins[]", parent: name, min: 1) + try validate($0, name: "origins[]", parent: name, pattern: "^(http://|https://)[a-zA-Z0-9-_.]+(?::[0-9]{1,5})?$") + } + try self.validate(self.origins, name: "origins", parent: name, max: 10) try self.validate(self.roleArn, name: "roleArn", parent: name, max: 1284) try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}$") try self.validate(self.subtitle, name: "subtitle", parent: name, max: 500) @@ -2611,6 +2621,7 @@ extension QBusiness { private enum CodingKeys: String, CodingKey { case clientToken = "clientToken" case identityProviderConfiguration = "identityProviderConfiguration" + case origins = "origins" case roleArn = "roleArn" case samplePromptsControlMode = "samplePromptsControlMode" case subtitle = "subtitle" @@ -4254,6 +4265,8 @@ extension QBusiness { public let error: ErrorDetail? /// Information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience. public let identityProviderConfiguration: IdentityProviderConfiguration? + /// Gets the website domain origins that are allowed to embed the Amazon Q Business web experience. The domain origin refers to the base URL for accessing a website including the protocol (http/https), the domain name, and the port number (if specified). + public let origins: [String]? /// The Amazon Resource Name (ARN) of the service role attached to your web experience. public let roleArn: String? /// Determines whether sample prompts are enabled in the web experience for an end user. @@ -4274,13 +4287,14 @@ extension QBusiness { public let welcomeMessage: String? @inlinable - public init(applicationId: String? = nil, createdAt: Date? = nil, defaultEndpoint: String? = nil, error: ErrorDetail? = nil, identityProviderConfiguration: IdentityProviderConfiguration? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, status: WebExperienceStatus? = nil, subtitle: String? = nil, title: String? = nil, updatedAt: Date? = nil, webExperienceArn: String? = nil, webExperienceId: String? = nil, welcomeMessage: String? = nil) { + public init(applicationId: String? = nil, createdAt: Date? 
= nil, defaultEndpoint: String? = nil, error: ErrorDetail? = nil, identityProviderConfiguration: IdentityProviderConfiguration? = nil, origins: [String]? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, status: WebExperienceStatus? = nil, subtitle: String? = nil, title: String? = nil, updatedAt: Date? = nil, webExperienceArn: String? = nil, webExperienceId: String? = nil, welcomeMessage: String? = nil) { self.applicationId = applicationId self.authenticationConfiguration = nil self.createdAt = createdAt self.defaultEndpoint = defaultEndpoint self.error = error self.identityProviderConfiguration = identityProviderConfiguration + self.origins = origins self.roleArn = roleArn self.samplePromptsControlMode = samplePromptsControlMode self.status = status @@ -4294,13 +4308,14 @@ extension QBusiness { @available(*, deprecated, message: "Members authenticationConfiguration have been deprecated") @inlinable - public init(applicationId: String? = nil, authenticationConfiguration: WebExperienceAuthConfiguration? = nil, createdAt: Date? = nil, defaultEndpoint: String? = nil, error: ErrorDetail? = nil, identityProviderConfiguration: IdentityProviderConfiguration? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, status: WebExperienceStatus? = nil, subtitle: String? = nil, title: String? = nil, updatedAt: Date? = nil, webExperienceArn: String? = nil, webExperienceId: String? = nil, welcomeMessage: String? = nil) { + public init(applicationId: String? = nil, authenticationConfiguration: WebExperienceAuthConfiguration? = nil, createdAt: Date? = nil, defaultEndpoint: String? = nil, error: ErrorDetail? = nil, identityProviderConfiguration: IdentityProviderConfiguration? = nil, origins: [String]? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, status: WebExperienceStatus? = nil, subtitle: String? = nil, title: String? = nil, updatedAt: Date? = nil, webExperienceArn: String? = nil, webExperienceId: String? = nil, welcomeMessage: String? = nil) { self.applicationId = applicationId self.authenticationConfiguration = authenticationConfiguration self.createdAt = createdAt self.defaultEndpoint = defaultEndpoint self.error = error self.identityProviderConfiguration = identityProviderConfiguration + self.origins = origins self.roleArn = roleArn self.samplePromptsControlMode = samplePromptsControlMode self.status = status @@ -4319,6 +4334,7 @@ extension QBusiness { case defaultEndpoint = "defaultEndpoint" case error = "error" case identityProviderConfiguration = "identityProviderConfiguration" + case origins = "origins" case roleArn = "roleArn" case samplePromptsControlMode = "samplePromptsControlMode" case status = "status" @@ -6831,6 +6847,8 @@ extension QBusiness { public let authenticationConfiguration: WebExperienceAuthConfiguration? /// Information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience. public let identityProviderConfiguration: IdentityProviderConfiguration? + /// Updates the website domain origins that are allowed to embed the Amazon Q Business web experience. The domain origin refers to the base URL for accessing a website including the protocol (http/https), the domain name, and the port number (if specified). Any values except null submitted as part of this update will replace all previous values. You must only submit a base URL and not a full path. 
For example, https://docs.aws.amazon.com. + public let origins: [String]? /// The Amazon Resource Name (ARN) of the role with permission to access the Amazon Q Business web experience and required resources. public let roleArn: String? /// Determines whether sample prompts are enabled in the web experience for an end user. @@ -6845,10 +6863,11 @@ extension QBusiness { public let welcomeMessage: String? @inlinable - public init(applicationId: String, identityProviderConfiguration: IdentityProviderConfiguration? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, subtitle: String? = nil, title: String? = nil, webExperienceId: String, welcomeMessage: String? = nil) { + public init(applicationId: String, identityProviderConfiguration: IdentityProviderConfiguration? = nil, origins: [String]? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, subtitle: String? = nil, title: String? = nil, webExperienceId: String, welcomeMessage: String? = nil) { self.applicationId = applicationId self.authenticationConfiguration = nil self.identityProviderConfiguration = identityProviderConfiguration + self.origins = origins self.roleArn = roleArn self.samplePromptsControlMode = samplePromptsControlMode self.subtitle = subtitle @@ -6859,10 +6878,11 @@ extension QBusiness { @available(*, deprecated, message: "Members authenticationConfiguration have been deprecated") @inlinable - public init(applicationId: String, authenticationConfiguration: WebExperienceAuthConfiguration? = nil, identityProviderConfiguration: IdentityProviderConfiguration? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, subtitle: String? = nil, title: String? = nil, webExperienceId: String, welcomeMessage: String? = nil) { + public init(applicationId: String, authenticationConfiguration: WebExperienceAuthConfiguration? = nil, identityProviderConfiguration: IdentityProviderConfiguration? = nil, origins: [String]? = nil, roleArn: String? = nil, samplePromptsControlMode: WebExperienceSamplePromptsControlMode? = nil, subtitle: String? = nil, title: String? = nil, webExperienceId: String, welcomeMessage: String? 
= nil) { self.applicationId = applicationId self.authenticationConfiguration = authenticationConfiguration self.identityProviderConfiguration = identityProviderConfiguration + self.origins = origins self.roleArn = roleArn self.samplePromptsControlMode = samplePromptsControlMode self.subtitle = subtitle @@ -6877,6 +6897,7 @@ extension QBusiness { request.encodePath(self.applicationId, key: "applicationId") try container.encodeIfPresent(self.authenticationConfiguration, forKey: .authenticationConfiguration) try container.encodeIfPresent(self.identityProviderConfiguration, forKey: .identityProviderConfiguration) + try container.encodeIfPresent(self.origins, forKey: .origins) try container.encodeIfPresent(self.roleArn, forKey: .roleArn) try container.encodeIfPresent(self.samplePromptsControlMode, forKey: .samplePromptsControlMode) try container.encodeIfPresent(self.subtitle, forKey: .subtitle) @@ -6891,6 +6912,12 @@ extension QBusiness { try self.validate(self.applicationId, name: "applicationId", parent: name, pattern: "^[a-zA-Z0-9][a-zA-Z0-9-]{35}$") try self.authenticationConfiguration?.validate(name: "\(name).authenticationConfiguration") try self.identityProviderConfiguration?.validate(name: "\(name).identityProviderConfiguration") + try self.origins?.forEach { + try validate($0, name: "origins[]", parent: name, max: 256) + try validate($0, name: "origins[]", parent: name, min: 1) + try validate($0, name: "origins[]", parent: name, pattern: "^(http://|https://)[a-zA-Z0-9-_.]+(?::[0-9]{1,5})?$") + } + try self.validate(self.origins, name: "origins", parent: name, max: 10) try self.validate(self.roleArn, name: "roleArn", parent: name, max: 1284) try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:[a-z0-9-\\.]{1,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[a-z0-9-\\.]{0,63}:[^/].{0,1023}$") try self.validate(self.subtitle, name: "subtitle", parent: name, max: 500) @@ -6906,6 +6933,7 @@ extension QBusiness { private enum CodingKeys: String, CodingKey { case authenticationConfiguration = "authenticationConfiguration" case identityProviderConfiguration = "identityProviderConfiguration" + case origins = "origins" case roleArn = "roleArn" case samplePromptsControlMode = "samplePromptsControlMode" case subtitle = "subtitle" @@ -6952,7 +6980,7 @@ extension QBusiness { } public struct UsersAndGroups: AWSEncodableShape & AWSDecodableShape { - /// The user groups associated with a topic control rule. + /// The user group names associated with a topic control rule. public let userGroups: [String]? /// The user ids associated with a topic control rule. public let userIds: [String]? diff --git a/Sources/Soto/Services/QConnect/QConnect_api.swift b/Sources/Soto/Services/QConnect/QConnect_api.swift index be63277f3f..f6e75a0d44 100644 --- a/Sources/Soto/Services/QConnect/QConnect_api.swift +++ b/Sources/Soto/Services/QConnect/QConnect_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS QConnect service. /// -/// Powered by Amazon Bedrock: Amazon Web Services implements automated abuse detection. Because Amazon Q in Connect is built on Amazon Bedrock, users can take full advantage of the controls implemented in Amazon Bedrock to enforce safety, security, and the responsible use of artificial intelligence (AI). Amazon Q in Connect is a generative AI customer service assistant. It is an LLM-enhanced evolution of Amazon Connect Wisdom that delivers real-time recommendations to help contact center agents resolve customer issues quickly and accurately. 
Amazon Q in Connect automatically detects customer intent during calls and chats using conversational analytics and natural language understanding (NLU). It then provides agents with immediate, real-time generative responses and suggested actions, and links to relevant documents and articles. Agents can also query Amazon Q in Connect directly using natural language or keywords to answer customer requests. Use the Amazon Q in Connect APIs to create an assistant and a knowledge base, for example, or manage content by uploading custom files. For more information, see Use Amazon Q in Connect for generative AI powered agent assistance in real-time in the Amazon Connect Administrator Guide. +/// Amazon Q actions Amazon Q data types Powered by Amazon Bedrock: Amazon Web Services implements automated abuse detection. Because Amazon Q in Connect is built on Amazon Bedrock, users can take full advantage of the controls implemented in Amazon Bedrock to enforce safety, security, and the responsible use of artificial intelligence (AI). Amazon Q in Connect is a generative AI customer service assistant. It is an LLM-enhanced evolution of Amazon Connect Wisdom that delivers real-time recommendations to help contact center agents resolve customer issues quickly and accurately. Amazon Q in Connect automatically detects customer intent during calls and chats using conversational analytics and natural language understanding (NLU). It then provides agents with immediate, real-time generative responses and suggested actions, and links to relevant documents and articles. Agents can also query Amazon Q in Connect directly using natural language or keywords to answer customer requests. Use the Amazon Q in Connect APIs to create an assistant and a knowledge base, for example, or manage content by uploading custom files. For more information, see Use Amazon Q in Connect for generative AI powered agent assistance in real-time in the Amazon Connect Administrator Guide. public struct QConnect: AWSService { // MARK: Member variables @@ -80,6 +80,7 @@ public struct QConnect: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ + "ca-central-1": "wisdom-fips.ca-central-1.amazonaws.com", "us-east-1": "wisdom-fips.us-east-1.amazonaws.com", "us-west-2": "wisdom-fips.us-west-2.amazonaws.com" ]) @@ -87,6 +88,191 @@ public struct QConnect: AWSService { // MARK: API Calls + /// Creates an Amazon Q in Connect AI Agent. + @Sendable + @inlinable + public func createAIAgent(_ input: CreateAIAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAIAgentResponse { + try await self.client.execute( + operation: "CreateAIAgent", + path: "/assistants/{assistantId}/aiagents", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates an Amazon Q in Connect AI Agent. + /// + /// Parameters: + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + /// - configuration: The configuration of the AI Agent. + /// - description: The description of the AI Agent. + /// - name: The name of the AI Agent. 
+ /// - tags: The tags used to organize, track, or control access for this resource. + /// - type: The type of the AI Agent. + /// - visibilityStatus: The visibility status of the AI Agent. + /// - logger: Logger use during operation + @inlinable + public func createAIAgent( + assistantId: String, + clientToken: String? = CreateAIAgentRequest.idempotencyToken(), + configuration: AIAgentConfiguration, + description: String? = nil, + name: String, + tags: [String: String]? = nil, + type: AIAgentType, + visibilityStatus: VisibilityStatus, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateAIAgentResponse { + let input = CreateAIAgentRequest( + assistantId: assistantId, + clientToken: clientToken, + configuration: configuration, + description: description, + name: name, + tags: tags, + type: type, + visibilityStatus: visibilityStatus + ) + return try await self.createAIAgent(input, logger: logger) + } + + /// Creates and Amazon Q in Connect AI Agent version. + @Sendable + @inlinable + public func createAIAgentVersion(_ input: CreateAIAgentVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAIAgentVersionResponse { + try await self.client.execute( + operation: "CreateAIAgentVersion", + path: "/assistants/{assistantId}/aiagents/{aiAgentId}/versions", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates and Amazon Q in Connect AI Agent version. + /// + /// Parameters: + /// - aiAgentId: The identifier of the Amazon Q in Connect AI Agent. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + /// - modifiedTime: The modification time of the AI Agent should be tracked for version creation. This field should be specified to avoid version creation when simultaneous update to the underlying AI Agent are possible. The value should be the modifiedTime returned from the request to create or update an AI Agent so that version creation can fail if an update to the AI Agent post the specified modification time has been made. + /// - logger: Logger use during operation + @inlinable + public func createAIAgentVersion( + aiAgentId: String, + assistantId: String, + clientToken: String? = CreateAIAgentVersionRequest.idempotencyToken(), + modifiedTime: Date? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateAIAgentVersionResponse { + let input = CreateAIAgentVersionRequest( + aiAgentId: aiAgentId, + assistantId: assistantId, + clientToken: clientToken, + modifiedTime: modifiedTime + ) + return try await self.createAIAgentVersion(input, logger: logger) + } + + /// Creates an Amazon Q in Connect AI Prompt. + @Sendable + @inlinable + public func createAIPrompt(_ input: CreateAIPromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAIPromptResponse { + try await self.client.execute( + operation: "CreateAIPrompt", + path: "/assistants/{assistantId}/aiprompts", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates an Amazon Q in Connect AI Prompt. + /// + /// Parameters: + /// - apiFormat: The API Format of the AI Prompt. 
+ /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + /// - description: The description of the AI Prompt. + /// - modelId: The identifier of the model used for this AI Prompt. Model Ids supported are: CLAUDE_3_HAIKU_20240307_V1 + /// - name: The name of the AI Prompt. + /// - tags: The tags used to organize, track, or control access for this resource. + /// - templateConfiguration: The configuration of the prompt template for this AI Prompt. + /// - templateType: The type of the prompt template for this AI Prompt. + /// - type: The type of this AI Prompt. + /// - visibilityStatus: The visibility status of the AI Prompt. + /// - logger: Logger use during operation + @inlinable + public func createAIPrompt( + apiFormat: AIPromptAPIFormat, + assistantId: String, + clientToken: String? = CreateAIPromptRequest.idempotencyToken(), + description: String? = nil, + modelId: String, + name: String, + tags: [String: String]? = nil, + templateConfiguration: AIPromptTemplateConfiguration, + templateType: AIPromptTemplateType, + type: AIPromptType, + visibilityStatus: VisibilityStatus, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateAIPromptResponse { + let input = CreateAIPromptRequest( + apiFormat: apiFormat, + assistantId: assistantId, + clientToken: clientToken, + description: description, + modelId: modelId, + name: name, + tags: tags, + templateConfiguration: templateConfiguration, + templateType: templateType, + type: type, + visibilityStatus: visibilityStatus + ) + return try await self.createAIPrompt(input, logger: logger) + } + + /// Creates an Amazon Q in Connect AI Prompt version. + @Sendable + @inlinable + public func createAIPromptVersion(_ input: CreateAIPromptVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAIPromptVersionResponse { + try await self.client.execute( + operation: "CreateAIPromptVersion", + path: "/assistants/{assistantId}/aiprompts/{aiPromptId}/versions", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates an Amazon Q in Connect AI Prompt version. + /// + /// Parameters: + /// - aiPromptId: The identifier of the Amazon Q in Connect AI prompt. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + /// - modifiedTime: The time the AI Prompt was last modified. + /// - logger: Logger use during operation + @inlinable + public func createAIPromptVersion( + aiPromptId: String, + assistantId: String, + clientToken: String? = CreateAIPromptVersionRequest.idempotencyToken(), + modifiedTime: Date? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateAIPromptVersionResponse { + let input = CreateAIPromptVersionRequest( + aiPromptId: aiPromptId, + assistantId: assistantId, + clientToken: clientToken, + modifiedTime: modifiedTime + ) + return try await self.createAIPromptVersion(input, logger: logger) + } + /// Creates an Amazon Q in Connect assistant. @Sendable @inlinable @@ -290,6 +476,7 @@ public struct QConnect: AWSService { /// - serverSideEncryptionConfiguration: The configuration information for the customer managed key used for encryption. This KMS key must have a policy that allows kms:CreateGrant, kms:DescribeKey, kms:Decrypt, and kms:GenerateDataKey* permissions to the IAM identity using the key to invoke Amazon Q in Connect. For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for your instance. /// - sourceConfiguration: The source of the knowledge base content. Only set this argument for EXTERNAL knowledge bases. /// - tags: The tags used to organize, track, or control access for this resource. + /// - vectorIngestionConfiguration: Contains details about how to ingest the documents in a data source. /// - logger: Logger use during operation @inlinable public func createKnowledgeBase( @@ -301,6 +488,7 @@ public struct QConnect: AWSService { serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, sourceConfiguration: SourceConfiguration? = nil, tags: [String: String]? = nil, + vectorIngestionConfiguration: VectorIngestionConfiguration? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateKnowledgeBaseResponse { let input = CreateKnowledgeBaseRequest( @@ -311,7 +499,8 @@ public struct QConnect: AWSService { renderingConfiguration: renderingConfiguration, serverSideEncryptionConfiguration: serverSideEncryptionConfiguration, sourceConfiguration: sourceConfiguration, - tags: tags + tags: tags, + vectorIngestionConfiguration: vectorIngestionConfiguration ) return try await self.createKnowledgeBase(input, logger: logger) } @@ -394,6 +583,7 @@ public struct QConnect: AWSService { /// Creates a session. A session is a contextual container used for generating recommendations. Amazon Connect creates a new Amazon Q in Connect session for each contact on which Amazon Q in Connect is enabled. /// /// Parameters: + /// - aiAgentConfiguration: The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that should be used by Amazon Q in Connect for this Session. /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. /// - description: The description. @@ -403,6 +593,7 @@ public struct QConnect: AWSService { /// - logger: Logger use during operation @inlinable public func createSession( + aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? = nil, assistantId: String, clientToken: String? = CreateSessionRequest.idempotencyToken(), description: String? 
= nil, @@ -412,6 +603,7 @@ public struct QConnect: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateSessionResponse { let input = CreateSessionRequest( + aiAgentConfiguration: aiAgentConfiguration, assistantId: assistantId, clientToken: clientToken, description: description, @@ -422,6 +614,140 @@ public struct QConnect: AWSService { return try await self.createSession(input, logger: logger) } + /// Deletes an Amazon Q in Connect AI Agent. + @Sendable + @inlinable + public func deleteAIAgent(_ input: DeleteAIAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAIAgentResponse { + try await self.client.execute( + operation: "DeleteAIAgent", + path: "/assistants/{assistantId}/aiagents/{aiAgentId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an Amazon Q in Connect AI Agent. + /// + /// Parameters: + /// - aiAgentId: The identifier of the Amazon Q in Connect AI Agent. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - logger: Logger use during operation + @inlinable + public func deleteAIAgent( + aiAgentId: String, + assistantId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteAIAgentResponse { + let input = DeleteAIAgentRequest( + aiAgentId: aiAgentId, + assistantId: assistantId + ) + return try await self.deleteAIAgent(input, logger: logger) + } + + /// Deletes an Amazon Q in Connect AI Agent Version. + @Sendable + @inlinable + public func deleteAIAgentVersion(_ input: DeleteAIAgentVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAIAgentVersionResponse { + try await self.client.execute( + operation: "DeleteAIAgentVersion", + path: "/assistants/{assistantId}/aiagents/{aiAgentId}/versions/{versionNumber}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an Amazon Q in Connect AI Agent Version. + /// + /// Parameters: + /// - aiAgentId: The identifier of the Amazon Q in Connect AI Agent. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - versionNumber: The version number of the AI Agent version. + /// - logger: Logger use during operation + @inlinable + public func deleteAIAgentVersion( + aiAgentId: String, + assistantId: String, + versionNumber: Int64, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteAIAgentVersionResponse { + let input = DeleteAIAgentVersionRequest( + aiAgentId: aiAgentId, + assistantId: assistantId, + versionNumber: versionNumber + ) + return try await self.deleteAIAgentVersion(input, logger: logger) + } + + /// Deletes an Amazon Q in Connect AI Prompt. + @Sendable + @inlinable + public func deleteAIPrompt(_ input: DeleteAIPromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAIPromptResponse { + try await self.client.execute( + operation: "DeleteAIPrompt", + path: "/assistants/{assistantId}/aiprompts/{aiPromptId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an Amazon Q in Connect AI Prompt. + /// + /// Parameters: + /// - aiPromptId: The identifier of the Amazon Q in Connect AI prompt. 
Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - logger: Logger use during operation + @inlinable + public func deleteAIPrompt( + aiPromptId: String, + assistantId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteAIPromptResponse { + let input = DeleteAIPromptRequest( + aiPromptId: aiPromptId, + assistantId: assistantId + ) + return try await self.deleteAIPrompt(input, logger: logger) + } + + /// Deletes an Amazon Q in Connect AI Prompt version. + @Sendable + @inlinable + public func deleteAIPromptVersion(_ input: DeleteAIPromptVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAIPromptVersionResponse { + try await self.client.execute( + operation: "DeleteAIPromptVersion", + path: "/assistants/{assistantId}/aiprompts/{aiPromptId}/versions/{versionNumber}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes an Amazon Q in Connect AI Prompt version. + /// + /// Parameters: + /// - aiPromptId: The identifier of the Amazon Q in Connect AI prompt. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - versionNumber: The version number of the AI Prompt version to be deleted. + /// - logger: Logger use during operation + @inlinable + public func deleteAIPromptVersion( + aiPromptId: String, + assistantId: String, + versionNumber: Int64, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteAIPromptVersionResponse { + let input = DeleteAIPromptVersionRequest( + aiPromptId: aiPromptId, + assistantId: assistantId, + versionNumber: versionNumber + ) + return try await self.deleteAIPromptVersion(input, logger: logger) + } + /// Deletes an assistant. @Sendable @inlinable @@ -643,6 +969,70 @@ public struct QConnect: AWSService { return try await self.deleteQuickResponse(input, logger: logger) } + /// Gets an Amazon Q in Connect AI Agent. + @Sendable + @inlinable + public func getAIAgent(_ input: GetAIAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAIAgentResponse { + try await self.client.execute( + operation: "GetAIAgent", + path: "/assistants/{assistantId}/aiagents/{aiAgentId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets an Amazon Q in Connect AI Agent. + /// + /// Parameters: + /// - aiAgentId: The identifier of the Amazon Q in Connect AI Agent (with or without a version qualifier). Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - logger: Logger use during operation + @inlinable + public func getAIAgent( + aiAgentId: String, + assistantId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetAIAgentResponse { + let input = GetAIAgentRequest( + aiAgentId: aiAgentId, + assistantId: assistantId ) + return try await self.getAIAgent(input, logger: logger) + } + + /// Gets an Amazon Q in Connect AI Prompt.
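// Usage sketch (illustrative; not part of the generated sources) for the new AI Agent operations.
// The client value and the assistant/agent identifiers are placeholders, and the responses are not
// inspected here because their shapes are not shown in this hunk.
import SotoQConnect

func snapshotAIAgent(qconnect: QConnect, assistantId: String, aiAgentId: String) async throws {
    // Fetch the agent; the identifier can be the ID or the ARN (URLs cannot contain the ARN).
    _ = try await qconnect.getAIAgent(aiAgentId: aiAgentId, assistantId: assistantId)

    // Create a version of the agent. Omitting modifiedTime skips the optional guard against
    // concurrent updates described in the parameter documentation above.
    _ = try await qconnect.createAIAgentVersion(aiAgentId: aiAgentId, assistantId: assistantId)

    // Page through the agent's existing versions.
    _ = try await qconnect.listAIAgentVersions(aiAgentId: aiAgentId, assistantId: assistantId, maxResults: 10)
}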
+ @Sendable + @inlinable + public func getAIPrompt(_ input: GetAIPromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetAIPromptResponse { + try await self.client.execute( + operation: "GetAIPrompt", + path: "/assistants/{assistantId}/aiprompts/{aiPromptId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets an Amazon Q in Connect AI Prompt. + /// + /// Parameters: + /// - aiPromptId: The identifier of the Amazon Q in Connect AI prompt. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - logger: Logger use during operation + @inlinable + public func getAIPrompt( + aiPromptId: String, + assistantId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetAIPromptResponse { + let input = GetAIPromptRequest( + aiPromptId: aiPromptId, + assistantId: assistantId + ) + return try await self.getAIPrompt(input, logger: logger) + } + /// Retrieves information about an assistant. @Sendable @inlinable @@ -953,19 +1343,177 @@ public struct QConnect: AWSService { /// /// Parameters: /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. - /// - sessionId: The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - sessionId: The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - logger: Logger use during operation + @inlinable + public func getSession( + assistantId: String, + sessionId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetSessionResponse { + let input = GetSessionRequest( + assistantId: assistantId, + sessionId: sessionId + ) + return try await self.getSession(input, logger: logger) + } + + /// List AI Agent versions. + @Sendable + @inlinable + public func listAIAgentVersions(_ input: ListAIAgentVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAIAgentVersionsResponse { + try await self.client.execute( + operation: "ListAIAgentVersions", + path: "/assistants/{assistantId}/aiagents/{aiAgentId}/versions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List AI Agent versions. + /// + /// Parameters: + /// - aiAgentId: The identifier of the Amazon Q in Connect AI Agent for which versions are to be listed. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - maxResults: The maximum number of results to return per page. + /// - nextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + /// - origin: The origin of the AI Agent versions to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + /// - logger: Logger use during operation + @inlinable + public func listAIAgentVersions( + aiAgentId: String, + assistantId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + origin: Origin? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListAIAgentVersionsResponse { + let input = ListAIAgentVersionsRequest( + aiAgentId: aiAgentId, + assistantId: assistantId, + maxResults: maxResults, + nextToken: nextToken, + origin: origin + ) + return try await self.listAIAgentVersions(input, logger: logger) + } + + /// Lists AI Agents. + @Sendable + @inlinable + public func listAIAgents(_ input: ListAIAgentsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAIAgentsResponse { + try await self.client.execute( + operation: "ListAIAgents", + path: "/assistants/{assistantId}/aiagents", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists AI Agents. + /// + /// Parameters: + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - maxResults: The maximum number of results to return per page. + /// - nextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + /// - origin: The origin of the AI Agents to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + /// - logger: Logger use during operation + @inlinable + public func listAIAgents( + assistantId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + origin: Origin? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListAIAgentsResponse { + let input = ListAIAgentsRequest( + assistantId: assistantId, + maxResults: maxResults, + nextToken: nextToken, + origin: origin + ) + return try await self.listAIAgents(input, logger: logger) + } + + /// Lists AI Prompt versions. + @Sendable + @inlinable + public func listAIPromptVersions(_ input: ListAIPromptVersionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAIPromptVersionsResponse { + try await self.client.execute( + operation: "ListAIPromptVersions", + path: "/assistants/{assistantId}/aiprompts/{aiPromptId}/versions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists AI Prompt versions. + /// + /// Parameters: + /// - aiPromptId: The identifier of the Amazon Q in Connect AI prompt for which versions are to be listed. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - maxResults: The maximum number of results to return per page. + /// - nextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + /// - origin: The origin of the AI Prompt versions to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + /// - logger: Logger use during operation + @inlinable + public func listAIPromptVersions( + aiPromptId: String, + assistantId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + origin: Origin? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListAIPromptVersionsResponse { + let input = ListAIPromptVersionsRequest( + aiPromptId: aiPromptId, + assistantId: assistantId, + maxResults: maxResults, + nextToken: nextToken, + origin: origin + ) + return try await self.listAIPromptVersions(input, logger: logger) + } + + /// Lists the AI Prompts available on the Amazon Q in Connect assistant. + @Sendable + @inlinable + public func listAIPrompts(_ input: ListAIPromptsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAIPromptsResponse { + try await self.client.execute( + operation: "ListAIPrompts", + path: "/assistants/{assistantId}/aiprompts", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists the AI Prompts available on the Amazon Q in Connect assistant. + /// + /// Parameters: + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - maxResults: The maximum number of results to return per page. + /// - nextToken: The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + /// - origin: The origin of the AI Prompts to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. /// - logger: Logger use during operation @inlinable - public func getSession( + public func listAIPrompts( assistantId: String, - sessionId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + origin: Origin? = nil, logger: Logger = AWSClient.loggingDisabled - ) async throws -> GetSessionResponse { - let input = GetSessionRequest( + ) async throws -> ListAIPromptsResponse { + let input = ListAIPromptsRequest( assistantId: assistantId, - sessionId: sessionId + maxResults: maxResults, + nextToken: nextToken, + origin: origin ) - return try await self.getSession(input, logger: logger) + return try await self.listAIPrompts(input, logger: logger) } /// Lists information about assistant associations. @@ -1332,7 +1880,9 @@ public struct QConnect: AWSService { /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. /// - maxResults: The maximum number of results to return per page. /// - nextToken: The token for the next set of results. Use the value returned in the previous + /// - overrideKnowledgeBaseSearchType: The search type to be used against the Knowledge Base for this request. The values can be SEMANTIC which uses vector embeddings or HYBRID which use vector embeddings and raw text. /// - queryCondition: Information about how to query content. + /// - queryInputData: Information about the query. /// - queryText: The text to search for. /// - sessionId: The identifier of the Amazon Q in Connect session. Can be either the ID or the ARN. URLs cannot contain the ARN. /// - logger: Logger use during operation @@ -1342,8 +1892,10 @@ public struct QConnect: AWSService { assistantId: String, maxResults: Int? = nil, nextToken: String? = nil, + overrideKnowledgeBaseSearchType: KnowledgeBaseSearchType? = nil, queryCondition: [QueryCondition]? = nil, - queryText: String, + queryInputData: QueryInputData? = nil, + queryText: String? = nil, sessionId: String? 
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> QueryAssistantResponse { @@ -1351,13 +1903,47 @@ public struct QConnect: AWSService { assistantId: assistantId, maxResults: maxResults, nextToken: nextToken, + overrideKnowledgeBaseSearchType: overrideKnowledgeBaseSearchType, queryCondition: queryCondition, + queryInputData: queryInputData, queryText: queryText, sessionId: sessionId ) return try await self.queryAssistant(input, logger: logger) } + /// Removes the AI Agent that is set for use by default on an Amazon Q in Connect Assistant. + @Sendable + @inlinable + public func removeAssistantAIAgent(_ input: RemoveAssistantAIAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RemoveAssistantAIAgentResponse { + try await self.client.execute( + operation: "RemoveAssistantAIAgent", + path: "/assistants/{assistantId}/aiagentConfiguration", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes the AI Agent that is set for use by default on an Amazon Q in Connect Assistant. + /// + /// Parameters: + /// - aiAgentType: The type of the AI Agent being removed for use by default from the Amazon Q in Connect Assistant. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - logger: Logger use during operation + @inlinable + public func removeAssistantAIAgent( + aiAgentType: AIAgentType, + assistantId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> RemoveAssistantAIAgentResponse { + let input = RemoveAssistantAIAgentRequest( + aiAgentType: aiAgentType, + assistantId: assistantId + ) + return try await self.removeAssistantAIAgent(input, logger: logger) + } + /// Removes a URI template from a knowledge base. @Sendable @inlinable @@ -1647,6 +2233,129 @@ public struct QConnect: AWSService { return try await self.untagResource(input, logger: logger) } + /// Updates an AI Agent. + @Sendable + @inlinable + public func updateAIAgent(_ input: UpdateAIAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAIAgentResponse { + try await self.client.execute( + operation: "UpdateAIAgent", + path: "/assistants/{assistantId}/aiagents/{aiAgentId}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates an AI Agent. + /// + /// Parameters: + /// - aiAgentId: The identifier of the Amazon Q in Connect AI Agent. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + /// - configuration: The configuration of the Amazon Q in Connect AI Agent. + /// - description: The description of the Amazon Q in Connect AI Agent. + /// - visibilityStatus: The visibility status of the Amazon Q in Connect AI Agent. + /// - logger: Logger use during operation + @inlinable + public func updateAIAgent( + aiAgentId: String, + assistantId: String, + clientToken: String? = UpdateAIAgentRequest.idempotencyToken(), + configuration: AIAgentConfiguration? = nil, + description: String? 
= nil, + visibilityStatus: VisibilityStatus, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateAIAgentResponse { + let input = UpdateAIAgentRequest( + aiAgentId: aiAgentId, + assistantId: assistantId, + clientToken: clientToken, + configuration: configuration, + description: description, + visibilityStatus: visibilityStatus + ) + return try await self.updateAIAgent(input, logger: logger) + } + + /// Updates an AI Prompt. + @Sendable + @inlinable + public func updateAIPrompt(_ input: UpdateAIPromptRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAIPromptResponse { + try await self.client.execute( + operation: "UpdateAIPrompt", + path: "/assistants/{assistantId}/aiprompts/{aiPromptId}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates an AI Prompt. + /// + /// Parameters: + /// - aiPromptId: The identifier of the Amazon Q in Connect AI Prompt. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + /// - description: The description of the Amazon Q in Connect AI Prompt. + /// - templateConfiguration: The configuration of the prompt template for this AI Prompt. + /// - visibilityStatus: The visibility status of the Amazon Q in Connect AI prompt. + /// - logger: Logger use during operation + @inlinable + public func updateAIPrompt( + aiPromptId: String, + assistantId: String, + clientToken: String? = UpdateAIPromptRequest.idempotencyToken(), + description: String? = nil, + templateConfiguration: AIPromptTemplateConfiguration? = nil, + visibilityStatus: VisibilityStatus, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateAIPromptResponse { + let input = UpdateAIPromptRequest( + aiPromptId: aiPromptId, + assistantId: assistantId, + clientToken: clientToken, + description: description, + templateConfiguration: templateConfiguration, + visibilityStatus: visibilityStatus + ) + return try await self.updateAIPrompt(input, logger: logger) + } + + /// Updates the AI Agent that is set for use by default on an Amazon Q in Connect Assistant. + @Sendable + @inlinable + public func updateAssistantAIAgent(_ input: UpdateAssistantAIAgentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAssistantAIAgentResponse { + try await self.client.execute( + operation: "UpdateAssistantAIAgent", + path: "/assistants/{assistantId}/aiagentConfiguration", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates the AI Agent that is set for use by default on an Amazon Q in Connect Assistant. + /// + /// Parameters: + /// - aiAgentType: The type of the AI Agent being updated for use by default on the Amazon Q in Connect Assistant. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - configuration: The configuration of the AI Agent being updated for use by default on the Amazon Q in Connect Assistant. 
+ /// - logger: Logger use during operation + @inlinable + public func updateAssistantAIAgent( + aiAgentType: AIAgentType, + assistantId: String, + configuration: AIAgentConfigurationData, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateAssistantAIAgentResponse { + let input = UpdateAssistantAIAgentRequest( + aiAgentType: aiAgentType, + assistantId: assistantId, + configuration: configuration + ) + return try await self.updateAssistantAIAgent(input, logger: logger) + } + /// Updates information about the content. @Sendable @inlinable @@ -1813,6 +2522,7 @@ public struct QConnect: AWSService { /// Updates a session. A session is a contextual container used for generating recommendations. Amazon Connect updates the existing Amazon Q in Connect session for each contact on which Amazon Q in Connect is enabled. /// /// Parameters: + /// - aiAgentConfiguration: The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that should be used by Amazon Q in Connect for this Session. /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. /// - description: The description. /// - sessionId: The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the ARN. @@ -1820,6 +2530,7 @@ public struct QConnect: AWSService { /// - logger: Logger use during operation @inlinable public func updateSession( + aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? = nil, assistantId: String, description: String? = nil, sessionId: String, @@ -1827,6 +2538,7 @@ public struct QConnect: AWSService { logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateSessionResponse { let input = UpdateSessionRequest( + aiAgentConfiguration: aiAgentConfiguration, assistantId: assistantId, description: description, sessionId: sessionId, @@ -1834,6 +2546,44 @@ public struct QConnect: AWSService { ) return try await self.updateSession(input, logger: logger) } + + /// Updates the data stored on an Amazon Q in Connect Session. + @Sendable + @inlinable + public func updateSessionData(_ input: UpdateSessionDataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateSessionDataResponse { + try await self.client.execute( + operation: "UpdateSessionData", + path: "/assistants/{assistantId}/sessions/{sessionId}/data", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates the data stored on an Amazon Q in Connect Session. + /// + /// Parameters: + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - data: The data stored on the Amazon Q in Connect Session. + /// - namespace: The namespace into which the session data is stored. Supported namespaces are: Custom + /// - sessionId: The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - logger: Logger use during operation + @inlinable + public func updateSessionData( + assistantId: String, + data: [RuntimeSessionData], + namespace: SessionDataNamespace? 
= nil, + sessionId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateSessionDataResponse { + let input = UpdateSessionDataRequest( + assistantId: assistantId, + data: data, + namespace: namespace, + sessionId: sessionId + ) + return try await self.updateSessionData(input, logger: logger) + } } extension QConnect { @@ -1849,6 +2599,172 @@ extension QConnect { @available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) extension QConnect { + /// Return PaginatorSequence for operation ``listAIAgentVersions(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listAIAgentVersionsPaginator( + _ input: ListAIAgentVersionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listAIAgentVersions, + inputKey: \ListAIAgentVersionsRequest.nextToken, + outputKey: \ListAIAgentVersionsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listAIAgentVersions(_:logger:)``. + /// + /// - Parameters: + /// - aiAgentId: The identifier of the Amazon Q in Connect AI Agent for which versions are to be listed. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - maxResults: The maximum number of results to return per page. + /// - origin: The origin of the AI Agent versions to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + /// - logger: Logger used for logging + @inlinable + public func listAIAgentVersionsPaginator( + aiAgentId: String, + assistantId: String, + maxResults: Int? = nil, + origin: Origin? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListAIAgentVersionsRequest( + aiAgentId: aiAgentId, + assistantId: assistantId, + maxResults: maxResults, + origin: origin + ) + return self.listAIAgentVersionsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listAIAgents(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listAIAgentsPaginator( + _ input: ListAIAgentsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listAIAgents, + inputKey: \ListAIAgentsRequest.nextToken, + outputKey: \ListAIAgentsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listAIAgents(_:logger:)``. + /// + /// - Parameters: + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - maxResults: The maximum number of results to return per page. + /// - origin: The origin of the AI Agents to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + /// - logger: Logger used for logging + @inlinable + public func listAIAgentsPaginator( + assistantId: String, + maxResults: Int? = nil, + origin: Origin? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListAIAgentsRequest( + assistantId: assistantId, + maxResults: maxResults, + origin: origin + ) + return self.listAIAgentsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listAIPromptVersions(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listAIPromptVersionsPaginator( + _ input: ListAIPromptVersionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listAIPromptVersions, + inputKey: \ListAIPromptVersionsRequest.nextToken, + outputKey: \ListAIPromptVersionsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listAIPromptVersions(_:logger:)``. + /// + /// - Parameters: + /// - aiPromptId: The identifier of the Amazon Q in Connect AI prompt for which versions are to be listed. + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - maxResults: The maximum number of results to return per page. + /// - origin: The origin of the AI Prompt versions to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + /// - logger: Logger used for logging + @inlinable + public func listAIPromptVersionsPaginator( + aiPromptId: String, + assistantId: String, + maxResults: Int? = nil, + origin: Origin? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListAIPromptVersionsRequest( + aiPromptId: aiPromptId, + assistantId: assistantId, + maxResults: maxResults, + origin: origin + ) + return self.listAIPromptVersionsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listAIPrompts(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listAIPromptsPaginator( + _ input: ListAIPromptsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listAIPrompts, + inputKey: \ListAIPromptsRequest.nextToken, + outputKey: \ListAIPromptsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listAIPrompts(_:logger:)``. + /// + /// - Parameters: + /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + /// - maxResults: The maximum number of results to return per page. + /// - origin: The origin of the AI Prompts to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + /// - logger: Logger used for logging + @inlinable + public func listAIPromptsPaginator( + assistantId: String, + maxResults: Int? = nil, + origin: Origin? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListAIPromptsRequest( + assistantId: assistantId, + maxResults: maxResults, + origin: origin + ) + return self.listAIPromptsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listAssistantAssociations(_:logger:)``. 
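As an aside, a sketch of how the PaginatorSequence helpers above might be consumed with for try await; the assistant identifier is a placeholder, and the aiAgentSummaries member of ListAIAgentsResponse is assumed from the service model rather than shown in this hunk.

import SotoQConnect

// Illustrative only: page through customer-created AI Agents,
// reusing a configured `qconnect` client as in the earlier sketch.
func printCustomerAIAgents(qconnect: QConnect) async throws {
    let pages = qconnect.listAIAgentsPaginator(
        assistantId: "EXAMPLE-ASSISTANT-ID",  // placeholder
        origin: .customer
    )
    for try await page in pages {
        // `aiAgentSummaries` is the assumed name of the summary list on the response.
        for summary in page.aiAgentSummaries {
            print(summary.name, summary.visibilityStatus)
        }
    }
}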
/// /// - Parameters: @@ -2129,7 +3045,9 @@ extension QConnect { /// - Parameters: /// - assistantId: The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. /// - maxResults: The maximum number of results to return per page. + /// - overrideKnowledgeBaseSearchType: The search type to be used against the Knowledge Base for this request. The values can be SEMANTIC which uses vector embeddings or HYBRID which use vector embeddings and raw text. /// - queryCondition: Information about how to query content. + /// - queryInputData: Information about the query. /// - queryText: The text to search for. /// - sessionId: The identifier of the Amazon Q in Connect session. Can be either the ID or the ARN. URLs cannot contain the ARN. /// - logger: Logger used for logging @@ -2138,15 +3056,19 @@ extension QConnect { public func queryAssistantPaginator( assistantId: String, maxResults: Int? = nil, + overrideKnowledgeBaseSearchType: KnowledgeBaseSearchType? = nil, queryCondition: [QueryCondition]? = nil, - queryText: String, + queryInputData: QueryInputData? = nil, + queryText: String? = nil, sessionId: String? = nil, logger: Logger = AWSClient.loggingDisabled ) -> AWSClient.PaginatorSequence { let input = QueryAssistantRequest( assistantId: assistantId, maxResults: maxResults, + overrideKnowledgeBaseSearchType: overrideKnowledgeBaseSearchType, queryCondition: queryCondition, + queryInputData: queryInputData, queryText: queryText, sessionId: sessionId ) @@ -2277,6 +3199,56 @@ extension QConnect { } } +extension QConnect.ListAIAgentVersionsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> QConnect.ListAIAgentVersionsRequest { + return .init( + aiAgentId: self.aiAgentId, + assistantId: self.assistantId, + maxResults: self.maxResults, + nextToken: token, + origin: self.origin + ) + } +} + +extension QConnect.ListAIAgentsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> QConnect.ListAIAgentsRequest { + return .init( + assistantId: self.assistantId, + maxResults: self.maxResults, + nextToken: token, + origin: self.origin + ) + } +} + +extension QConnect.ListAIPromptVersionsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> QConnect.ListAIPromptVersionsRequest { + return .init( + aiPromptId: self.aiPromptId, + assistantId: self.assistantId, + maxResults: self.maxResults, + nextToken: token, + origin: self.origin + ) + } +} + +extension QConnect.ListAIPromptsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> QConnect.ListAIPromptsRequest { + return .init( + assistantId: self.assistantId, + maxResults: self.maxResults, + nextToken: token, + origin: self.origin + ) + } +} + extension QConnect.ListAssistantAssociationsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> QConnect.ListAssistantAssociationsRequest { @@ -2360,7 +3332,9 @@ extension QConnect.QueryAssistantRequest: AWSPaginateToken { assistantId: self.assistantId, maxResults: self.maxResults, nextToken: token, + overrideKnowledgeBaseSearchType: self.overrideKnowledgeBaseSearchType, queryCondition: self.queryCondition, + queryInputData: self.queryInputData, queryText: self.queryText, sessionId: self.sessionId ) diff --git a/Sources/Soto/Services/QConnect/QConnect_shapes.swift b/Sources/Soto/Services/QConnect/QConnect_shapes.swift index 066dd6fd4c..869045e762 100644 --- 
a/Sources/Soto/Services/QConnect/QConnect_shapes.swift +++ b/Sources/Soto/Services/QConnect/QConnect_shapes.swift @@ -26,6 +26,35 @@ import Foundation extension QConnect { // MARK: Enums + public enum AIAgentAssociationConfigurationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case knowledgeBase = "KNOWLEDGE_BASE" + public var description: String { return self.rawValue } + } + + public enum AIAgentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case answerRecommendation = "ANSWER_RECOMMENDATION" + case manualSearch = "MANUAL_SEARCH" + public var description: String { return self.rawValue } + } + + public enum AIPromptAPIFormat: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case anthropicClaudeMessages = "ANTHROPIC_CLAUDE_MESSAGES" + case anthropicClaudeTextCompletions = "ANTHROPIC_CLAUDE_TEXT_COMPLETIONS" + public var description: String { return self.rawValue } + } + + public enum AIPromptTemplateType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case text = "TEXT" + public var description: String { return self.rawValue } + } + + public enum AIPromptType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case answerGeneration = "ANSWER_GENERATION" + case intentLabelingGeneration = "INTENT_LABELING_GENERATION" + case queryReformulation = "QUERY_REFORMULATION" + public var description: String { return self.rawValue } + } + public enum AssistantCapabilityType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case v1 = "V1" case v2 = "V2" @@ -52,6 +81,14 @@ extension QConnect { public var description: String { return self.rawValue } } + public enum ChunkingStrategy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fixedSize = "FIXED_SIZE" + case hierarchical = "HIERARCHICAL" + case none = "NONE" + case semantic = "SEMANTIC" + public var description: String { return self.rawValue } + } + public enum ContentAssociationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case amazonConnectGuide = "AMAZON_CONNECT_GUIDE" public var description: String { return self.rawValue } @@ -98,6 +135,12 @@ extension QConnect { public var description: String { return self.rawValue } } + public enum KnowledgeBaseSearchType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case hybrid = "HYBRID" + case semantic = "SEMANTIC" + public var description: String { return self.rawValue } + } + public enum KnowledgeBaseStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case active = "ACTIVE" case createFailed = "CREATE_FAILED" @@ -111,6 +154,8 @@ extension QConnect { public enum KnowledgeBaseType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case custom = "CUSTOM" case external = "EXTERNAL" + case managed = "MANAGED" + case messageTemplates = "MESSAGE_TEMPLATES" case quickResponses = "QUICK_RESPONSES" public var description: String { return self.rawValue } } @@ -121,6 +166,17 @@ extension QConnect { public var description: String { return self.rawValue } } + public enum Origin: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case customer = "CUSTOMER" + case system = "SYSTEM" + public var description: String { return self.rawValue } + } + + public enum ParsingStrategy: String, CustomStringConvertible, Codable, Sendable, 
CodingKeyRepresentable { + case bedrockFoundationModel = "BEDROCK_FOUNDATION_MODEL" + public var description: String { return self.rawValue } + } + public enum Priority: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case high = "HIGH" case low = "LOW" @@ -140,6 +196,7 @@ extension QConnect { public enum QueryResultType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case generativeAnswer = "GENERATIVE_ANSWER" + case intentAnswer = "INTENT_ANSWER" case knowledgeContent = "KNOWLEDGE_CONTENT" public var description: String { return self.rawValue } } @@ -182,12 +239,19 @@ extension QConnect { } public enum RecommendationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case detectedIntent = "DETECTED_INTENT" case generativeAnswer = "GENERATIVE_ANSWER" case generativeResponse = "GENERATIVE_RESPONSE" case knowledgeContent = "KNOWLEDGE_CONTENT" public var description: String { return self.rawValue } } + public enum ReferenceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case knowledgeBase = "KNOWLEDGE_BASE" + case webCrawler = "WEB_CRAWLER" + public var description: String { return self.rawValue } + } + public enum Relevance: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case helpful = "HELPFUL" case notHelpful = "NOT_HELPFUL" @@ -201,22 +265,109 @@ extension QConnect { public var description: String { return self.rawValue } } + public enum SessionDataNamespace: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case custom = "Custom" + public var description: String { return self.rawValue } + } + public enum SourceContentType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case knowledgeContent = "KNOWLEDGE_CONTENT" public var description: String { return self.rawValue } } + public enum Status: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case createFailed = "CREATE_FAILED" + case createInProgress = "CREATE_IN_PROGRESS" + case deleted = "DELETED" + case deleteFailed = "DELETE_FAILED" + case deleteInProgress = "DELETE_IN_PROGRESS" + public var description: String { return self.rawValue } + } + + public enum SyncStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case createInProgress = "CREATE_IN_PROGRESS" + case syncingInProgress = "SYNCING_IN_PROGRESS" + case syncFailed = "SYNC_FAILED" + case syncSuccess = "SYNC_SUCCESS" + public var description: String { return self.rawValue } + } + public enum TargetType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case recommendation = "RECOMMENDATION" case result = "RESULT" public var description: String { return self.rawValue } } + public enum VisibilityStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case published = "PUBLISHED" + case saved = "SAVED" + public var description: String { return self.rawValue } + } + + public enum WebScopeType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case hostOnly = "HOST_ONLY" + case subdomains = "SUBDOMAINS" + public var description: String { return self.rawValue } + } + + public enum AIAgentConfiguration: AWSEncodableShape & AWSDecodableShape, Sendable { + /// The configuration for AI Agents of type ANSWER_RECOMMENDATION. 
+ case answerRecommendationAIAgentConfiguration(AnswerRecommendationAIAgentConfiguration) + /// The configuration for AI Agents of type MANUAL_SEARCH. + case manualSearchAIAgentConfiguration(ManualSearchAIAgentConfiguration) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .answerRecommendationAIAgentConfiguration: + let value = try container.decode(AnswerRecommendationAIAgentConfiguration.self, forKey: .answerRecommendationAIAgentConfiguration) + self = .answerRecommendationAIAgentConfiguration(value) + case .manualSearchAIAgentConfiguration: + let value = try container.decode(ManualSearchAIAgentConfiguration.self, forKey: .manualSearchAIAgentConfiguration) + self = .manualSearchAIAgentConfiguration(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .answerRecommendationAIAgentConfiguration(let value): + try container.encode(value, forKey: .answerRecommendationAIAgentConfiguration) + case .manualSearchAIAgentConfiguration(let value): + try container.encode(value, forKey: .manualSearchAIAgentConfiguration) + } + } + + public func validate(name: String) throws { + switch self { + case .answerRecommendationAIAgentConfiguration(let value): + try value.validate(name: "\(name).answerRecommendationAIAgentConfiguration") + case .manualSearchAIAgentConfiguration(let value): + try value.validate(name: "\(name).manualSearchAIAgentConfiguration") + } + } + + private enum CodingKeys: String, CodingKey { + case answerRecommendationAIAgentConfiguration = "answerRecommendationAIAgentConfiguration" + case manualSearchAIAgentConfiguration = "manualSearchAIAgentConfiguration" + } + } + public enum DataDetails: AWSDecodableShape, Sendable { /// Details about the content data. case contentData(ContentDataDetails) /// Details about the generative data. case generativeData(GenerativeDataDetails) + /// Details about the intent data. + case intentDetectedData(IntentDetectedDataDetails) /// Details about the content data. case sourceContentData(SourceContentDataDetails) @@ -236,6 +387,9 @@ extension QConnect { case .generativeData: let value = try container.decode(GenerativeDataDetails.self, forKey: .generativeData) self = .generativeData(value) + case .intentDetectedData: + let value = try container.decode(IntentDetectedDataDetails.self, forKey: .intentDetectedData) + self = .intentDetectedData(value) case .sourceContentData: let value = try container.decode(SourceContentDataDetails.self, forKey: .sourceContentData) self = .sourceContentData(value) @@ -245,6 +399,7 @@ extension QConnect { private enum CodingKeys: String, CodingKey { case contentData = "contentData" case generativeData = "generativeData" + case intentDetectedData = "intentDetectedData" case sourceContentData = "sourceContentData" } } @@ -331,6 +486,87 @@ extension QConnect { } } + public enum QueryInputData: AWSEncodableShape, Sendable { + /// Input information for the intent. + case intentInputData(IntentInputData) + /// Input information for the query. 
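A brief sketch of how the new QueryInputData union might be passed to the updated queryAssistant overload; the text member of QueryTextInputData is an assumption here, since that shape's definition falls outside this hunk.

import SotoQConnect

// Hypothetical: query an assistant by free text through the new union input.
func queryByText(qconnect: QConnect) async throws {
    let input: QConnect.QueryInputData = .queryTextInputData(
        QConnect.QueryTextInputData(text: "How do I reset a customer password?")  // `text` member assumed
    )
    let response = try await qconnect.queryAssistant(
        assistantId: "EXAMPLE-ASSISTANT-ID",  // placeholder
        queryInputData: input
    )
    print(response)
}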
+ case queryTextInputData(QueryTextInputData) + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .intentInputData(let value): + try container.encode(value, forKey: .intentInputData) + case .queryTextInputData(let value): + try container.encode(value, forKey: .queryTextInputData) + } + } + + public func validate(name: String) throws { + switch self { + case .intentInputData(let value): + try value.validate(name: "\(name).intentInputData") + case .queryTextInputData(let value): + try value.validate(name: "\(name).queryTextInputData") + } + } + + private enum CodingKeys: String, CodingKey { + case intentInputData = "intentInputData" + case queryTextInputData = "queryTextInputData" + } + } + + public enum SourceConfiguration: AWSEncodableShape & AWSDecodableShape, Sendable { + /// Configuration information for Amazon AppIntegrations to automatically ingest content. + case appIntegrations(AppIntegrationsConfiguration) + /// Source configuration for managed resources. + case managedSourceConfiguration(ManagedSourceConfiguration) + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + guard container.allKeys.count == 1, let key = container.allKeys.first else { + let context = DecodingError.Context( + codingPath: container.codingPath, + debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" + ) + throw DecodingError.dataCorrupted(context) + } + switch key { + case .appIntegrations: + let value = try container.decode(AppIntegrationsConfiguration.self, forKey: .appIntegrations) + self = .appIntegrations(value) + case .managedSourceConfiguration: + let value = try container.decode(ManagedSourceConfiguration.self, forKey: .managedSourceConfiguration) + self = .managedSourceConfiguration(value) + } + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + switch self { + case .appIntegrations(let value): + try container.encode(value, forKey: .appIntegrations) + case .managedSourceConfiguration(let value): + try container.encode(value, forKey: .managedSourceConfiguration) + } + } + + public func validate(name: String) throws { + switch self { + case .appIntegrations(let value): + try value.validate(name: "\(name).appIntegrations") + case .managedSourceConfiguration(let value): + try value.validate(name: "\(name).managedSourceConfiguration") + } + } + + private enum CodingKeys: String, CodingKey { + case appIntegrations = "appIntegrations" + case managedSourceConfiguration = "managedSourceConfiguration" + } + } + public enum TagFilter: AWSEncodableShape & AWSDecodableShape, Sendable { /// A list of conditions which would be applied together with an AND condition. case andConditions([TagCondition]) @@ -397,170 +633,530 @@ extension QConnect { // MARK: Shapes - public struct AmazonConnectGuideAssociationData: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Resource Name (ARN) of an Amazon Connect flow. Step-by-step guides are a type of flow. - public let flowId: String? - - @inlinable - public init(flowId: String? 
= nil) { - self.flowId = flowId - } - - public func validate(name: String) throws { - try self.validate(self.flowId, name: "flowId", parent: name, max: 2048) - try self.validate(self.flowId, name: "flowId", parent: name, min: 1) - try self.validate(self.flowId, name: "flowId", parent: name, pattern: "^arn:[a-z-]+?:[a-z-]+?:[a-z0-9-]*?:([0-9]{12})?:[a-zA-Z0-9-:/]+$") - } - - private enum CodingKeys: String, CodingKey { - case flowId = "flowId" - } - } - - public struct AppIntegrationsConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content. For Salesforce, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted as source fields. For ServiceNow, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least number, short_description, sys_mod_count, workflow_state, and active as source fields. For Zendesk, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least id, title, updated_at, and draft as source fields. For SharePoint, your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among docx, pdf, html, htm, and txt. For Amazon S3, the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. The SourceURI of your DataIntegration must use the following format: s3://your_s3_bucket_name. The bucket policy of the corresponding S3 bucket must allow the Amazon Web Services principal app-integrations.amazonaws.com to perform s3:ListBucket, s3:GetObject, and s3:GetBucketLocation against the bucket. - public let appIntegrationArn: String - /// The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if ObjectConfiguration is included in the provided DataIntegration. For Salesforce, you must include at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted. For ServiceNow, you must include at least number, short_description, sys_mod_count, workflow_state, and active. For Zendesk, you must include at least id, title, updated_at, and draft. Make sure to include additional fields. These fields are indexed and used to source recommendations. - public let objectFields: [String]? + public struct AIAgentConfigurationData: AWSEncodableShape & AWSDecodableShape { + /// The ID of the AI Agent to be configured. + public let aiAgentId: String @inlinable - public init(appIntegrationArn: String, objectFields: [String]? 
= nil) { - self.appIntegrationArn = appIntegrationArn - self.objectFields = objectFields + public init(aiAgentId: String) { + self.aiAgentId = aiAgentId } public func validate(name: String) throws { - try self.validate(self.appIntegrationArn, name: "appIntegrationArn", parent: name, max: 2048) - try self.validate(self.appIntegrationArn, name: "appIntegrationArn", parent: name, min: 1) - try self.validate(self.appIntegrationArn, name: "appIntegrationArn", parent: name, pattern: "^arn:[a-z-]+?:[a-z-]+?:[a-z0-9-]*?:([0-9]{12})?:[a-zA-Z0-9-:/]+$") - try self.objectFields?.forEach { - try validate($0, name: "objectFields[]", parent: name, max: 4096) - try validate($0, name: "objectFields[]", parent: name, min: 1) - } - try self.validate(self.objectFields, name: "objectFields", parent: name, max: 100) - try self.validate(self.objectFields, name: "objectFields", parent: name, min: 1) + try self.validate(self.aiAgentId, name: "aiAgentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") } private enum CodingKeys: String, CodingKey { - case appIntegrationArn = "appIntegrationArn" - case objectFields = "objectFields" + case aiAgentId = "aiAgentId" } } - public struct AssistantAssociationData: AWSDecodableShape { + public struct AIAgentData: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the AI agent. + public let aiAgentArn: String + /// The identifier of the AI Agent. + public let aiAgentId: String /// The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. public let assistantArn: String - /// The Amazon Resource Name (ARN) of the assistant association. - public let assistantAssociationArn: String - /// The identifier of the assistant association. - public let assistantAssociationId: String - /// The identifier of the Amazon Q in Connect assistant. + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. public let assistantId: String - /// A union type that currently has a single argument, the knowledge base ID. - public let associationData: AssistantAssociationOutputData - /// The type of association. - public let associationType: AssociationType + /// Configuration for the AI Agent. + public let configuration: AIAgentConfiguration + /// The description of the AI Agent. + public let description: String? + /// The time the AI Agent was last modified. + public let modifiedTime: Date? + /// The name of the AI Agent. + public let name: String + /// Specifies the origin of the AI Agent. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + public let origin: Origin? + /// The status of the AI Agent. + public let status: Status? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? + /// The type of the AI Agent. + public let type: AIAgentType + /// The visibility status of the AI Agent. + public let visibilityStatus: VisibilityStatus @inlinable - public init(assistantArn: String, assistantAssociationArn: String, assistantAssociationId: String, assistantId: String, associationData: AssistantAssociationOutputData, associationType: AssociationType, tags: [String: String]? = nil) { + public init(aiAgentArn: String, aiAgentId: String, assistantArn: String, assistantId: String, configuration: AIAgentConfiguration, description: String? = nil, modifiedTime: Date? = nil, name: String, origin: Origin? = nil, status: Status? 
= nil, tags: [String: String]? = nil, type: AIAgentType, visibilityStatus: VisibilityStatus) { + self.aiAgentArn = aiAgentArn + self.aiAgentId = aiAgentId self.assistantArn = assistantArn - self.assistantAssociationArn = assistantAssociationArn - self.assistantAssociationId = assistantAssociationId self.assistantId = assistantId - self.associationData = associationData - self.associationType = associationType + self.configuration = configuration + self.description = description + self.modifiedTime = modifiedTime + self.name = name + self.origin = origin + self.status = status self.tags = tags + self.type = type + self.visibilityStatus = visibilityStatus } private enum CodingKeys: String, CodingKey { + case aiAgentArn = "aiAgentArn" + case aiAgentId = "aiAgentId" case assistantArn = "assistantArn" - case assistantAssociationArn = "assistantAssociationArn" - case assistantAssociationId = "assistantAssociationId" case assistantId = "assistantId" - case associationData = "associationData" - case associationType = "associationType" + case configuration = "configuration" + case description = "description" + case modifiedTime = "modifiedTime" + case name = "name" + case origin = "origin" + case status = "status" case tags = "tags" + case type = "type" + case visibilityStatus = "visibilityStatus" } } - public struct AssistantAssociationSummary: AWSDecodableShape { + public struct AIAgentSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the AI agent. + public let aiAgentArn: String + /// The identifier of the AI Agent. + public let aiAgentId: String /// The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. public let assistantArn: String - /// The Amazon Resource Name (ARN) of the assistant association. - public let assistantAssociationArn: String - /// The identifier of the assistant association. - public let assistantAssociationId: String - /// The identifier of the Amazon Q in Connect assistant. + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. public let assistantId: String - /// The association data. - public let associationData: AssistantAssociationOutputData - /// The type of association. - public let associationType: AssociationType + /// The configuration for the AI Agent. + public let configuration: AIAgentConfiguration? + /// The description of the AI Agent. + public let description: String? + /// The time the AI Agent was last modified. + public let modifiedTime: Date? + /// The name of the AI Agent. + public let name: String + /// The origin of the AI Agent. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + public let origin: Origin? + /// The status of the AI Agent. + public let status: Status? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? + /// The type of the AI Agent. + public let type: AIAgentType + /// The visibility status of the AI Agent. + public let visibilityStatus: VisibilityStatus @inlinable - public init(assistantArn: String, assistantAssociationArn: String, assistantAssociationId: String, assistantId: String, associationData: AssistantAssociationOutputData, associationType: AssociationType, tags: [String: String]? = nil) { + public init(aiAgentArn: String, aiAgentId: String, assistantArn: String, assistantId: String, configuration: AIAgentConfiguration? = nil, description: String? = nil, modifiedTime: Date? 
= nil, name: String, origin: Origin? = nil, status: Status? = nil, tags: [String: String]? = nil, type: AIAgentType, visibilityStatus: VisibilityStatus) { + self.aiAgentArn = aiAgentArn + self.aiAgentId = aiAgentId self.assistantArn = assistantArn - self.assistantAssociationArn = assistantAssociationArn - self.assistantAssociationId = assistantAssociationId self.assistantId = assistantId - self.associationData = associationData - self.associationType = associationType + self.configuration = configuration + self.description = description + self.modifiedTime = modifiedTime + self.name = name + self.origin = origin + self.status = status self.tags = tags + self.type = type + self.visibilityStatus = visibilityStatus } private enum CodingKeys: String, CodingKey { + case aiAgentArn = "aiAgentArn" + case aiAgentId = "aiAgentId" case assistantArn = "assistantArn" - case assistantAssociationArn = "assistantAssociationArn" - case assistantAssociationId = "assistantAssociationId" case assistantId = "assistantId" - case associationData = "associationData" - case associationType = "associationType" + case configuration = "configuration" + case description = "description" + case modifiedTime = "modifiedTime" + case name = "name" + case origin = "origin" + case status = "status" case tags = "tags" + case type = "type" + case visibilityStatus = "visibilityStatus" } } - public struct AssistantCapabilityConfiguration: AWSDecodableShape { - /// The type of Amazon Q in Connect assistant capability. - public let type: AssistantCapabilityType? + public struct AIAgentVersionSummary: AWSDecodableShape { + /// The data for the summary of the AI Agent version. + public let aiAgentSummary: AIAgentSummary? + /// The version number for this AI Agent version. + public let versionNumber: Int64? @inlinable - public init(type: AssistantCapabilityType? = nil) { - self.type = type + public init(aiAgentSummary: AIAgentSummary? = nil, versionNumber: Int64? = nil) { + self.aiAgentSummary = aiAgentSummary + self.versionNumber = versionNumber } private enum CodingKeys: String, CodingKey { - case type = "type" + case aiAgentSummary = "aiAgentSummary" + case versionNumber = "versionNumber" } } - public struct AssistantData: AWSDecodableShape { + public struct AIPromptData: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the AI Prompt. + public let aiPromptArn: String + /// The identifier of the Amazon Q in Connect AI prompt. + public let aiPromptId: String + /// The API format used for this AI Prompt. + public let apiFormat: AIPromptAPIFormat /// The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. public let assistantArn: String - /// The identifier of the Amazon Q in Connect assistant. + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. public let assistantId: String - /// The configuration information for the Amazon Q in Connect assistant capability. - public let capabilityConfiguration: AssistantCapabilityConfiguration? - /// The description. + /// The description of the AI Prompt. public let description: String? - /// The configuration information for the Amazon Q in Connect assistant integration. - public let integrationConfiguration: AssistantIntegrationConfiguration? - /// The name. + /// The identifier of the model used for this AI Prompt. Model Ids supported are: CLAUDE_3_HAIKU_20240307_V1. + public let modelId: String + /// The time the AI Prompt was last modified. + public let modifiedTime: Date? 
+ /// The name of the AI Prompt public let name: String - /// The configuration information for the customer managed key used for encryption. This KMS key must have a policy that allows kms:CreateGrant, kms:DescribeKey, kms:Decrypt, and kms:GenerateDataKey* permissions to the IAM identity using the key to invoke Amazon Q in Connect. To use Amazon Q in Connect with chat, the key policy must also allow kms:Decrypt, kms:GenerateDataKey*, and kms:DescribeKey permissions to the connect.amazonaws.com service principal. For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for your instance. - public let serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? - /// The status of the assistant. - public let status: AssistantStatus + /// The origin of the AI Prompt. SYSTEM for a default AI Prompt created by Q in Connect or CUSTOMER for an AI Prompt created by calling AI Prompt creation APIs. + public let origin: Origin? + /// The status of the AI Prompt. + public let status: Status? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? - /// The type of assistant. - public let type: AssistantType - - @inlinable - public init(assistantArn: String, assistantId: String, capabilityConfiguration: AssistantCapabilityConfiguration? = nil, description: String? = nil, integrationConfiguration: AssistantIntegrationConfiguration? = nil, name: String, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, status: AssistantStatus, tags: [String: String]? = nil, type: AssistantType) { + /// The configuration of the prompt template for this AI Prompt. + public let templateConfiguration: AIPromptTemplateConfiguration + /// The type of the prompt template for this AI Prompt. + public let templateType: AIPromptTemplateType + /// The type of this AI Prompt. + public let type: AIPromptType + /// The visibility status of the AI Prompt. + public let visibilityStatus: VisibilityStatus + + @inlinable + public init(aiPromptArn: String, aiPromptId: String, apiFormat: AIPromptAPIFormat, assistantArn: String, assistantId: String, description: String? = nil, modelId: String, modifiedTime: Date? = nil, name: String, origin: Origin? = nil, status: Status? = nil, tags: [String: String]? 
= nil, templateConfiguration: AIPromptTemplateConfiguration, templateType: AIPromptTemplateType, type: AIPromptType, visibilityStatus: VisibilityStatus) { + self.aiPromptArn = aiPromptArn + self.aiPromptId = aiPromptId + self.apiFormat = apiFormat + self.assistantArn = assistantArn + self.assistantId = assistantId + self.description = description + self.modelId = modelId + self.modifiedTime = modifiedTime + self.name = name + self.origin = origin + self.status = status + self.tags = tags + self.templateConfiguration = templateConfiguration + self.templateType = templateType + self.type = type + self.visibilityStatus = visibilityStatus + } + + private enum CodingKeys: String, CodingKey { + case aiPromptArn = "aiPromptArn" + case aiPromptId = "aiPromptId" + case apiFormat = "apiFormat" + case assistantArn = "assistantArn" + case assistantId = "assistantId" + case description = "description" + case modelId = "modelId" + case modifiedTime = "modifiedTime" + case name = "name" + case origin = "origin" + case status = "status" + case tags = "tags" + case templateConfiguration = "templateConfiguration" + case templateType = "templateType" + case type = "type" + case visibilityStatus = "visibilityStatus" + } + } + + public struct AIPromptSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the AI Prompt. + public let aiPromptArn: String + /// The identifier of the Amazon Q in Connect AI prompt. + public let aiPromptId: String + /// The API format used for this AI Prompt. + public let apiFormat: AIPromptAPIFormat + /// The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. + public let assistantArn: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The description of the AI Prompt. + public let description: String? + /// The identifier of the model used for this AI Prompt. Model Ids supported are: CLAUDE_3_HAIKU_20240307_V1. + public let modelId: String + /// The time the AI Prompt was last modified. + public let modifiedTime: Date? + /// The name of the AI Prompt. + public let name: String + /// The origin of the AI Prompt. SYSTEM for a default AI Prompt created by Q in Connect or CUSTOMER for an AI Prompt created by calling AI Prompt creation APIs. + public let origin: Origin? + /// The status of the AI Prompt. + public let status: Status? + /// The tags used to organize, track, or control access for this resource. + public let tags: [String: String]? + /// The type of the prompt template for this AI Prompt. + public let templateType: AIPromptTemplateType + /// The type of this AI Prompt. + public let type: AIPromptType + /// The visibility status of the AI Prompt. + public let visibilityStatus: VisibilityStatus + + @inlinable + public init(aiPromptArn: String, aiPromptId: String, apiFormat: AIPromptAPIFormat, assistantArn: String, assistantId: String, description: String? = nil, modelId: String, modifiedTime: Date? = nil, name: String, origin: Origin? = nil, status: Status? = nil, tags: [String: String]? 
= nil, templateType: AIPromptTemplateType, type: AIPromptType, visibilityStatus: VisibilityStatus) { + self.aiPromptArn = aiPromptArn + self.aiPromptId = aiPromptId + self.apiFormat = apiFormat + self.assistantArn = assistantArn + self.assistantId = assistantId + self.description = description + self.modelId = modelId + self.modifiedTime = modifiedTime + self.name = name + self.origin = origin + self.status = status + self.tags = tags + self.templateType = templateType + self.type = type + self.visibilityStatus = visibilityStatus + } + + private enum CodingKeys: String, CodingKey { + case aiPromptArn = "aiPromptArn" + case aiPromptId = "aiPromptId" + case apiFormat = "apiFormat" + case assistantArn = "assistantArn" + case assistantId = "assistantId" + case description = "description" + case modelId = "modelId" + case modifiedTime = "modifiedTime" + case name = "name" + case origin = "origin" + case status = "status" + case tags = "tags" + case templateType = "templateType" + case type = "type" + case visibilityStatus = "visibilityStatus" + } + } + + public struct AIPromptVersionSummary: AWSDecodableShape { + /// The data for the summary of the AI Prompt version. + public let aiPromptSummary: AIPromptSummary? + /// The version number for this AI Prompt version. + public let versionNumber: Int64? + + @inlinable + public init(aiPromptSummary: AIPromptSummary? = nil, versionNumber: Int64? = nil) { + self.aiPromptSummary = aiPromptSummary + self.versionNumber = versionNumber + } + + private enum CodingKeys: String, CodingKey { + case aiPromptSummary = "aiPromptSummary" + case versionNumber = "versionNumber" + } + } + + public struct AmazonConnectGuideAssociationData: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of an Amazon Connect flow. Step-by-step guides are a type of flow. + public let flowId: String? + + @inlinable + public init(flowId: String? = nil) { + self.flowId = flowId + } + + public func validate(name: String) throws { + try self.validate(self.flowId, name: "flowId", parent: name, max: 2048) + try self.validate(self.flowId, name: "flowId", parent: name, min: 1) + try self.validate(self.flowId, name: "flowId", parent: name, pattern: "^arn:[a-z-]+?:[a-z-]+?:[a-z0-9-]*?:([0-9]{12})?:[a-zA-Z0-9-:/]+$") + } + + private enum CodingKeys: String, CodingKey { + case flowId = "flowId" + } + } + + public struct AnswerRecommendationAIAgentConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The AI Prompt identifier for the Answer Generation prompt used by the ANSWER_RECOMMENDATION AI Agent. + public let answerGenerationAIPromptId: String? + /// The association configurations for overriding behavior on this AI Agent. + public let associationConfigurations: [AssociationConfiguration]? + /// The AI Prompt identifier for the Intent Labeling prompt used by the ANSWER_RECOMMENDATION AI Agent. + public let intentLabelingGenerationAIPromptId: String? + /// The AI Prompt identifier for the Query Reformulation prompt used by the ANSWER_RECOMMENDATION AI Agent. + public let queryReformulationAIPromptId: String? + + @inlinable + public init(answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil, intentLabelingGenerationAIPromptId: String? = nil, queryReformulationAIPromptId: String?
= nil) { + self.answerGenerationAIPromptId = answerGenerationAIPromptId + self.associationConfigurations = associationConfigurations + self.intentLabelingGenerationAIPromptId = intentLabelingGenerationAIPromptId + self.queryReformulationAIPromptId = queryReformulationAIPromptId + } + + public func validate(name: String) throws { + try self.validate(self.answerGenerationAIPromptId, name: "answerGenerationAIPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") + try self.associationConfigurations?.forEach { + try $0.validate(name: "\(name).associationConfigurations[]") + } + try self.validate(self.intentLabelingGenerationAIPromptId, name: "intentLabelingGenerationAIPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.queryReformulationAIPromptId, name: "queryReformulationAIPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") + } + + private enum CodingKeys: String, CodingKey { + case answerGenerationAIPromptId = "answerGenerationAIPromptId" + case associationConfigurations = "associationConfigurations" + case intentLabelingGenerationAIPromptId = "intentLabelingGenerationAIPromptId" + case queryReformulationAIPromptId = "queryReformulationAIPromptId" + } + } + + public struct AppIntegrationsConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content. For Salesforce, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted as source fields. For ServiceNow, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least number, short_description, sys_mod_count, workflow_state, and active as source fields. For Zendesk, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least id, title, updated_at, and draft as source fields. For SharePoint, your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among docx, pdf, html, htm, and txt. For Amazon S3, the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. The SourceURI of your DataIntegration must use the following format: s3://your_s3_bucket_name. The bucket policy of the corresponding S3 bucket must allow the Amazon Web Services principal app-integrations.amazonaws.com to perform s3:ListBucket, s3:GetObject, and s3:GetBucketLocation against the bucket. + public let appIntegrationArn: String + /// The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if ObjectConfiguration is included in the provided DataIntegration. For Salesforce, you must include at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted. For ServiceNow, you must include at least number, short_description, sys_mod_count, workflow_state, and active. For Zendesk, you must include at least id, title, updated_at, and draft. Make sure to include additional fields. These fields are indexed and used to source recommendations. + public let objectFields: [String]? + + @inlinable + public init(appIntegrationArn: String, objectFields: [String]? 
= nil) { + self.appIntegrationArn = appIntegrationArn + self.objectFields = objectFields + } + + public func validate(name: String) throws { + try self.validate(self.appIntegrationArn, name: "appIntegrationArn", parent: name, max: 2048) + try self.validate(self.appIntegrationArn, name: "appIntegrationArn", parent: name, min: 1) + try self.validate(self.appIntegrationArn, name: "appIntegrationArn", parent: name, pattern: "^arn:[a-z-]+?:[a-z-]+?:[a-z0-9-]*?:([0-9]{12})?:[a-zA-Z0-9-:/]+$") + try self.objectFields?.forEach { + try validate($0, name: "objectFields[]", parent: name, max: 4096) + try validate($0, name: "objectFields[]", parent: name, min: 1) + } + try self.validate(self.objectFields, name: "objectFields", parent: name, max: 100) + try self.validate(self.objectFields, name: "objectFields", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case appIntegrationArn = "appIntegrationArn" + case objectFields = "objectFields" + } + } + + public struct AssistantAssociationData: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. + public let assistantArn: String + /// The Amazon Resource Name (ARN) of the assistant association. + public let assistantAssociationArn: String + /// The identifier of the assistant association. + public let assistantAssociationId: String + /// The identifier of the Amazon Q in Connect assistant. + public let assistantId: String + /// A union type that currently has a single argument, the knowledge base ID. + public let associationData: AssistantAssociationOutputData + /// The type of association. + public let associationType: AssociationType + /// The tags used to organize, track, or control access for this resource. + public let tags: [String: String]? + + @inlinable + public init(assistantArn: String, assistantAssociationArn: String, assistantAssociationId: String, assistantId: String, associationData: AssistantAssociationOutputData, associationType: AssociationType, tags: [String: String]? = nil) { + self.assistantArn = assistantArn + self.assistantAssociationArn = assistantAssociationArn + self.assistantAssociationId = assistantAssociationId + self.assistantId = assistantId + self.associationData = associationData + self.associationType = associationType + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case assistantArn = "assistantArn" + case assistantAssociationArn = "assistantAssociationArn" + case assistantAssociationId = "assistantAssociationId" + case assistantId = "assistantId" + case associationData = "associationData" + case associationType = "associationType" + case tags = "tags" + } + } + + public struct AssistantAssociationSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. + public let assistantArn: String + /// The Amazon Resource Name (ARN) of the assistant association. + public let assistantAssociationArn: String + /// The identifier of the assistant association. + public let assistantAssociationId: String + /// The identifier of the Amazon Q in Connect assistant. + public let assistantId: String + /// The association data. + public let associationData: AssistantAssociationOutputData + /// The type of association. + public let associationType: AssociationType + /// The tags used to organize, track, or control access for this resource. + public let tags: [String: String]? 
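// --- Illustrative sketch (not part of the generated diff above) ---
// A minimal AppIntegrations-backed knowledge base source for Salesforce, listing the minimum
// object fields called out in the AppIntegrationsConfiguration documentation. The DataIntegration
// ARN is a hypothetical placeholder.
import SotoQConnect

func makeSalesforceSource() throws -> QConnect.AppIntegrationsConfiguration {
    let source = QConnect.AppIntegrationsConfiguration(
        appIntegrationArn: "arn:aws:app-integrations:us-east-1:111122223333:data-integration/EXAMPLE",
        objectFields: ["Id", "ArticleNumber", "VersionNumber", "Title", "PublishStatus", "IsDeleted"]
    )
    // Enforces the ARN pattern and the 1...100 objectFields limits defined in validate(name:) above.
    try source.validate(name: "sourceConfiguration.appIntegrations")
    return source
}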
+ + @inlinable + public init(assistantArn: String, assistantAssociationArn: String, assistantAssociationId: String, assistantId: String, associationData: AssistantAssociationOutputData, associationType: AssociationType, tags: [String: String]? = nil) { + self.assistantArn = assistantArn + self.assistantAssociationArn = assistantAssociationArn + self.assistantAssociationId = assistantAssociationId + self.assistantId = assistantId + self.associationData = associationData + self.associationType = associationType + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case assistantArn = "assistantArn" + case assistantAssociationArn = "assistantAssociationArn" + case assistantAssociationId = "assistantAssociationId" + case assistantId = "assistantId" + case associationData = "associationData" + case associationType = "associationType" + case tags = "tags" + } + } + + public struct AssistantCapabilityConfiguration: AWSDecodableShape { + /// The type of Amazon Q in Connect assistant capability. + public let type: AssistantCapabilityType? + + @inlinable + public init(type: AssistantCapabilityType? = nil) { + self.type = type + } + + private enum CodingKeys: String, CodingKey { + case type = "type" + } + } + + public struct AssistantData: AWSDecodableShape { + /// The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that is set on the Amazon Q in Connect Assistant. + public let aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? + /// The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. + public let assistantArn: String + /// The identifier of the Amazon Q in Connect assistant. + public let assistantId: String + /// The configuration information for the Amazon Q in Connect assistant capability. + public let capabilityConfiguration: AssistantCapabilityConfiguration? + /// The description. + public let description: String? + /// The configuration information for the Amazon Q in Connect assistant integration. + public let integrationConfiguration: AssistantIntegrationConfiguration? + /// The name. + public let name: String + /// The configuration information for the customer managed key used for encryption. This KMS key must have a policy that allows kms:CreateGrant, kms:DescribeKey, kms:Decrypt, and kms:GenerateDataKey* permissions to the IAM identity using the key to invoke Amazon Q in Connect. To use Amazon Q in Connect with chat, the key policy must also allow kms:Decrypt, kms:GenerateDataKey*, and kms:DescribeKey permissions to the connect.amazonaws.com service principal. For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for your instance. + public let serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? + /// The status of the assistant. + public let status: AssistantStatus + /// The tags used to organize, track, or control access for this resource. + public let tags: [String: String]? + /// The type of assistant. + public let type: AssistantType + + @inlinable + public init(aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? = nil, assistantArn: String, assistantId: String, capabilityConfiguration: AssistantCapabilityConfiguration? = nil, description: String? = nil, integrationConfiguration: AssistantIntegrationConfiguration? = nil, name: String, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, status: AssistantStatus, tags: [String: String]? 
= nil, type: AssistantType) { + self.aiAgentConfiguration = aiAgentConfiguration self.assistantArn = assistantArn self.assistantId = assistantId self.capabilityConfiguration = capabilityConfiguration @@ -574,6 +1170,7 @@ extension QConnect { } private enum CodingKeys: String, CodingKey { + case aiAgentConfiguration = "aiAgentConfiguration" case assistantArn = "assistantArn" case assistantId = "assistantId" case capabilityConfiguration = "capabilityConfiguration" @@ -602,6 +1199,8 @@ extension QConnect { } public struct AssistantSummary: AWSDecodableShape { + /// The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that is set on the Amazon Q in Connect Assistant. + public let aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? /// The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant. public let assistantArn: String /// The identifier of the Amazon Q in Connect assistant. @@ -624,7 +1223,8 @@ extension QConnect { public let type: AssistantType @inlinable - public init(assistantArn: String, assistantId: String, capabilityConfiguration: AssistantCapabilityConfiguration? = nil, description: String? = nil, integrationConfiguration: AssistantIntegrationConfiguration? = nil, name: String, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, status: AssistantStatus, tags: [String: String]? = nil, type: AssistantType) { + public init(aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? = nil, assistantArn: String, assistantId: String, capabilityConfiguration: AssistantCapabilityConfiguration? = nil, description: String? = nil, integrationConfiguration: AssistantIntegrationConfiguration? = nil, name: String, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, status: AssistantStatus, tags: [String: String]? = nil, type: AssistantType) { + self.aiAgentConfiguration = aiAgentConfiguration self.assistantArn = assistantArn self.assistantId = assistantId self.capabilityConfiguration = capabilityConfiguration @@ -638,16 +1238,117 @@ extension QConnect { } private enum CodingKeys: String, CodingKey { - case assistantArn = "assistantArn" - case assistantId = "assistantId" - case capabilityConfiguration = "capabilityConfiguration" - case description = "description" - case integrationConfiguration = "integrationConfiguration" - case name = "name" - case serverSideEncryptionConfiguration = "serverSideEncryptionConfiguration" - case status = "status" - case tags = "tags" - case type = "type" + case aiAgentConfiguration = "aiAgentConfiguration" + case assistantArn = "assistantArn" + case assistantId = "assistantId" + case capabilityConfiguration = "capabilityConfiguration" + case description = "description" + case integrationConfiguration = "integrationConfiguration" + case name = "name" + case serverSideEncryptionConfiguration = "serverSideEncryptionConfiguration" + case status = "status" + case tags = "tags" + case type = "type" + } + } + + public struct AssociationConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The data of the configuration for an Amazon Q in Connect Assistant Association. + public let associationConfigurationData: AssociationConfigurationData? + /// The identifier of the association for this Association Configuration. + public let associationId: String? + /// The type of the association for this Association Configuration. + public let associationType: AIAgentAssociationConfigurationType? 
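// --- Illustrative sketch (not part of the generated diff above) ---
// A small helper showing how the new aiAgentConfiguration map on AssistantData could be read:
// it records, per AI Agent type, which AI Agent version the assistant is pinned to.
func describePinnedAIAgents(for assistant: QConnect.AssistantData) {
    guard let pinned = assistant.aiAgentConfiguration, !pinned.isEmpty else {
        print("Assistant \(assistant.assistantId) uses the default AI Agents")
        return
    }
    for agentType in pinned.keys {
        print("Assistant \(assistant.assistantId) overrides the \(agentType) AI Agent")
    }
}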
+ + @inlinable + public init(associationConfigurationData: AssociationConfigurationData? = nil, associationId: String? = nil, associationType: AIAgentAssociationConfigurationType? = nil) { + self.associationConfigurationData = associationConfigurationData + self.associationId = associationId + self.associationType = associationType + } + + public func validate(name: String) throws { + try self.associationConfigurationData?.validate(name: "\(name).associationConfigurationData") + try self.validate(self.associationId, name: "associationId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case associationConfigurationData = "associationConfigurationData" + case associationId = "associationId" + case associationType = "associationType" + } + } + + public struct BedrockFoundationModelConfigurationForParsing: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the foundation model. + public let modelArn: String + /// Instructions for interpreting the contents of a document. + public let parsingPrompt: ParsingPrompt? + + @inlinable + public init(modelArn: String, parsingPrompt: ParsingPrompt? = nil) { + self.modelArn = modelArn + self.parsingPrompt = parsingPrompt + } + + public func validate(name: String) throws { + try self.validate(self.modelArn, name: "modelArn", parent: name, max: 2048) + try self.validate(self.modelArn, name: "modelArn", parent: name, min: 1) + try self.validate(self.modelArn, name: "modelArn", parent: name, pattern: "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/anthropic.claude-3-haiku-20240307-v1:0$") + try self.parsingPrompt?.validate(name: "\(name).parsingPrompt") + } + + private enum CodingKeys: String, CodingKey { + case modelArn = "modelArn" + case parsingPrompt = "parsingPrompt" + } + } + + public struct ChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Knowledge base can split your source data into chunks. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for NONE, then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk. + public let chunkingStrategy: ChunkingStrategy + /// Configurations for when you choose fixed-size chunking. If you set the chunkingStrategy as NONE, exclude this field. + public let fixedSizeChunkingConfiguration: FixedSizeChunkingConfiguration? + /// Settings for hierarchical document chunking for a data source. Hierarchical chunking splits documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer. + public let hierarchicalChunkingConfiguration: HierarchicalChunkingConfiguration? + /// Settings for semantic document chunking for a data source. Semantic chunking splits a document into smaller documents based on groups of similar content derived from the text with natural language processing. + public let semanticChunkingConfiguration: SemanticChunkingConfiguration? + + @inlinable + public init(chunkingStrategy: ChunkingStrategy, fixedSizeChunkingConfiguration: FixedSizeChunkingConfiguration? = nil, hierarchicalChunkingConfiguration: HierarchicalChunkingConfiguration? = nil, semanticChunkingConfiguration: SemanticChunkingConfiguration? 
= nil) { + self.chunkingStrategy = chunkingStrategy + self.fixedSizeChunkingConfiguration = fixedSizeChunkingConfiguration + self.hierarchicalChunkingConfiguration = hierarchicalChunkingConfiguration + self.semanticChunkingConfiguration = semanticChunkingConfiguration + } + + public func validate(name: String) throws { + try self.hierarchicalChunkingConfiguration?.validate(name: "\(name).hierarchicalChunkingConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case chunkingStrategy = "chunkingStrategy" + case fixedSizeChunkingConfiguration = "fixedSizeChunkingConfiguration" + case hierarchicalChunkingConfiguration = "hierarchicalChunkingConfiguration" + case semanticChunkingConfiguration = "semanticChunkingConfiguration" + } + } + + public struct CitationSpan: AWSDecodableShape { + /// Where the text with a citation starts in the generated output. + public let beginOffsetInclusive: Int? + /// Where the text with a citation ends in the generated output. + public let endOffsetExclusive: Int? + + @inlinable + public init(beginOffsetInclusive: Int? = nil, endOffsetExclusive: Int? = nil) { + self.beginOffsetInclusive = beginOffsetInclusive + self.endOffsetExclusive = endOffsetExclusive + } + + private enum CodingKeys: String, CodingKey { + case beginOffsetInclusive = "beginOffsetInclusive" + case endOffsetExclusive = "endOffsetExclusive" } } @@ -856,75 +1557,392 @@ extension QConnect { public let knowledgeBaseArn: String? /// The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base. public let knowledgeBaseId: String? + /// The type of reference content. + public let referenceType: ReferenceType? + /// The web URL of the source content. + public let sourceURL: String? + + @inlinable + public init(contentArn: String? = nil, contentId: String? = nil, knowledgeBaseArn: String? = nil, knowledgeBaseId: String? = nil, referenceType: ReferenceType? = nil, sourceURL: String? = nil) { + self.contentArn = contentArn + self.contentId = contentId + self.knowledgeBaseArn = knowledgeBaseArn + self.knowledgeBaseId = knowledgeBaseId + self.referenceType = referenceType + self.sourceURL = sourceURL + } + + private enum CodingKeys: String, CodingKey { + case contentArn = "contentArn" + case contentId = "contentId" + case knowledgeBaseArn = "knowledgeBaseArn" + case knowledgeBaseId = "knowledgeBaseId" + case referenceType = "referenceType" + case sourceURL = "sourceURL" + } + } + + public struct ContentSummary: AWSDecodableShape { + /// The Amazon Resource Name (ARN) of the content. + public let contentArn: String + /// The identifier of the content. + public let contentId: String + /// The media type of the content. + public let contentType: String + /// The Amazon Resource Name (ARN) of the knowledge base. + public let knowledgeBaseArn: String + /// The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base. + public let knowledgeBaseId: String + /// A key/value map to store attributes without affecting tagging or recommendations. + /// For example, when synchronizing data between an external system and Amazon Q in Connect, you can store an external version identifier as metadata to utilize for determining drift. + public let metadata: [String: String] + /// The name of the content. + public let name: String + /// The identifier of the revision of the content. + public let revisionId: String + /// The status of the content. 
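// --- Illustrative sketch (not part of the generated diff above) ---
// A fixed-size chunking setup for knowledge base ingestion. `.fixedSize` is assumed to be the
// generated ChunkingStrategy case for the service's FIXED_SIZE value; FixedSizeChunkingConfiguration
// is defined later in this change.
let exampleChunking = QConnect.ChunkingConfiguration(
    chunkingStrategy: .fixedSize,
    fixedSizeChunkingConfiguration: QConnect.FixedSizeChunkingConfiguration(
        maxTokens: 300,         // maximum tokens per chunk
        overlapPercentage: 20   // overlap between adjacent chunks
    )
)
// A value like this would normally be carried inside the vectorIngestionConfiguration that
// CreateKnowledgeBaseRequest gains further down in this diff.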
+ public let status: ContentStatus + /// The tags used to organize, track, or control access for this resource. + public let tags: [String: String]? + /// The title of the content. + public let title: String + + @inlinable + public init(contentArn: String, contentId: String, contentType: String, knowledgeBaseArn: String, knowledgeBaseId: String, metadata: [String: String], name: String, revisionId: String, status: ContentStatus, tags: [String: String]? = nil, title: String) { + self.contentArn = contentArn + self.contentId = contentId + self.contentType = contentType + self.knowledgeBaseArn = knowledgeBaseArn + self.knowledgeBaseId = knowledgeBaseId + self.metadata = metadata + self.name = name + self.revisionId = revisionId + self.status = status + self.tags = tags + self.title = title + } + + private enum CodingKeys: String, CodingKey { + case contentArn = "contentArn" + case contentId = "contentId" + case contentType = "contentType" + case knowledgeBaseArn = "knowledgeBaseArn" + case knowledgeBaseId = "knowledgeBaseId" + case metadata = "metadata" + case name = "name" + case revisionId = "revisionId" + case status = "status" + case tags = "tags" + case title = "title" + } + } + + public struct CreateAIAgentRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + public let clientToken: String? + /// The configuration of the AI Agent. + public let configuration: AIAgentConfiguration + /// The description of the AI Agent. + public let description: String? + /// The name of the AI Agent. + public let name: String + /// The tags used to organize, track, or control access for this resource. + public let tags: [String: String]? + /// The type of the AI Agent. + public let type: AIAgentType + /// The visibility status of the AI Agent. + public let visibilityStatus: VisibilityStatus + + @inlinable + public init(assistantId: String, clientToken: String? = CreateAIAgentRequest.idempotencyToken(), configuration: AIAgentConfiguration, description: String? = nil, name: String, tags: [String: String]? = nil, type: AIAgentType, visibilityStatus: VisibilityStatus) { + self.assistantId = assistantId + self.clientToken = clientToken + self.configuration = configuration + self.description = description + self.name = name + self.tags = tags + self.type = type + self.visibilityStatus = visibilityStatus + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assistantId, key: "assistantId") + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encode(self.configuration, forKey: .configuration) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.tags, forKey: .tags) + try container.encode(self.type, forKey: .type) + try container.encode(self.visibilityStatus, forKey: .visibilityStatus) + } + + public func validate(name: String) throws { + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 4096) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.configuration.validate(name: "\(name).configuration") + try self.validate(self.description, name: "description", parent: name, max: 255) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, pattern: "^[a-zA-Z0-9\\s_.,-]+") + try self.validate(self.name, name: "name", parent: name, max: 255) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9\\s_.,-]+") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, min: 1) + } + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case configuration = "configuration" + case description = "description" + case name = "name" + case tags = "tags" + case type = "type" + case visibilityStatus = "visibilityStatus" + } + } + + public struct CreateAIAgentResponse: AWSDecodableShape { + /// The data of the created AI Agent. + public let aiAgent: AIAgentData? + + @inlinable + public init(aiAgent: AIAgentData? = nil) { + self.aiAgent = aiAgent + } + + private enum CodingKeys: String, CodingKey { + case aiAgent = "aiAgent" + } + } + + public struct CreateAIAgentVersionRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI Agent. + public let aiAgentId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + public let clientToken: String? + /// The modification time of the AI Agent should be tracked for version creation. This field should be specified to avoid version creation when simultaneous updates to the underlying AI Agent are possible.
The value should be the modifiedTime returned from the request to create or update an AI Agent so that version creation can fail if an update to the AI Agent post the specified modification time has been made. + public let modifiedTime: Date? + + @inlinable + public init(aiAgentId: String, assistantId: String, clientToken: String? = CreateAIAgentVersionRequest.idempotencyToken(), modifiedTime: Date? = nil) { + self.aiAgentId = aiAgentId + self.assistantId = assistantId + self.clientToken = clientToken + self.modifiedTime = modifiedTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiAgentId, key: "aiAgentId") + request.encodePath(self.assistantId, key: "assistantId") + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.modifiedTime, forKey: .modifiedTime) + } + + public func validate(name: String) throws { + try self.validate(self.aiAgentId, name: "aiAgentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 4096) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case modifiedTime = "modifiedTime" + } + } + + public struct CreateAIAgentVersionResponse: AWSDecodableShape { + /// The data of the AI Agent version. + public let aiAgent: AIAgentData? + /// The version number of the AI Agent version. + public let versionNumber: Int64? + + @inlinable + public init(aiAgent: AIAgentData? = nil, versionNumber: Int64? = nil) { + self.aiAgent = aiAgent + self.versionNumber = versionNumber + } + + private enum CodingKeys: String, CodingKey { + case aiAgent = "aiAgent" + case versionNumber = "versionNumber" + } + } + + public struct CreateAIPromptRequest: AWSEncodableShape { + /// The API Format of the AI Prompt. + public let apiFormat: AIPromptAPIFormat + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + public let clientToken: String? + /// The description of the AI Prompt. + public let description: String? + /// The identifier of the model used for this AI Prompt. Model Ids supported are: CLAUDE_3_HAIKU_20240307_V1 + public let modelId: String + /// The name of the AI Prompt. + public let name: String + /// The tags used to organize, track, or control access for this resource. + public let tags: [String: String]? 
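// --- Illustrative sketch (not part of the generated diff above) ---
// Creating an immutable AI Agent version pinned to a known modification time, using the
// CreateAIAgentVersionRequest shape above. The createAIAgentVersion operation method is assumed
// to be the one generated alongside these shapes elsewhere in this change.
import Foundation  // for Date

func publishAgentVersion(
    qConnect: QConnect,
    assistantId: String,
    aiAgentId: String,
    lastModified: Date
) async throws -> Int64? {
    let request = QConnect.CreateAIAgentVersionRequest(
        aiAgentId: aiAgentId,
        assistantId: assistantId,
        modifiedTime: lastModified  // version creation fails if the agent changed after this time
    )
    let response = try await qConnect.createAIAgentVersion(request)
    return response.versionNumber
}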
+ /// The configuration of the prompt template for this AI Prompt. + public let templateConfiguration: AIPromptTemplateConfiguration + /// The type of the prompt template for this AI Prompt. + public let templateType: AIPromptTemplateType + /// The type of this AI Prompt. + public let type: AIPromptType + /// The visibility status of the AI Prompt. + public let visibilityStatus: VisibilityStatus + + @inlinable + public init(apiFormat: AIPromptAPIFormat, assistantId: String, clientToken: String? = CreateAIPromptRequest.idempotencyToken(), description: String? = nil, modelId: String, name: String, tags: [String: String]? = nil, templateConfiguration: AIPromptTemplateConfiguration, templateType: AIPromptTemplateType, type: AIPromptType, visibilityStatus: VisibilityStatus) { + self.apiFormat = apiFormat + self.assistantId = assistantId + self.clientToken = clientToken + self.description = description + self.modelId = modelId + self.name = name + self.tags = tags + self.templateConfiguration = templateConfiguration + self.templateType = templateType + self.type = type + self.visibilityStatus = visibilityStatus + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.apiFormat, forKey: .apiFormat) + request.encodePath(self.assistantId, key: "assistantId") + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encode(self.modelId, forKey: .modelId) + try container.encode(self.name, forKey: .name) + try container.encodeIfPresent(self.tags, forKey: .tags) + try container.encode(self.templateConfiguration, forKey: .templateConfiguration) + try container.encode(self.templateType, forKey: .templateType) + try container.encode(self.type, forKey: .type) + try container.encode(self.visibilityStatus, forKey: .visibilityStatus) + } + + public func validate(name: String) throws { + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 4096) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, max: 255) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, pattern: "^[a-zA-Z0-9\\s_.,-]+") + try self.validate(self.modelId, name: "modelId", parent: name, max: 2048) + try self.validate(self.modelId, name: "modelId", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 255) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9\\s_.,-]+") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try 
validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, min: 1) + } + try self.templateConfiguration.validate(name: "\(name).templateConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case apiFormat = "apiFormat" + case clientToken = "clientToken" + case description = "description" + case modelId = "modelId" + case name = "name" + case tags = "tags" + case templateConfiguration = "templateConfiguration" + case templateType = "templateType" + case type = "type" + case visibilityStatus = "visibilityStatus" + } + } + + public struct CreateAIPromptResponse: AWSDecodableShape { + /// The data of the AI Prompt. + public let aiPrompt: AIPromptData? + + @inlinable + public init(aiPrompt: AIPromptData? = nil) { + self.aiPrompt = aiPrompt + } + + private enum CodingKeys: String, CodingKey { + case aiPrompt = "aiPrompt" + } + } + + public struct CreateAIPromptVersionRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI prompt. + public let aiPromptId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + public let clientToken: String? + /// The time the AI Prompt was last modified. + public let modifiedTime: Date? @inlinable - public init(contentArn: String? = nil, contentId: String? = nil, knowledgeBaseArn: String? = nil, knowledgeBaseId: String? = nil) { - self.contentArn = contentArn - self.contentId = contentId - self.knowledgeBaseArn = knowledgeBaseArn - self.knowledgeBaseId = knowledgeBaseId + public init(aiPromptId: String, assistantId: String, clientToken: String? = CreateAIPromptVersionRequest.idempotencyToken(), modifiedTime: Date? = nil) { + self.aiPromptId = aiPromptId + self.assistantId = assistantId + self.clientToken = clientToken + self.modifiedTime = modifiedTime + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiPromptId, key: "aiPromptId") + request.encodePath(self.assistantId, key: "assistantId") + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.modifiedTime, forKey: .modifiedTime) + } + + public func validate(name: String) throws { + try self.validate(self.aiPromptId, name: "aiPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 4096) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { - case contentArn = "contentArn" - case contentId = "contentId" - case knowledgeBaseArn = "knowledgeBaseArn" - case knowledgeBaseId = "knowledgeBaseId" + case clientToken = "clientToken" + case modifiedTime = "modifiedTime" } } - public struct ContentSummary: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the content. - public let contentArn: String - /// The identifier of the content. - public let contentId: String - /// The media type of the content. - public let contentType: String - /// The Amazon Resource Name (ARN) of the knowledge base. - public let knowledgeBaseArn: String - /// The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base. - public let knowledgeBaseId: String - /// A key/value map to store attributes without affecting tagging or recommendations. - /// For example, when synchronizing data between an external system and Amazon Q in Connect, you can store an external version identifier as metadata to utilize for determining drift. - public let metadata: [String: String] - /// The name of the content. - public let name: String - /// The identifier of the revision of the content. - public let revisionId: String - /// The status of the content. - public let status: ContentStatus - /// The tags used to organize, track, or control access for this resource. - public let tags: [String: String]? - /// The title of the content. - public let title: String + public struct CreateAIPromptVersionResponse: AWSDecodableShape { + /// The data of the AI Prompt version. + public let aiPrompt: AIPromptData? + /// The version number of the AI Prompt version. + public let versionNumber: Int64? @inlinable - public init(contentArn: String, contentId: String, contentType: String, knowledgeBaseArn: String, knowledgeBaseId: String, metadata: [String: String], name: String, revisionId: String, status: ContentStatus, tags: [String: String]? 
= nil, title: String) { - self.contentArn = contentArn - self.contentId = contentId - self.contentType = contentType - self.knowledgeBaseArn = knowledgeBaseArn - self.knowledgeBaseId = knowledgeBaseId - self.metadata = metadata - self.name = name - self.revisionId = revisionId - self.status = status - self.tags = tags - self.title = title + public init(aiPrompt: AIPromptData? = nil, versionNumber: Int64? = nil) { + self.aiPrompt = aiPrompt + self.versionNumber = versionNumber } private enum CodingKeys: String, CodingKey { - case contentArn = "contentArn" - case contentId = "contentId" - case contentType = "contentType" - case knowledgeBaseArn = "knowledgeBaseArn" - case knowledgeBaseId = "knowledgeBaseId" - case metadata = "metadata" - case name = "name" - case revisionId = "revisionId" - case status = "status" - case tags = "tags" - case title = "title" + case aiPrompt = "aiPrompt" + case versionNumber = "versionNumber" } } @@ -1249,9 +2267,11 @@ extension QConnect { public let sourceConfiguration: SourceConfiguration? /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? + /// Contains details about how to ingest the documents in a data source. + public let vectorIngestionConfiguration: VectorIngestionConfiguration? @inlinable - public init(clientToken: String? = CreateKnowledgeBaseRequest.idempotencyToken(), description: String? = nil, knowledgeBaseType: KnowledgeBaseType, name: String, renderingConfiguration: RenderingConfiguration? = nil, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, sourceConfiguration: SourceConfiguration? = nil, tags: [String: String]? = nil) { + public init(clientToken: String? = CreateKnowledgeBaseRequest.idempotencyToken(), description: String? = nil, knowledgeBaseType: KnowledgeBaseType, name: String, renderingConfiguration: RenderingConfiguration? = nil, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, sourceConfiguration: SourceConfiguration? = nil, tags: [String: String]? = nil, vectorIngestionConfiguration: VectorIngestionConfiguration? = nil) { self.clientToken = clientToken self.description = description self.knowledgeBaseType = knowledgeBaseType @@ -1260,6 +2280,7 @@ extension QConnect { self.serverSideEncryptionConfiguration = serverSideEncryptionConfiguration self.sourceConfiguration = sourceConfiguration self.tags = tags + self.vectorIngestionConfiguration = vectorIngestionConfiguration } public func validate(name: String) throws { @@ -1281,6 +2302,7 @@ extension QConnect { try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, min: 1) } + try self.vectorIngestionConfiguration?.validate(name: "\(name).vectorIngestionConfiguration") } private enum CodingKeys: String, CodingKey { @@ -1292,6 +2314,7 @@ extension QConnect { case serverSideEncryptionConfiguration = "serverSideEncryptionConfiguration" case sourceConfiguration = "sourceConfiguration" case tags = "tags" + case vectorIngestionConfiguration = "vectorIngestionConfiguration" } } @@ -1426,6 +2449,8 @@ extension QConnect { } public struct CreateSessionRequest: AWSEncodableShape { + /// The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that should be used by Amazon Q in Connect for this Session. + public let aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. 
URLs cannot contain the ARN. public let assistantId: String /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. @@ -1440,7 +2465,8 @@ extension QConnect { public let tags: [String: String]? @inlinable - public init(assistantId: String, clientToken: String? = CreateSessionRequest.idempotencyToken(), description: String? = nil, name: String, tagFilter: TagFilter? = nil, tags: [String: String]? = nil) { + public init(aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? = nil, assistantId: String, clientToken: String? = CreateSessionRequest.idempotencyToken(), description: String? = nil, name: String, tagFilter: TagFilter? = nil, tags: [String: String]? = nil) { + self.aiAgentConfiguration = aiAgentConfiguration self.assistantId = assistantId self.clientToken = clientToken self.description = description @@ -1452,6 +2478,7 @@ extension QConnect { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.aiAgentConfiguration, forKey: .aiAgentConfiguration) request.encodePath(self.assistantId, key: "assistantId") try container.encodeIfPresent(self.clientToken, forKey: .clientToken) try container.encodeIfPresent(self.description, forKey: .description) @@ -1461,6 +2488,9 @@ extension QConnect { } public func validate(name: String) throws { + try self.aiAgentConfiguration?.forEach { + try $0.value.validate(name: "\(name).aiAgentConfiguration[\"\($0.key)\"]") + } try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") try self.validate(self.clientToken, name: "clientToken", parent: name, max: 4096) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) @@ -1481,6 +2511,7 @@ extension QConnect { } private enum CodingKeys: String, CodingKey { + case aiAgentConfiguration = "aiAgentConfiguration" case clientToken = "clientToken" case description = "description" case name = "name" @@ -1521,6 +2552,140 @@ extension QConnect { } } + public struct DeleteAIAgentRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI Agent. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let aiAgentId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + + @inlinable + public init(aiAgentId: String, assistantId: String) { + self.aiAgentId = aiAgentId + self.assistantId = assistantId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiAgentId, key: "aiAgentId") + request.encodePath(self.assistantId, key: "assistantId") + } + + public func validate(name: String) throws { + try self.validate(self.aiAgentId, name: "aiAgentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAIAgentResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteAIAgentVersionRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI Agent. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let aiAgentId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The version number of the AI Agent version. + public let versionNumber: Int64 + + @inlinable + public init(aiAgentId: String, assistantId: String, versionNumber: Int64) { + self.aiAgentId = aiAgentId + self.assistantId = assistantId + self.versionNumber = versionNumber + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiAgentId, key: "aiAgentId") + request.encodePath(self.assistantId, key: "assistantId") + request.encodePath(self.versionNumber, key: "versionNumber") + } + + public func validate(name: String) throws { + try self.validate(self.aiAgentId, name: "aiAgentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.versionNumber, name: "versionNumber", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAIAgentVersionResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteAIPromptRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI prompt. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let aiPromptId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. 
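// --- Illustrative sketch (not part of the generated diff above) ---
// Deleting one AI Agent version while keeping the draft agent, using DeleteAIAgentVersionRequest.
// The deleteAIAgentVersion operation method is assumed to be generated alongside these shapes.
func removeAgentVersion(
    qConnect: QConnect,
    assistantId: String,
    aiAgentId: String,
    version: Int64
) async throws {
    let request = QConnect.DeleteAIAgentVersionRequest(
        aiAgentId: aiAgentId,
        assistantId: assistantId,
        versionNumber: version  // must be >= 1, per the validation above
    )
    _ = try await qConnect.deleteAIAgentVersion(request)
}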
+ public let assistantId: String + + @inlinable + public init(aiPromptId: String, assistantId: String) { + self.aiPromptId = aiPromptId + self.assistantId = assistantId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiPromptId, key: "aiPromptId") + request.encodePath(self.assistantId, key: "assistantId") + } + + public func validate(name: String) throws { + try self.validate(self.aiPromptId, name: "aiPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAIPromptResponse: AWSDecodableShape { + public init() {} + } + + public struct DeleteAIPromptVersionRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI prompt. + public let aiPromptId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The version number of the AI Prompt version to be deleted. + public let versionNumber: Int64 + + @inlinable + public init(aiPromptId: String, assistantId: String, versionNumber: Int64) { + self.aiPromptId = aiPromptId + self.assistantId = assistantId + self.versionNumber = versionNumber + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiPromptId, key: "aiPromptId") + request.encodePath(self.assistantId, key: "assistantId") + request.encodePath(self.versionNumber, key: "versionNumber") + } + + public func validate(name: String) throws { + try self.validate(self.aiPromptId, name: "aiPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.versionNumber, name: "versionNumber", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteAIPromptVersionResponse: AWSDecodableShape { + public init() {} + } + public struct DeleteAssistantAssociationRequest: AWSEncodableShape { /// The identifier of the assistant association. Can be either the ID or the ARN. URLs cannot contain the ARN. 
public let assistantAssociationId: String @@ -1822,6 +2987,24 @@ extension QConnect { } } + public struct FixedSizeChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The maximum number of tokens to include in a chunk. + public let maxTokens: Int + /// The percentage of overlap between adjacent chunks of a data source. + public let overlapPercentage: Int + + @inlinable + public init(maxTokens: Int, overlapPercentage: Int) { + self.maxTokens = maxTokens + self.overlapPercentage = overlapPercentage + } + + private enum CodingKeys: String, CodingKey { + case maxTokens = "maxTokens" + case overlapPercentage = "overlapPercentage" + } + } + public struct GenerativeContentFeedbackData: AWSEncodableShape & AWSDecodableShape { /// The relevance of the feedback. public let relevance: Relevance @@ -1876,6 +3059,96 @@ extension QConnect { } } + public struct GetAIAgentRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI Agent (with or without a version qualifier). Can be either the ID or the ARN. URLs cannot contain the ARN. + public let aiAgentId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + + @inlinable + public init(aiAgentId: String, assistantId: String) { + self.aiAgentId = aiAgentId + self.assistantId = assistantId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiAgentId, key: "aiAgentId") + request.encodePath(self.assistantId, key: "assistantId") + } + + public func validate(name: String) throws { + try self.validate(self.aiAgentId, name: "aiAgentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetAIAgentResponse: AWSDecodableShape { + /// The data of the AI Agent. + public let aiAgent: AIAgentData? + /// The version number of the AI Agent version (returned if an AI Agent version was specified via use of a qualifier for the aiAgentId on the request). + public let versionNumber: Int64? + + @inlinable + public init(aiAgent: AIAgentData? = nil, versionNumber: Int64? = nil) { + self.aiAgent = aiAgent + self.versionNumber = versionNumber + } + + private enum CodingKeys: String, CodingKey { + case aiAgent = "aiAgent" + case versionNumber = "versionNumber" + } + } + + public struct GetAIPromptRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI prompt. + public let aiPromptId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. 
+ public let assistantId: String + + @inlinable + public init(aiPromptId: String, assistantId: String) { + self.aiPromptId = aiPromptId + self.assistantId = assistantId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiPromptId, key: "aiPromptId") + request.encodePath(self.assistantId, key: "assistantId") + } + + public func validate(name: String) throws { + try self.validate(self.aiPromptId, name: "aiPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetAIPromptResponse: AWSDecodableShape { + /// The data of the AI Prompt. + public let aiPrompt: AIPromptData? + /// The version number of the AI Prompt version (returned if an AI Prompt version was specified via use of a qualifier for the aiPromptId on the request). + public let versionNumber: Int64? + + @inlinable + public init(aiPrompt: AIPromptData? = nil, versionNumber: Int64? = nil) { + self.aiPrompt = aiPrompt + self.versionNumber = versionNumber + } + + private enum CodingKeys: String, CodingKey { + case aiPrompt = "aiPrompt" + case versionNumber = "versionNumber" + } + } + public struct GetAssistantAssociationRequest: AWSEncodableShape { /// The identifier of the assistant association. Can be either the ID or the ARN. URLs cannot contain the ARN. public let assistantAssociationId: String @@ -2309,18 +3582,55 @@ extension QConnect { self.values = values } - public func validate(name: String) throws { - try self.validate(self.criteria, name: "criteria", parent: name, max: 100) - try self.validate(self.criteria, name: "criteria", parent: name, min: 1) - try self.values?.forEach { - try validate($0, name: "values[]", parent: name, max: 2048) - try validate($0, name: "values[]", parent: name, min: 1) - } + public func validate(name: String) throws { + try self.validate(self.criteria, name: "criteria", parent: name, max: 100) + try self.validate(self.criteria, name: "criteria", parent: name, min: 1) + try self.values?.forEach { + try validate($0, name: "values[]", parent: name, max: 2048) + try validate($0, name: "values[]", parent: name, min: 1) + } + } + + private enum CodingKeys: String, CodingKey { + case criteria = "criteria" + case values = "values" + } + } + + public struct HierarchicalChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Token settings for each layer. + public let levelConfigurations: [HierarchicalChunkingLevelConfiguration] + /// The number of tokens to repeat across chunks in the same layer. 
+ public let overlapTokens: Int + + @inlinable + public init(levelConfigurations: [HierarchicalChunkingLevelConfiguration], overlapTokens: Int) { + self.levelConfigurations = levelConfigurations + self.overlapTokens = overlapTokens + } + + public func validate(name: String) throws { + try self.validate(self.levelConfigurations, name: "levelConfigurations", parent: name, max: 2) + try self.validate(self.levelConfigurations, name: "levelConfigurations", parent: name, min: 2) + } + + private enum CodingKeys: String, CodingKey { + case levelConfigurations = "levelConfigurations" + case overlapTokens = "overlapTokens" + } + } + + public struct HierarchicalChunkingLevelConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The maximum number of tokens that a chunk can contain in this layer. + public let maxTokens: Int + + @inlinable + public init(maxTokens: Int) { + self.maxTokens = maxTokens } private enum CodingKeys: String, CodingKey { - case criteria = "criteria" - case values = "values" + case maxTokens = "maxTokens" } } @@ -2453,6 +3763,69 @@ extension QConnect { } } + public struct IntentDetectedDataDetails: AWSDecodableShape { + /// The detected intent. + public let intent: String + /// The identifier of the detected intent. + public let intentId: String + + @inlinable + public init(intent: String, intentId: String) { + self.intent = intent + self.intentId = intentId + } + + private enum CodingKeys: String, CodingKey { + case intent = "intent" + case intentId = "intentId" + } + } + + public struct IntentInputData: AWSEncodableShape { + /// The identifier of the Amazon Q intent. + public let intentId: String + + @inlinable + public init(intentId: String) { + self.intentId = intentId + } + + public func validate(name: String) throws { + try self.validate(self.intentId, name: "intentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case intentId = "intentId" + } + } + + public struct KnowledgeBaseAssociationConfigurationData: AWSEncodableShape & AWSDecodableShape { + public let contentTagFilter: TagFilter? + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The search type to be used against the Knowledge Base for this request. The values can be SEMANTIC, which uses vector embeddings, or HYBRID, which uses vector embeddings and raw text. + public let overrideKnowledgeBaseSearchType: KnowledgeBaseSearchType? + + @inlinable + public init(contentTagFilter: TagFilter? = nil, maxResults: Int? = nil, overrideKnowledgeBaseSearchType: KnowledgeBaseSearchType? = nil) { + self.contentTagFilter = contentTagFilter + self.maxResults = maxResults + self.overrideKnowledgeBaseSearchType = overrideKnowledgeBaseSearchType + } + + public func validate(name: String) throws { + try self.contentTagFilter?.validate(name: "\(name).contentTagFilter") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case contentTagFilter = "contentTagFilter" + case maxResults = "maxResults" + case overrideKnowledgeBaseSearchType = "overrideKnowledgeBaseSearchType" + } + } + public struct KnowledgeBaseAssociationData: AWSDecodableShape { /// The Amazon Resource Name (ARN) of the knowledge base. public let knowledgeBaseArn: String?
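The hunk above adds the new chunking and knowledge-base association shapes. As a rough, illustrative sketch only (not part of this change), the Swift below shows how the generated initializers and validate(name:) checks from that hunk might be exercised from client code. The SotoQConnect module name and all token/limit values are assumptions; only initializers and validate methods that appear verbatim in the hunk are used.

import SotoQConnect  // assumed module name, following Soto's "Soto<Service>" library naming

// Hierarchical chunking: validate(name:) requires exactly two level configurations
// (min: 2, max: 2), e.g. a coarse parent layer and a finer child layer.
let hierarchical = QConnect.HierarchicalChunkingConfiguration(
    levelConfigurations: [
        QConnect.HierarchicalChunkingLevelConfiguration(maxTokens: 1500),  // parent chunks
        QConnect.HierarchicalChunkingLevelConfiguration(maxTokens: 300)    // child chunks
    ],
    overlapTokens: 60
)

// Fixed-size chunking is the simpler alternative: a per-chunk token budget plus the
// percentage of overlap between adjacent chunks.
let fixedSize = QConnect.FixedSizeChunkingConfiguration(maxTokens: 512, overlapPercentage: 20)

// A per-association knowledge-base override capping results at 25 per page; the
// search-type override is left nil because the KnowledgeBaseSearchType cases are not
// shown in this hunk.
let kbOverride = QConnect.KnowledgeBaseAssociationConfigurationData(
    contentTagFilter: nil,
    maxResults: 25,
    overrideKnowledgeBaseSearchType: nil
)

do {
    // Client-side checks mirroring the generated validate(name:) implementations.
    try hierarchical.validate(name: "chunkingConfiguration")
    try kbOverride.validate(name: "associationConfigurationData")
} catch {
    print("client-side validation failed: \(error)")
}

Either chunking shape could be supplied wherever the service accepts a chunking configuration; fixedSize is included only for comparison.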
@@ -2474,6 +3847,10 @@ extension QConnect { public struct KnowledgeBaseData: AWSDecodableShape { /// The description. public let description: String? + /// List of failure reasons on ingestion per file. + public let ingestionFailureReasons: [String]? + /// Status of ingestion on data source. + public let ingestionStatus: SyncStatus? /// The Amazon Resource Name (ARN) of the knowledge base. public let knowledgeBaseArn: String /// The identifier of the knowledge base. @@ -2494,10 +3871,14 @@ extension QConnect { public let status: KnowledgeBaseStatus /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? + /// Contains details about how to ingest the documents in a data source. + public let vectorIngestionConfiguration: VectorIngestionConfiguration? @inlinable - public init(description: String? = nil, knowledgeBaseArn: String, knowledgeBaseId: String, knowledgeBaseType: KnowledgeBaseType, lastContentModificationTime: Date? = nil, name: String, renderingConfiguration: RenderingConfiguration? = nil, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, sourceConfiguration: SourceConfiguration? = nil, status: KnowledgeBaseStatus, tags: [String: String]? = nil) { + public init(description: String? = nil, ingestionFailureReasons: [String]? = nil, ingestionStatus: SyncStatus? = nil, knowledgeBaseArn: String, knowledgeBaseId: String, knowledgeBaseType: KnowledgeBaseType, lastContentModificationTime: Date? = nil, name: String, renderingConfiguration: RenderingConfiguration? = nil, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, sourceConfiguration: SourceConfiguration? = nil, status: KnowledgeBaseStatus, tags: [String: String]? = nil, vectorIngestionConfiguration: VectorIngestionConfiguration? = nil) { self.description = description + self.ingestionFailureReasons = ingestionFailureReasons + self.ingestionStatus = ingestionStatus self.knowledgeBaseArn = knowledgeBaseArn self.knowledgeBaseId = knowledgeBaseId self.knowledgeBaseType = knowledgeBaseType @@ -2508,10 +3889,13 @@ extension QConnect { self.sourceConfiguration = sourceConfiguration self.status = status self.tags = tags + self.vectorIngestionConfiguration = vectorIngestionConfiguration } private enum CodingKeys: String, CodingKey { case description = "description" + case ingestionFailureReasons = "ingestionFailureReasons" + case ingestionStatus = "ingestionStatus" case knowledgeBaseArn = "knowledgeBaseArn" case knowledgeBaseId = "knowledgeBaseId" case knowledgeBaseType = "knowledgeBaseType" @@ -2522,6 +3906,7 @@ extension QConnect { case sourceConfiguration = "sourceConfiguration" case status = "status" case tags = "tags" + case vectorIngestionConfiguration = "vectorIngestionConfiguration" } } @@ -2546,9 +3931,11 @@ extension QConnect { public let status: KnowledgeBaseStatus /// The tags used to organize, track, or control access for this resource. public let tags: [String: String]? + /// Contains details about how to ingest the documents in a data source. + public let vectorIngestionConfiguration: VectorIngestionConfiguration? @inlinable - public init(description: String? = nil, knowledgeBaseArn: String, knowledgeBaseId: String, knowledgeBaseType: KnowledgeBaseType, name: String, renderingConfiguration: RenderingConfiguration? = nil, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, sourceConfiguration: SourceConfiguration? = nil, status: KnowledgeBaseStatus, tags: [String: String]? 
= nil) { + public init(description: String? = nil, knowledgeBaseArn: String, knowledgeBaseId: String, knowledgeBaseType: KnowledgeBaseType, name: String, renderingConfiguration: RenderingConfiguration? = nil, serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration? = nil, sourceConfiguration: SourceConfiguration? = nil, status: KnowledgeBaseStatus, tags: [String: String]? = nil, vectorIngestionConfiguration: VectorIngestionConfiguration? = nil) { self.description = description self.knowledgeBaseArn = knowledgeBaseArn self.knowledgeBaseId = knowledgeBaseId @@ -2559,6 +3946,7 @@ extension QConnect { self.sourceConfiguration = sourceConfiguration self.status = status self.tags = tags + self.vectorIngestionConfiguration = vectorIngestionConfiguration } private enum CodingKeys: String, CodingKey { @@ -2572,6 +3960,241 @@ extension QConnect { case sourceConfiguration = "sourceConfiguration" case status = "status" case tags = "tags" + case vectorIngestionConfiguration = "vectorIngestionConfiguration" + } + } + + public struct ListAIAgentVersionsRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI Agent for which versions are to be listed. + public let aiAgentId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + /// The origin of the AI Agent versions to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + public let origin: Origin? + + @inlinable + public init(aiAgentId: String, assistantId: String, maxResults: Int? = nil, nextToken: String? = nil, origin: Origin? = nil) { + self.aiAgentId = aiAgentId + self.assistantId = assistantId + self.maxResults = maxResults + self.nextToken = nextToken + self.origin = origin + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiAgentId, key: "aiAgentId") + request.encodePath(self.assistantId, key: "assistantId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.origin, key: "origin") + } + + public func validate(name: String) throws { + try self.validate(self.aiAgentId, name: "aiAgentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListAIAgentVersionsResponse: AWSDecodableShape { + /// The summaries of AI Agent versions. + public let aiAgentVersionSummaries: [AIAgentVersionSummary] + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + + @inlinable + public init(aiAgentVersionSummaries: [AIAgentVersionSummary], nextToken: String? = nil) { + self.aiAgentVersionSummaries = aiAgentVersionSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case aiAgentVersionSummaries = "aiAgentVersionSummaries" + case nextToken = "nextToken" + } + } + + public struct ListAIAgentsRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + /// The origin of the AI Agents to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + public let origin: Origin? + + @inlinable + public init(assistantId: String, maxResults: Int? = nil, nextToken: String? = nil, origin: Origin? = nil) { + self.assistantId = assistantId + self.maxResults = maxResults + self.nextToken = nextToken + self.origin = origin + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assistantId, key: "assistantId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.origin, key: "origin") + } + + public func validate(name: String) throws { + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListAIAgentsResponse: AWSDecodableShape { + /// The summaries of AI Agents. + public let aiAgentSummaries: [AIAgentSummary] + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + + @inlinable + public init(aiAgentSummaries: [AIAgentSummary], nextToken: String? = nil) { + self.aiAgentSummaries = aiAgentSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case aiAgentSummaries = "aiAgentSummaries" + case nextToken = "nextToken" + } + } + + public struct ListAIPromptVersionsRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI prompt for which versions are to be listed. + public let aiPromptId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + /// The origin of the AI Prompt versions to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + public let origin: Origin? + + @inlinable + public init(aiPromptId: String, assistantId: String, maxResults: Int? = nil, nextToken: String? = nil, origin: Origin? = nil) { + self.aiPromptId = aiPromptId + self.assistantId = assistantId + self.maxResults = maxResults + self.nextToken = nextToken + self.origin = origin + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiPromptId, key: "aiPromptId") + request.encodePath(self.assistantId, key: "assistantId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.origin, key: "origin") + } + + public func validate(name: String) throws { + try self.validate(self.aiPromptId, name: "aiPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListAIPromptVersionsResponse: AWSDecodableShape { + /// The summaries of the AI Prompt versions. + public let aiPromptVersionSummaries: [AIPromptVersionSummary] + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + + @inlinable + public init(aiPromptVersionSummaries: [AIPromptVersionSummary], nextToken: String? = nil) { + self.aiPromptVersionSummaries = aiPromptVersionSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case aiPromptVersionSummaries = "aiPromptVersionSummaries" + case nextToken = "nextToken" + } + } + + public struct ListAIPromptsRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The maximum number of results to return per page. + public let maxResults: Int? + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + /// The origin of the AI Prompts to be listed. SYSTEM for a default AI Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation APIs. + public let origin: Origin? + + @inlinable + public init(assistantId: String, maxResults: Int? = nil, nextToken: String? = nil, origin: Origin? = nil) { + self.assistantId = assistantId + self.maxResults = maxResults + self.nextToken = nextToken + self.origin = origin + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assistantId, key: "assistantId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodeQuery(self.origin, key: "origin") + } + + public func validate(name: String) throws { + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListAIPromptsResponse: AWSDecodableShape { + /// The summaries of the AI Prompts. + public let aiPromptSummaries: [AIPromptSummary] + /// The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. + public let nextToken: String? + + @inlinable + public init(aiPromptSummaries: [AIPromptSummary], nextToken: String? = nil) { + self.aiPromptSummaries = aiPromptSummaries + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case aiPromptSummaries = "aiPromptSummaries" + case nextToken = "nextToken" } } @@ -2979,6 +4602,31 @@ extension QConnect { } } + public struct ManualSearchAIAgentConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The AI Prompt identifier for the Answer Generation prompt used by the MANUAL_SEARCH AI Agent. + public let answerGenerationAIPromptId: String? + /// The association configurations for overriding behavior on this AI Agent. + public let associationConfigurations: [AssociationConfiguration]? + + @inlinable + public init(answerGenerationAIPromptId: String? = nil, associationConfigurations: [AssociationConfiguration]? = nil) { + self.answerGenerationAIPromptId = answerGenerationAIPromptId + self.associationConfigurations = associationConfigurations + } + + public func validate(name: String) throws { + try self.validate(self.answerGenerationAIPromptId, name: "answerGenerationAIPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$") + try self.associationConfigurations?.forEach { + try $0.validate(name: "\(name).associationConfigurations[]") + } + } + + private enum CodingKeys: String, CodingKey { + case answerGenerationAIPromptId = "answerGenerationAIPromptId" + case associationConfigurations = "associationConfigurations" + } + } + public struct NotifyRecommendationsReceivedError: AWSDecodableShape { /// A recommendation is causing an error. public let message: String? @@ -3031,25 +4679,66 @@ extension QConnect { } private enum CodingKeys: String, CodingKey { - case recommendationIds = "recommendationIds" + case recommendationIds = "recommendationIds" + } + } + + public struct NotifyRecommendationsReceivedResponse: AWSDecodableShape { + /// The identifiers of recommendations that are causing errors. + public let errors: [NotifyRecommendationsReceivedError]? + /// The identifiers of the recommendations. 
+ public let recommendationIds: [String]? + + @inlinable + public init(errors: [NotifyRecommendationsReceivedError]? = nil, recommendationIds: [String]? = nil) { + self.errors = errors + self.recommendationIds = recommendationIds + } + + private enum CodingKeys: String, CodingKey { + case errors = "errors" + case recommendationIds = "recommendationIds" + } + } + + public struct ParsingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Settings for a foundation model used to parse documents for a data source. + public let bedrockFoundationModelConfiguration: BedrockFoundationModelConfigurationForParsing? + /// The parsing strategy for the data source. + public let parsingStrategy: ParsingStrategy + + @inlinable + public init(bedrockFoundationModelConfiguration: BedrockFoundationModelConfigurationForParsing? = nil, parsingStrategy: ParsingStrategy) { + self.bedrockFoundationModelConfiguration = bedrockFoundationModelConfiguration + self.parsingStrategy = parsingStrategy + } + + public func validate(name: String) throws { + try self.bedrockFoundationModelConfiguration?.validate(name: "\(name).bedrockFoundationModelConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case bedrockFoundationModelConfiguration = "bedrockFoundationModelConfiguration" + case parsingStrategy = "parsingStrategy" } } - public struct NotifyRecommendationsReceivedResponse: AWSDecodableShape { - /// The identifiers of recommendations that are causing errors. - public let errors: [NotifyRecommendationsReceivedError]? - /// The identifiers of the recommendations. - public let recommendationIds: [String]? + public struct ParsingPrompt: AWSEncodableShape & AWSDecodableShape { + /// Instructions for interpreting the contents of a document. + public let parsingPromptText: String @inlinable - public init(errors: [NotifyRecommendationsReceivedError]? = nil, recommendationIds: [String]? = nil) { - self.errors = errors - self.recommendationIds = recommendationIds + public init(parsingPromptText: String) { + self.parsingPromptText = parsingPromptText + } + + public func validate(name: String) throws { + try self.validate(self.parsingPromptText, name: "parsingPromptText", parent: name, max: 10000) + try self.validate(self.parsingPromptText, name: "parsingPromptText", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { - case errors = "errors" - case recommendationIds = "recommendationIds" + case parsingPromptText = "parsingPromptText" } } @@ -3130,19 +4819,25 @@ extension QConnect { /// The token for the next set of results. Use the value returned in the previous /// response in the next request to retrieve the next set of results. public let nextToken: String? + /// The search type to be used against the Knowledge Base for this request. The values can be SEMANTIC which uses vector embeddings or HYBRID which use vector embeddings and raw text. + public let overrideKnowledgeBaseSearchType: KnowledgeBaseSearchType? /// Information about how to query content. public let queryCondition: [QueryCondition]? + /// Information about the query. + public let queryInputData: QueryInputData? /// The text to search for. - public let queryText: String + public let queryText: String? /// The identifier of the Amazon Q in Connect session. Can be either the ID or the ARN. URLs cannot contain the ARN. public let sessionId: String? @inlinable - public init(assistantId: String, maxResults: Int? = nil, nextToken: String? = nil, queryCondition: [QueryCondition]? = nil, queryText: String, sessionId: String? 
= nil) { + public init(assistantId: String, maxResults: Int? = nil, nextToken: String? = nil, overrideKnowledgeBaseSearchType: KnowledgeBaseSearchType? = nil, queryCondition: [QueryCondition]? = nil, queryInputData: QueryInputData? = nil, queryText: String? = nil, sessionId: String? = nil) { self.assistantId = assistantId self.maxResults = maxResults self.nextToken = nextToken + self.overrideKnowledgeBaseSearchType = overrideKnowledgeBaseSearchType self.queryCondition = queryCondition + self.queryInputData = queryInputData self.queryText = queryText self.sessionId = sessionId } @@ -3153,8 +4848,10 @@ extension QConnect { request.encodePath(self.assistantId, key: "assistantId") try container.encodeIfPresent(self.maxResults, forKey: .maxResults) try container.encodeIfPresent(self.nextToken, forKey: .nextToken) + try container.encodeIfPresent(self.overrideKnowledgeBaseSearchType, forKey: .overrideKnowledgeBaseSearchType) try container.encodeIfPresent(self.queryCondition, forKey: .queryCondition) - try container.encode(self.queryText, forKey: .queryText) + try container.encodeIfPresent(self.queryInputData, forKey: .queryInputData) + try container.encodeIfPresent(self.queryText, forKey: .queryText) try container.encodeIfPresent(self.sessionId, forKey: .sessionId) } @@ -3168,14 +4865,17 @@ extension QConnect { try $0.validate(name: "\(name).queryCondition[]") } try self.validate(self.queryCondition, name: "queryCondition", parent: name, max: 1) - try self.validate(self.queryText, name: "queryText", parent: name, max: 1024) + try self.queryInputData?.validate(name: "\(name).queryInputData") + try self.validate(self.queryText, name: "queryText", parent: name, max: 512) try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") } private enum CodingKeys: String, CodingKey { case maxResults = "maxResults" case nextToken = "nextToken" + case overrideKnowledgeBaseSearchType = "overrideKnowledgeBaseSearchType" case queryCondition = "queryCondition" + case queryInputData = "queryInputData" case queryText = "queryText" case sessionId = "sessionId" } @@ -3240,6 +4940,24 @@ extension QConnect { } } + public struct QueryTextInputData: AWSEncodableShape { + /// The text to search for. + public let text: String + + @inlinable + public init(text: String) { + self.text = text + } + + public func validate(name: String) throws { + try self.validate(self.text, name: "text", parent: name, max: 512) + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + } + } + public struct QuickResponseContents: AWSDecodableShape { public let markdown: QuickResponseContentProvider? public let plainText: QuickResponseContentProvider? @@ -3711,6 +5429,36 @@ extension QConnect { } } + public struct RemoveAssistantAIAgentRequest: AWSEncodableShape { + /// The type of the AI Agent being removed for use by default from the Amazon Q in Connect Assistant. + public let aiAgentType: AIAgentType + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. 
+ public let assistantId: String + + @inlinable + public init(aiAgentType: AIAgentType, assistantId: String) { + self.aiAgentType = aiAgentType + self.assistantId = assistantId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.aiAgentType, key: "aiAgentType") + request.encodePath(self.assistantId, key: "assistantId") + } + + public func validate(name: String) throws { + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct RemoveAssistantAIAgentResponse: AWSDecodableShape { + public init() {} + } + public struct RemoveKnowledgeBaseTemplateUriRequest: AWSEncodableShape { /// The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN. public let knowledgeBaseId: String @@ -3786,6 +5534,30 @@ extension QConnect { } } + public struct RuntimeSessionData: AWSEncodableShape & AWSDecodableShape { + /// The key of the data stored on the session. + public let key: String + /// The value of the data stored on the session. + public let value: RuntimeSessionDataValue + + @inlinable + public init(key: String, value: RuntimeSessionDataValue) { + self.key = key + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 4096) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.value.validate(name: "\(name).value") + } + + private enum CodingKeys: String, CodingKey { + case key = "key" + case value = "value" + } + } + public struct SearchContentRequest: AWSEncodableShape { /// The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN. public let knowledgeBaseId: String @@ -3992,6 +5764,46 @@ extension QConnect { } } + public struct SeedUrl: AWSEncodableShape & AWSDecodableShape { + /// URL for crawling + public let url: String? + + @inlinable + public init(url: String? = nil) { + self.url = url + } + + public func validate(name: String) throws { + try self.validate(self.url, name: "url", parent: name, pattern: "^https?://[A-Za-z0-9][^\\s]*$") + } + + private enum CodingKeys: String, CodingKey { + case url = "url" + } + } + + public struct SemanticChunkingConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The dissimilarity threshold for splitting chunks. + public let breakpointPercentileThreshold: Int + /// The buffer size. + public let bufferSize: Int + /// The maximum number of tokens that a chunk can contain. 
+ public let maxTokens: Int + + @inlinable + public init(breakpointPercentileThreshold: Int, bufferSize: Int, maxTokens: Int) { + self.breakpointPercentileThreshold = breakpointPercentileThreshold + self.bufferSize = bufferSize + self.maxTokens = maxTokens + } + + private enum CodingKeys: String, CodingKey { + case breakpointPercentileThreshold = "breakpointPercentileThreshold" + case bufferSize = "bufferSize" + case maxTokens = "maxTokens" + } + } + public struct ServerSideEncryptionConfiguration: AWSEncodableShape & AWSDecodableShape { /// The customer managed key used for encryption. For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for your instance. For information about valid ID values, see Key identifiers (KeyId). public let kmsKeyId: String? @@ -4012,6 +5824,8 @@ extension QConnect { } public struct SessionData: AWSDecodableShape { + /// The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that should be used by Amazon Q in Connect for this Session. + public let aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? /// The description of the session. public let description: String? /// The configuration information for the session integration. @@ -4028,7 +5842,8 @@ extension QConnect { public let tags: [String: String]? @inlinable - public init(description: String? = nil, integrationConfiguration: SessionIntegrationConfiguration? = nil, name: String, sessionArn: String, sessionId: String, tagFilter: TagFilter? = nil, tags: [String: String]? = nil) { + public init(aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? = nil, description: String? = nil, integrationConfiguration: SessionIntegrationConfiguration? = nil, name: String, sessionArn: String, sessionId: String, tagFilter: TagFilter? = nil, tags: [String: String]? = nil) { + self.aiAgentConfiguration = aiAgentConfiguration self.description = description self.integrationConfiguration = integrationConfiguration self.name = name @@ -4039,6 +5854,7 @@ extension QConnect { } private enum CodingKeys: String, CodingKey { + case aiAgentConfiguration = "aiAgentConfiguration" case description = "description" case integrationConfiguration = "integrationConfiguration" case name = "name" @@ -4090,6 +5906,8 @@ extension QConnect { } public struct SourceContentDataDetails: AWSDecodableShape { + /// Contains information about where the text with a citation begins and ends in the generated output. + public let citationSpan: CitationSpan? /// The identifier of the source content. public let id: String /// Details about the source content ranking data. @@ -4100,7 +5918,8 @@ extension QConnect { public let type: SourceContentType @inlinable - public init(id: String, rankingData: RankingData, textData: TextData, type: SourceContentType) { + public init(citationSpan: CitationSpan? = nil, id: String, rankingData: RankingData, textData: TextData, type: SourceContentType) { + self.citationSpan = citationSpan self.id = id self.rankingData = rankingData self.textData = textData @@ -4108,6 +5927,7 @@ extension QConnect { } private enum CodingKeys: String, CodingKey { + case citationSpan = "citationSpan" case id = "id" case rankingData = "rankingData" case textData = "textData" @@ -4235,138 +6055,340 @@ extension QConnect { case metadata = "metadata" case uploadId = "uploadId" } - } - - public struct StartImportJobResponse: AWSDecodableShape { - /// The import job. - public let importJob: ImportJobData? 
+ } + + public struct StartImportJobResponse: AWSDecodableShape { + /// The import job. + public let importJob: ImportJobData? + + @inlinable + public init(importJob: ImportJobData? = nil) { + self.importJob = importJob + } + + private enum CodingKeys: String, CodingKey { + case importJob = "importJob" + } + } + + public struct TagCondition: AWSEncodableShape & AWSDecodableShape { + /// The tag key in the tag condition. + public let key: String + /// The tag value in the tag condition. + public let value: String? + + @inlinable + public init(key: String, value: String? = nil) { + self.key = key + self.value = value + } + + public func validate(name: String) throws { + try self.validate(self.key, name: "key", parent: name, max: 128) + try self.validate(self.key, name: "key", parent: name, min: 1) + try self.validate(self.key, name: "key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try self.validate(self.value, name: "value", parent: name, max: 256) + try self.validate(self.value, name: "value", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case key = "key" + case value = "value" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// The tags used to organize, track, or control access for this resource. + public let tags: [String: String] + + @inlinable + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + try container.encode(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.tags.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, min: 1) + } + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct TextData: AWSDecodableShape { + public let excerpt: DocumentText? + public let title: DocumentText? + + @inlinable + public init(excerpt: DocumentText? = nil, title: DocumentText? = nil) { + self.excerpt = excerpt + self.title = title + } + + private enum CodingKeys: String, CodingKey { + case excerpt = "excerpt" + case title = "title" + } + } + + public struct TextFullAIPromptEditTemplateConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The YAML text for the AI Prompt template. 
+ public let text: String + + @inlinable + public init(text: String) { + self.text = text + } + + public func validate(name: String) throws { + try self.validate(self.text, name: "text", parent: name, max: 200000) + try self.validate(self.text, name: "text", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case text = "text" + } + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource. + public let resourceArn: String + /// The tag keys. + public let tagKeys: [String] + + @inlinable + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50) + try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateAIAgentRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI Agent. + public let aiAgentId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + public let clientToken: String? + /// The configuration of the Amazon Q in Connect AI Agent. + public let configuration: AIAgentConfiguration? + /// The description of the Amazon Q in Connect AI Agent. + public let description: String? + /// The visibility status of the Amazon Q in Connect AI Agent. + public let visibilityStatus: VisibilityStatus + + @inlinable + public init(aiAgentId: String, assistantId: String, clientToken: String? = UpdateAIAgentRequest.idempotencyToken(), configuration: AIAgentConfiguration? = nil, description: String? = nil, visibilityStatus: VisibilityStatus) { + self.aiAgentId = aiAgentId + self.assistantId = assistantId + self.clientToken = clientToken + self.configuration = configuration + self.description = description + self.visibilityStatus = visibilityStatus + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as!
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.aiAgentId, key: "aiAgentId") + request.encodePath(self.assistantId, key: "assistantId") + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.configuration, forKey: .configuration) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encode(self.visibilityStatus, forKey: .visibilityStatus) + } - @inlinable - public init(importJob: ImportJobData? = nil) { - self.importJob = importJob + public func validate(name: String) throws { + try self.validate(self.aiAgentId, name: "aiAgentId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 4096) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.configuration?.validate(name: "\(name).configuration") + try self.validate(self.description, name: "description", parent: name, max: 255) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, pattern: "^[a-zA-Z0-9\\s_.,-]+") } private enum CodingKeys: String, CodingKey { - case importJob = "importJob" + case clientToken = "clientToken" + case configuration = "configuration" + case description = "description" + case visibilityStatus = "visibilityStatus" } } - public struct TagCondition: AWSEncodableShape & AWSDecodableShape { - /// The tag key in the tag condition. - public let key: String - /// The tag value in the tag condition. - public let value: String? + public struct UpdateAIAgentResponse: AWSDecodableShape { + /// The data of the updated Amazon Q in Connect AI Agent. + public let aiAgent: AIAgentData? @inlinable - public init(key: String, value: String? = nil) { - self.key = key - self.value = value - } - - public func validate(name: String) throws { - try self.validate(self.key, name: "key", parent: name, max: 128) - try self.validate(self.key, name: "key", parent: name, min: 1) - try self.validate(self.key, name: "key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - try self.validate(self.value, name: "value", parent: name, max: 256) - try self.validate(self.value, name: "value", parent: name, min: 1) + public init(aiAgent: AIAgentData? = nil) { + self.aiAgent = aiAgent } private enum CodingKeys: String, CodingKey { - case key = "key" - case value = "value" + case aiAgent = "aiAgent" } } - public struct TagResourceRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the resource. - public let resourceArn: String - /// The tags used to organize, track, or control access for this resource. - public let tags: [String: String] + public struct UpdateAIPromptRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect AI Prompt. 
+ public let aiPromptId: String + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. + public let clientToken: String? + /// The description of the Amazon Q in Connect AI Prompt. + public let description: String? + /// The configuration of the prompt template for this AI Prompt. + public let templateConfiguration: AIPromptTemplateConfiguration? + /// The visibility status of the Amazon Q in Connect AI prompt. + public let visibilityStatus: VisibilityStatus @inlinable - public init(resourceArn: String, tags: [String: String]) { - self.resourceArn = resourceArn - self.tags = tags + public init(aiPromptId: String, assistantId: String, clientToken: String? = UpdateAIPromptRequest.idempotencyToken(), description: String? = nil, templateConfiguration: AIPromptTemplateConfiguration? = nil, visibilityStatus: VisibilityStatus) { + self.aiPromptId = aiPromptId + self.assistantId = assistantId + self.clientToken = clientToken + self.description = description + self.templateConfiguration = templateConfiguration + self.visibilityStatus = visibilityStatus } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "resourceArn") - try container.encode(self.tags, forKey: .tags) + request.encodePath(self.aiPromptId, key: "aiPromptId") + request.encodePath(self.assistantId, key: "assistantId") + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encodeIfPresent(self.description, forKey: .description) + try container.encodeIfPresent(self.templateConfiguration, forKey: .templateConfiguration) + try container.encode(self.visibilityStatus, forKey: .visibilityStatus) } public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") - try self.tags.forEach { - try validate($0.key, name: "tags.key", parent: name, max: 128) - try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, min: 1) - } + try self.validate(self.aiPromptId, name: "aiPromptId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$") + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try 
self.validate(self.clientToken, name: "clientToken", parent: name, max: 4096) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, max: 255) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, pattern: "^[a-zA-Z0-9\\s_.,-]+") + try self.templateConfiguration?.validate(name: "\(name).templateConfiguration") } private enum CodingKeys: String, CodingKey { - case tags = "tags" + case clientToken = "clientToken" + case description = "description" + case templateConfiguration = "templateConfiguration" + case visibilityStatus = "visibilityStatus" } } - public struct TagResourceResponse: AWSDecodableShape { - public init() {} - } - - public struct TextData: AWSDecodableShape { - public let excerpt: DocumentText? - public let title: DocumentText? + public struct UpdateAIPromptResponse: AWSDecodableShape { + /// The data of the updated Amazon Q in Connect AI Prompt. + public let aiPrompt: AIPromptData? @inlinable - public init(excerpt: DocumentText? = nil, title: DocumentText? = nil) { - self.excerpt = excerpt - self.title = title + public init(aiPrompt: AIPromptData? = nil) { + self.aiPrompt = aiPrompt } private enum CodingKeys: String, CodingKey { - case excerpt = "excerpt" - case title = "title" + case aiPrompt = "aiPrompt" } } - public struct UntagResourceRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the resource. - public let resourceArn: String - /// The tag keys. - public let tagKeys: [String] + public struct UpdateAssistantAIAgentRequest: AWSEncodableShape { + /// The type of the AI Agent being updated for use by default on the Amazon Q in Connect Assistant. + public let aiAgentType: AIAgentType + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The configuration of the AI Agent being updated for use by default on the Amazon Q in Connect Assistant. + public let configuration: AIAgentConfigurationData @inlinable - public init(resourceArn: String, tagKeys: [String]) { - self.resourceArn = resourceArn - self.tagKeys = tagKeys + public init(aiAgentType: AIAgentType, assistantId: String, configuration: AIAgentConfigurationData) { + self.aiAgentType = aiAgentType + self.assistantId = assistantId + self.configuration = configuration } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "resourceArn") - request.encodeQuery(self.tagKeys, key: "tagKeys") + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(self.aiAgentType, forKey: .aiAgentType) + request.encodePath(self.assistantId, key: "assistantId") + try container.encode(self.configuration, forKey: .configuration) } public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") - try self.tagKeys.forEach { - try validate($0, name: "tagKeys[]", parent: name, max: 128) - try validate($0, name: "tagKeys[]", parent: name, min: 1) - try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - } - try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50) - try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1) + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.configuration.validate(name: "\(name).configuration") } - private enum CodingKeys: CodingKey {} + private enum CodingKeys: String, CodingKey { + case aiAgentType = "aiAgentType" + case configuration = "configuration" + } } - public struct UntagResourceResponse: AWSDecodableShape { - public init() {} + public struct UpdateAssistantAIAgentResponse: AWSDecodableShape { + public let assistant: AssistantData? + + @inlinable + public init(assistant: AssistantData? = nil) { + self.assistant = assistant + } + + private enum CodingKeys: String, CodingKey { + case assistant = "assistant" + } } public struct UpdateContentRequest: AWSEncodableShape { @@ -4617,7 +6639,77 @@ extension QConnect { } } + public struct UpdateSessionDataRequest: AWSEncodableShape { + /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let assistantId: String + /// The data stored on the Amazon Q in Connect Session. + public let data: [RuntimeSessionData] + /// The namespace into which the session data is stored. Supported namespaces are: Custom + public let namespace: SessionDataNamespace? + /// The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the ARN. + public let sessionId: String + + @inlinable + public init(assistantId: String, data: [RuntimeSessionData], namespace: SessionDataNamespace? = nil, sessionId: String) { + self.assistantId = assistantId + self.data = data + self.namespace = namespace + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.assistantId, key: "assistantId") + try container.encode(self.data, forKey: .data) + try container.encodeIfPresent(self.namespace, forKey: .namespace) + request.encodePath(self.sessionId, key: "sessionId") + } + + public func validate(name: String) throws { + try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + try self.data.forEach { + try $0.validate(name: "\(name).data[]") + } + try self.validate(self.data, name: "data", parent: name, max: 50) + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") + } + + private enum CodingKeys: String, CodingKey { + case data = "data" + case namespace = "namespace" + } + } + + public struct UpdateSessionDataResponse: AWSDecodableShape { + /// Data stored in the session. + public let data: [RuntimeSessionData] + /// The namespace into which the session data is stored. Supported namespaces are: Custom + public let namespace: SessionDataNamespace + /// The Amazon Resource Name (ARN) of the session. + public let sessionArn: String + /// The identifier of the session. + public let sessionId: String + + @inlinable + public init(data: [RuntimeSessionData], namespace: SessionDataNamespace, sessionArn: String, sessionId: String) { + self.data = data + self.namespace = namespace + self.sessionArn = sessionArn + self.sessionId = sessionId + } + + private enum CodingKeys: String, CodingKey { + case data = "data" + case namespace = "namespace" + case sessionArn = "sessionArn" + case sessionId = "sessionId" + } + } + public struct UpdateSessionRequest: AWSEncodableShape { + /// The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that should be used by Amazon Q in Connect for this Session. + public let aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? /// The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN. public let assistantId: String /// The description. @@ -4628,7 +6720,8 @@ extension QConnect { public let tagFilter: TagFilter? @inlinable - public init(assistantId: String, description: String? = nil, sessionId: String, tagFilter: TagFilter? = nil) { + public init(aiAgentConfiguration: [AIAgentType: AIAgentConfigurationData]? = nil, assistantId: String, description: String? = nil, sessionId: String, tagFilter: TagFilter? = nil) { + self.aiAgentConfiguration = aiAgentConfiguration self.assistantId = assistantId self.description = description self.sessionId = sessionId @@ -4638,6 +6731,7 @@ extension QConnect { public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.aiAgentConfiguration, forKey: .aiAgentConfiguration) request.encodePath(self.assistantId, key: "assistantId") try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.sessionId, key: "sessionId") @@ -4645,6 +6739,9 @@ extension QConnect { } public func validate(name: String) throws { + try self.aiAgentConfiguration?.forEach { + try $0.value.validate(name: "\(name).aiAgentConfiguration[\"\($0.key)\"]") + } try self.validate(self.assistantId, name: "assistantId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$") try self.validate(self.description, name: "description", parent: name, max: 255) try self.validate(self.description, name: "description", parent: name, min: 1) @@ -4654,6 +6751,7 @@ extension QConnect { } private enum CodingKeys: String, CodingKey { + case aiAgentConfiguration = "aiAgentConfiguration" case description = "description" case tagFilter = "tagFilter" } @@ -4672,6 +6770,129 @@ extension QConnect { } } + public struct UrlConfiguration: AWSEncodableShape & AWSDecodableShape { + /// List of URLs for crawling. + public let seedUrls: [SeedUrl]? + + @inlinable + public init(seedUrls: [SeedUrl]? = nil) { + self.seedUrls = seedUrls + } + + public func validate(name: String) throws { + try self.seedUrls?.forEach { + try $0.validate(name: "\(name).seedUrls[]") + } + try self.validate(self.seedUrls, name: "seedUrls", parent: name, max: 100) + try self.validate(self.seedUrls, name: "seedUrls", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case seedUrls = "seedUrls" + } + } + + public struct VectorIngestionConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Details about how to chunk the documents in the data source. A chunk refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. + public let chunkingConfiguration: ChunkingConfiguration? + /// A custom parser for data source documents. + public let parsingConfiguration: ParsingConfiguration? + + @inlinable + public init(chunkingConfiguration: ChunkingConfiguration? = nil, parsingConfiguration: ParsingConfiguration? = nil) { + self.chunkingConfiguration = chunkingConfiguration + self.parsingConfiguration = parsingConfiguration + } + + public func validate(name: String) throws { + try self.chunkingConfiguration?.validate(name: "\(name).chunkingConfiguration") + try self.parsingConfiguration?.validate(name: "\(name).parsingConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case chunkingConfiguration = "chunkingConfiguration" + case parsingConfiguration = "parsingConfiguration" + } + } + + public struct WebCrawlerConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration of crawl limits for the web URLs. + public let crawlerLimits: WebCrawlerLimits? + /// A list of one or more exclusion regular expression patterns to exclude certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled. + public let exclusionFilters: [String]? + /// A list of one or more inclusion regular expression patterns to include certain URLs. 
If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn’t crawled. + public let inclusionFilters: [String]? + /// The scope of what is crawled for your URLs. You can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL https://docs.aws.amazon.com/bedrock/latest/userguide/ and no other domains. You can choose to include sub domains in addition to the host or primary domain. For example, web pages that contain aws.amazon.com can also include sub domain docs.aws.amazon.com. + public let scope: WebScopeType? + /// The configuration of the URL/URLs for the web content that you want to crawl. You should be authorized to crawl the URLs. + public let urlConfiguration: UrlConfiguration + + @inlinable + public init(crawlerLimits: WebCrawlerLimits? = nil, exclusionFilters: [String]? = nil, inclusionFilters: [String]? = nil, scope: WebScopeType? = nil, urlConfiguration: UrlConfiguration) { + self.crawlerLimits = crawlerLimits + self.exclusionFilters = exclusionFilters + self.inclusionFilters = inclusionFilters + self.scope = scope + self.urlConfiguration = urlConfiguration + } + + public func validate(name: String) throws { + try self.exclusionFilters?.forEach { + try validate($0, name: "exclusionFilters[]", parent: name, max: 1000) + try validate($0, name: "exclusionFilters[]", parent: name, min: 1) + } + try self.validate(self.exclusionFilters, name: "exclusionFilters", parent: name, max: 25) + try self.validate(self.exclusionFilters, name: "exclusionFilters", parent: name, min: 1) + try self.inclusionFilters?.forEach { + try validate($0, name: "inclusionFilters[]", parent: name, max: 1000) + try validate($0, name: "inclusionFilters[]", parent: name, min: 1) + } + try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, max: 25) + try self.validate(self.inclusionFilters, name: "inclusionFilters", parent: name, min: 1) + try self.urlConfiguration.validate(name: "\(name).urlConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case crawlerLimits = "crawlerLimits" + case exclusionFilters = "exclusionFilters" + case inclusionFilters = "inclusionFilters" + case scope = "scope" + case urlConfiguration = "urlConfiguration" + } + } + + public struct WebCrawlerLimits: AWSEncodableShape & AWSDecodableShape { + /// Rate of web URLs retrieved per minute. + public let rateLimit: Int? + + @inlinable + public init(rateLimit: Int? = nil) { + self.rateLimit = rateLimit + } + + private enum CodingKeys: String, CodingKey { + case rateLimit = "rateLimit" + } + } + + public struct AIPromptTemplateConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The configuration for a prompt template that supports full textual prompt configuration using a YAML prompt. + public let textFullAIPromptEditTemplateConfiguration: TextFullAIPromptEditTemplateConfiguration? + + @inlinable + public init(textFullAIPromptEditTemplateConfiguration: TextFullAIPromptEditTemplateConfiguration? 
= nil) { + self.textFullAIPromptEditTemplateConfiguration = textFullAIPromptEditTemplateConfiguration + } + + public func validate(name: String) throws { + try self.textFullAIPromptEditTemplateConfiguration?.validate(name: "\(name).textFullAIPromptEditTemplateConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case textFullAIPromptEditTemplateConfiguration = "textFullAIPromptEditTemplateConfiguration" + } + } + public struct AssistantAssociationInputData: AWSEncodableShape { /// The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base. public let knowledgeBaseId: String? @@ -4704,6 +6925,24 @@ extension QConnect { } } + public struct AssociationConfigurationData: AWSEncodableShape & AWSDecodableShape { + /// The data of the configuration for a KNOWLEDGE_BASE type Amazon Q in Connect Assistant Association. + public let knowledgeBaseAssociationConfigurationData: KnowledgeBaseAssociationConfigurationData? + + @inlinable + public init(knowledgeBaseAssociationConfigurationData: KnowledgeBaseAssociationConfigurationData? = nil) { + self.knowledgeBaseAssociationConfigurationData = knowledgeBaseAssociationConfigurationData + } + + public func validate(name: String) throws { + try self.knowledgeBaseAssociationConfigurationData?.validate(name: "\(name).knowledgeBaseAssociationConfigurationData") + } + + private enum CodingKeys: String, CodingKey { + case knowledgeBaseAssociationConfigurationData = "knowledgeBaseAssociationConfigurationData" + } + } + public struct Configuration: AWSEncodableShape & AWSDecodableShape { /// The configuration information of the Amazon Connect data source. public let connectConfiguration: ConnectConfiguration? @@ -4754,6 +6993,24 @@ extension QConnect { } } + public struct ManagedSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// Configuration data for web crawler data source. + public let webCrawlerConfiguration: WebCrawlerConfiguration? + + @inlinable + public init(webCrawlerConfiguration: WebCrawlerConfiguration? = nil) { + self.webCrawlerConfiguration = webCrawlerConfiguration + } + + public func validate(name: String) throws { + try self.webCrawlerConfiguration?.validate(name: "\(name).webCrawlerConfiguration") + } + + private enum CodingKeys: String, CodingKey { + case webCrawlerConfiguration = "webCrawlerConfiguration" + } + } + public struct QueryCondition: AWSEncodableShape { /// The condition for the query. public let single: QueryConditionItem? @@ -4819,21 +7076,22 @@ extension QConnect { } } - public struct SourceConfiguration: AWSEncodableShape & AWSDecodableShape { - /// Configuration information for Amazon AppIntegrations to automatically ingest content. - public let appIntegrations: AppIntegrationsConfiguration? + public struct RuntimeSessionDataValue: AWSEncodableShape & AWSDecodableShape { + /// The string value of the data stored on the session. + public let stringValue: String? @inlinable - public init(appIntegrations: AppIntegrationsConfiguration? = nil) { - self.appIntegrations = appIntegrations + public init(stringValue: String? 
= nil) { + self.stringValue = stringValue } public func validate(name: String) throws { - try self.appIntegrations?.validate(name: "\(name).appIntegrations") + try self.validate(self.stringValue, name: "stringValue", parent: name, max: 4096) + try self.validate(self.stringValue, name: "stringValue", parent: name, min: 1) } private enum CodingKeys: String, CodingKey { - case appIntegrations = "appIntegrations" + case stringValue = "stringValue" } } } diff --git a/Sources/Soto/Services/QuickSight/QuickSight_api.swift b/Sources/Soto/Services/QuickSight/QuickSight_api.swift index 467f059a90..e2d819fbd1 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_api.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_api.swift @@ -3117,6 +3117,35 @@ public struct QuickSight: AWSService { return try await self.describeNamespace(input, logger: logger) } + /// Describes a personalization configuration. + @Sendable + @inlinable + public func describeQPersonalizationConfiguration(_ input: DescribeQPersonalizationConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeQPersonalizationConfigurationResponse { + try await self.client.execute( + operation: "DescribeQPersonalizationConfiguration", + path: "/accounts/{AwsAccountId}/q-personalization-configuration", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Describes a personalization configuration. + /// + /// Parameters: + /// - awsAccountId: The ID of the Amazon Web Services account that contains the personalization configuration that the user wants described. + /// - logger: Logger use during operation + @inlinable + public func describeQPersonalizationConfiguration( + awsAccountId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DescribeQPersonalizationConfigurationResponse { + let input = DescribeQPersonalizationConfigurationRequest( + awsAccountId: awsAccountId + ) + return try await self.describeQPersonalizationConfiguration(input, logger: logger) + } + /// Provides a summary of a refresh schedule. @Sendable @inlinable @@ -4145,6 +4174,44 @@ public struct QuickSight: AWSService { return try await self.listFolders(input, logger: logger) } + /// List all folders that a resource is a member of. + @Sendable + @inlinable + public func listFoldersForResource(_ input: ListFoldersForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFoldersForResourceResponse { + try await self.client.execute( + operation: "ListFoldersForResource", + path: "/accounts/{AwsAccountId}/resource/{ResourceArn}/folders", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List all folders that a resource is a member of. + /// + /// Parameters: + /// - awsAccountId: The ID for the Amazon Web Services account that contains the resource. + /// - maxResults: The maximum number of results to be returned per request. + /// - nextToken: The token for the next set of results, or null if there are no more results. + /// - resourceArn: The Amazon Resource Name (ARN) the resource whose folders you need to list. + /// - logger: Logger use during operation + @inlinable + public func listFoldersForResource( + awsAccountId: String, + maxResults: Int? = nil, + nextToken: String? 
= nil, + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListFoldersForResourceResponse { + let input = ListFoldersForResourceRequest( + awsAccountId: awsAccountId, + maxResults: maxResults, + nextToken: nextToken, + resourceArn: resourceArn + ) + return try await self.listFoldersForResource(input, logger: logger) + } + /// Lists member users in a group. @Sendable @inlinable @@ -5356,6 +5423,8 @@ public struct QuickSight: AWSService { /// - cloudFormationOverridePropertyConfiguration: An optional collection of structures that generate CloudFormation parameters to override the existing resource property values when the resource is exported to a new CloudFormation template. Use this field if the ExportFormat field of a StartAssetBundleExportJobRequest API call is set to CLOUDFORMATION_JSON. /// - exportFormat: The export data format. /// - includeAllDependencies: A Boolean that determines whether all dependencies of each resource ARN are recursively exported with the job. For example, say you provided a Dashboard ARN to the ResourceArns parameter. If you set IncludeAllDependencies to TRUE, any theme, dataset, and data source resource that is a dependency of the dashboard is also exported. + /// - includeFolderMembers: A setting that indicates whether you want to include folder assets. You can also use this setting to recursively include all subfolders of an exported folder. + /// - includeFolderMemberships: A Boolean that determines if the exported asset carries over information about the folders that the asset is a member of. /// - includePermissions: A Boolean that determines whether all permissions for each resource ARN are exported with the job. If you set IncludePermissions to TRUE, any permissions associated with each resource are exported. /// - includeTags: A Boolean that determines whether all tags for each resource ARN are exported with the job. If you set IncludeTags to TRUE, any tags associated with each resource are exported. /// - resourceArns: An array of resource ARNs to export. The following resources are supported. Analysis Dashboard DataSet DataSource RefreshSchedule Theme VPCConnection The API caller must have the necessary permissions in their IAM role to access each resource before the resources can be exported. @@ -5368,6 +5437,8 @@ public struct QuickSight: AWSService { cloudFormationOverridePropertyConfiguration: AssetBundleCloudFormationOverridePropertyConfiguration? = nil, exportFormat: AssetBundleExportFormat, includeAllDependencies: Bool? = nil, + includeFolderMembers: IncludeFolderMembers? = nil, + includeFolderMemberships: Bool? = nil, includePermissions: Bool? = nil, includeTags: Bool? = nil, resourceArns: [String], @@ -5380,6 +5451,8 @@ public struct QuickSight: AWSService { cloudFormationOverridePropertyConfiguration: cloudFormationOverridePropertyConfiguration, exportFormat: exportFormat, includeAllDependencies: includeAllDependencies, + includeFolderMembers: includeFolderMembers, + includeFolderMemberships: includeFolderMemberships, includePermissions: includePermissions, includeTags: includeTags, resourceArns: resourceArns, @@ -6377,6 +6450,38 @@ public struct QuickSight: AWSService { return try await self.updatePublicSharingSettings(input, logger: logger) } + /// Updates a personalization configuration.
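A sketch of how the new folder flags on startAssetBundleExportJob documented above might be used. It assumes the remaining generated parameters keep their defaults and that AssetBundleExportFormat exposes a .quicksightJson case; neither appears in this hunk, and the job ID is a placeholder.

import SotoQuickSight

// Export a folder and everything under it, keeping folder-membership information.
func exportFolderBundle(quickSight: QuickSight, accountId: String, folderArn: String) async throws {
    let response = try await quickSight.startAssetBundleExportJob(
        assetBundleExportJobId: "folder-export-001",
        awsAccountId: accountId,
        exportFormat: .quicksightJson,   // assumed case name
        includeFolderMembers: .recurse,  // new: recursively include members of sub folders
        includeFolderMemberships: true,  // new: keep folder memberships on exported assets
        resourceArns: [folderArn]
    )
    print(response)
}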
+ @Sendable + @inlinable + public func updateQPersonalizationConfiguration(_ input: UpdateQPersonalizationConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateQPersonalizationConfigurationResponse { + try await self.client.execute( + operation: "UpdateQPersonalizationConfiguration", + path: "/accounts/{AwsAccountId}/q-personalization-configuration", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Updates a personalization configuration. + /// + /// Parameters: + /// - awsAccountId: The ID of the Amazon Web Services account that contains the personalization configuration that the user wants to update. + /// - personalizationMode: An option to allow Amazon QuickSight to customize data stories with user specific metadata, specifically location and job information, in your IAM Identity Center instance. + /// - logger: Logger use during operation + @inlinable + public func updateQPersonalizationConfiguration( + awsAccountId: String, + personalizationMode: PersonalizationMode, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateQPersonalizationConfigurationResponse { + let input = UpdateQPersonalizationConfigurationRequest( + awsAccountId: awsAccountId, + personalizationMode: personalizationMode + ) + return try await self.updateQPersonalizationConfiguration(input, logger: logger) + } + /// Updates a refresh schedule for a dataset. @Sendable @inlinable @@ -7382,6 +7487,46 @@ extension QuickSight { return self.listFoldersPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listFoldersForResource(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listFoldersForResourcePaginator( + _ input: ListFoldersForResourceRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListFoldersForResourceRequest, ListFoldersForResourceResponse> { + return .init( + input: input, + command: self.listFoldersForResource, + inputKey: \ListFoldersForResourceRequest.nextToken, + outputKey: \ListFoldersForResourceResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listFoldersForResource(_:logger:)``. + /// + /// - Parameters: + /// - awsAccountId: The ID for the Amazon Web Services account that contains the resource. + /// - maxResults: The maximum number of results to be returned per request. + /// - resourceArn: The Amazon Resource Name (ARN) the resource whose folders you need to list. + /// - logger: Logger used for logging + @inlinable + public func listFoldersForResourcePaginator( + awsAccountId: String, + maxResults: Int? = nil, + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence<ListFoldersForResourceRequest, ListFoldersForResourceResponse> { + let input = ListFoldersForResourceRequest( + awsAccountId: awsAccountId, + maxResults: maxResults, + resourceArn: resourceArn + ) + return self.listFoldersForResourcePaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listGroupMemberships(_:logger:)``.
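A short sketch of the personalization round trip using the two convenience methods above; it references only shapes and signatures shown in this diff, and assumes a QuickSight client configured elsewhere.

import SotoQuickSight

// Enable Q personalization only if it is not already on.
func enableQPersonalization(quickSight: QuickSight, accountId: String) async throws {
    let current = try await quickSight.describeQPersonalizationConfiguration(awsAccountId: accountId)
    guard current.personalizationMode != .enabled else { return }
    _ = try await quickSight.updateQPersonalizationConfiguration(
        awsAccountId: accountId,
        personalizationMode: .enabled
    )
}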
/// /// - Parameters: @@ -8385,6 +8530,18 @@ extension QuickSight.ListFolderMembersRequest: AWSPaginateToken { } } +extension QuickSight.ListFoldersForResourceRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> QuickSight.ListFoldersForResourceRequest { + return .init( + awsAccountId: self.awsAccountId, + maxResults: self.maxResults, + nextToken: token, + resourceArn: self.resourceArn + ) + } +} + extension QuickSight.ListFoldersRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> QuickSight.ListFoldersRequest { diff --git a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift index 502c084c6e..75c466fd80 100644 --- a/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift +++ b/Sources/Soto/Services/QuickSight/QuickSight_shapes.swift @@ -153,6 +153,12 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum AssetBundleExportJobFolderPropertyToOverride: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case name = "Name" + case parentFolderArn = "ParentFolderArn" + public var description: String { return self.rawValue } + } + public enum AssetBundleExportJobRefreshSchedulePropertyToOverride: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case startAfterDateTime = "StartAfterDateTime" public var description: String { return self.rawValue } @@ -338,6 +344,12 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum CommitMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case auto = "AUTO" + case manual = "MANUAL" + public var description: String { return self.rawValue } + } + public enum ComparisonMethod: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case difference = "DIFFERENCE" case percent = "PERCENT" @@ -800,6 +812,13 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum IncludeFolderMembers: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case none = "NONE" + case oneLevel = "ONE_LEVEL" + case recurse = "RECURSE" + public var description: String { return self.rawValue } + } + public enum IngestionErrorType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case accountCapacityLimitExceeded = "ACCOUNT_CAPACITY_LIMIT_EXCEEDED" case connectionFailure = "CONNECTION_FAILURE" @@ -1143,6 +1162,12 @@ extension QuickSight { public var description: String { return self.rawValue } } + public enum PersonalizationMode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum PivotTableConditionalFormattingScopeRole: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case field = "FIELD" case fieldTotal = "FIELD_TOTAL" @@ -3079,6 +3104,8 @@ extension QuickSight { public let dataSets: [AssetBundleExportJobDataSetOverrideProperties]? /// An optional list of structures that control how DataSource resources are parameterized in the returned CloudFormation template. public let dataSources: [AssetBundleExportJobDataSourceOverrideProperties]? + /// An optional list of structures that controls how Folder resources are parameterized in the returned CloudFormation template. 
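The AWSPaginateToken extension above is what lets the new paginator page transparently; a sketch of iterating it, again assuming a configured client.

import SotoQuickSight

// Print every folder ARN that a given resource (for example a dashboard) belongs to.
func printFolderMemberships(quickSight: QuickSight, accountId: String, resourceArn: String) async throws {
    let pages = quickSight.listFoldersForResourcePaginator(
        awsAccountId: accountId,
        maxResults: 50,
        resourceArn: resourceArn
    )
    for try await page in pages {
        for folderArn in page.folders ?? [] {
            print(folderArn)
        }
    }
}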
+ public let folders: [AssetBundleExportJobFolderOverrideProperties]? /// An optional list of structures that control how RefreshSchedule resources are parameterized in the returned CloudFormation template. public let refreshSchedules: [AssetBundleExportJobRefreshScheduleOverrideProperties]? /// An optional list of structures that control how resource IDs are parameterized in the returned CloudFormation template. @@ -3089,11 +3116,12 @@ extension QuickSight { public let vpcConnections: [AssetBundleExportJobVPCConnectionOverrideProperties]? @inlinable - public init(analyses: [AssetBundleExportJobAnalysisOverrideProperties]? = nil, dashboards: [AssetBundleExportJobDashboardOverrideProperties]? = nil, dataSets: [AssetBundleExportJobDataSetOverrideProperties]? = nil, dataSources: [AssetBundleExportJobDataSourceOverrideProperties]? = nil, refreshSchedules: [AssetBundleExportJobRefreshScheduleOverrideProperties]? = nil, resourceIdOverrideConfiguration: AssetBundleExportJobResourceIdOverrideConfiguration? = nil, themes: [AssetBundleExportJobThemeOverrideProperties]? = nil, vpcConnections: [AssetBundleExportJobVPCConnectionOverrideProperties]? = nil) { + public init(analyses: [AssetBundleExportJobAnalysisOverrideProperties]? = nil, dashboards: [AssetBundleExportJobDashboardOverrideProperties]? = nil, dataSets: [AssetBundleExportJobDataSetOverrideProperties]? = nil, dataSources: [AssetBundleExportJobDataSourceOverrideProperties]? = nil, folders: [AssetBundleExportJobFolderOverrideProperties]? = nil, refreshSchedules: [AssetBundleExportJobRefreshScheduleOverrideProperties]? = nil, resourceIdOverrideConfiguration: AssetBundleExportJobResourceIdOverrideConfiguration? = nil, themes: [AssetBundleExportJobThemeOverrideProperties]? = nil, vpcConnections: [AssetBundleExportJobVPCConnectionOverrideProperties]? = nil) { self.analyses = analyses self.dashboards = dashboards self.dataSets = dataSets self.dataSources = dataSources + self.folders = folders self.refreshSchedules = refreshSchedules self.resourceIdOverrideConfiguration = resourceIdOverrideConfiguration self.themes = themes @@ -3121,6 +3149,11 @@ extension QuickSight { } try self.validate(self.dataSources, name: "dataSources", parent: name, max: 50) try self.validate(self.dataSources, name: "dataSources", parent: name, min: 1) + try self.folders?.forEach { + try $0.validate(name: "\(name).folders[]") + } + try self.validate(self.folders, name: "folders", parent: name, max: 50) + try self.validate(self.folders, name: "folders", parent: name, min: 1) try self.refreshSchedules?.forEach { try $0.validate(name: "\(name).refreshSchedules[]") } @@ -3143,6 +3176,7 @@ extension QuickSight { case dashboards = "Dashboards" case dataSets = "DataSets" case dataSources = "DataSources" + case folders = "Folders" case refreshSchedules = "RefreshSchedules" case resourceIdOverrideConfiguration = "ResourceIdOverrideConfiguration" case themes = "Themes" @@ -3264,6 +3298,29 @@ extension QuickSight { } } + public struct AssetBundleExportJobFolderOverrideProperties: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the specific Folder resource whose override properties are configured in this structure. + public let arn: String + /// A list of Folder resource properties to generate variables for in the returned CloudFormation template. 
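A sketch of wiring the new folder override into an export configuration, using only initializers and validate(name:) methods shown in this diff; the ARN is an illustrative placeholder.

import SotoQuickSight

// Parameterize a folder's Name and ParentFolderArn in the exported CloudFormation template.
func makeFolderExportOverrides() throws -> QuickSight.AssetBundleCloudFormationOverridePropertyConfiguration {
    let folderOverride = QuickSight.AssetBundleExportJobFolderOverrideProperties(
        arn: "arn:aws:quicksight:us-east-1:111122223333:folder/sales-folder", // placeholder ARN
        properties: [.name, .parentFolderArn]
    )
    let configuration = QuickSight.AssetBundleCloudFormationOverridePropertyConfiguration(folders: [folderOverride])
    try configuration.validate(name: "CloudFormationOverridePropertyConfiguration")
    return configuration
}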
+ public let properties: [AssetBundleExportJobFolderPropertyToOverride] + + @inlinable + public init(arn: String, properties: [AssetBundleExportJobFolderPropertyToOverride]) { + self.arn = arn + self.properties = properties + } + + public func validate(name: String) throws { + try self.validate(self.properties, name: "properties", parent: name, max: 10) + try self.validate(self.properties, name: "properties", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case arn = "Arn" + case properties = "Properties" + } + } + public struct AssetBundleExportJobRefreshScheduleOverrideProperties: AWSEncodableShape & AWSDecodableShape { /// The ARN of the specific RefreshSchedule resource whose override properties are configured in this structure. public let arn: String @@ -3837,6 +3894,90 @@ extension QuickSight { } } + public struct AssetBundleImportJobFolderOverrideParameters: AWSEncodableShape & AWSDecodableShape { + /// The ID of the folder that you want to apply overrides to. + public let folderId: String + /// A new name for the folder. + public let name: String? + /// A new parent folder arn. This change can only be applied if the import creates a brand new folder. Existing folders cannot be moved. + public let parentFolderArn: String? + + @inlinable + public init(folderId: String, name: String? = nil, parentFolderArn: String? = nil) { + self.folderId = folderId + self.name = name + self.parentFolderArn = parentFolderArn + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 128) + try self.validate(self.name, name: "name", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case folderId = "FolderId" + case name = "Name" + case parentFolderArn = "ParentFolderArn" + } + } + + public struct AssetBundleImportJobFolderOverridePermissions: AWSEncodableShape & AWSDecodableShape { + /// A list of folder IDs that you want to apply overrides to. You can use * to override all folders in this asset bundle. + public let folderIds: [String] + public let permissions: AssetBundleResourcePermissions? + + @inlinable + public init(folderIds: [String], permissions: AssetBundleResourcePermissions? = nil) { + self.folderIds = folderIds + self.permissions = permissions + } + + public func validate(name: String) throws { + try self.folderIds.forEach { + try validate($0, name: "folderIds[]", parent: name, pattern: "^\\*|[\\w\\-]{1,2048}$") + } + try self.validate(self.folderIds, name: "folderIds", parent: name, max: 50) + try self.validate(self.folderIds, name: "folderIds", parent: name, min: 1) + try self.permissions?.validate(name: "\(name).permissions") + } + + private enum CodingKeys: String, CodingKey { + case folderIds = "FolderIds" + case permissions = "Permissions" + } + } + + public struct AssetBundleImportJobFolderOverrideTags: AWSEncodableShape & AWSDecodableShape { + /// A list of folder IDs that you want to apply overrides to. You can use * to override all folders in this asset bundle. + public let folderIds: [String] + /// A list of tags for the folders that you want to apply overrides to. 
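Similarly, a sketch on the import side that renames a folder as it is restored, built only from initializers and validate(name:) methods in this diff; the folder ID and name are placeholders, and the other override lists are left unset.

import SotoQuickSight

// Rename a folder while importing an asset bundle.
func makeFolderImportOverrides() throws -> QuickSight.AssetBundleImportJobOverrideParameters {
    let folderRename = QuickSight.AssetBundleImportJobFolderOverrideParameters(
        folderId: "sales-folder",    // placeholder folder ID
        name: "Sales (restored)"
    )
    let overrides = QuickSight.AssetBundleImportJobOverrideParameters(folders: [folderRename])
    try overrides.validate(name: "OverrideParameters")
    return overrides
}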
+ public let tags: [Tag] + + @inlinable + public init(folderIds: [String], tags: [Tag]) { + self.folderIds = folderIds + self.tags = tags + } + + public func validate(name: String) throws { + try self.folderIds.forEach { + try validate($0, name: "folderIds[]", parent: name, pattern: "^\\*|[\\w\\-]{1,2048}$") + } + try self.validate(self.folderIds, name: "folderIds", parent: name, max: 50) + try self.validate(self.folderIds, name: "folderIds", parent: name, min: 1) + try self.tags.forEach { + try $0.validate(name: "\(name).tags[]") + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.validate(self.tags, name: "tags", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case folderIds = "FolderIds" + case tags = "Tags" + } + } + public struct AssetBundleImportJobOverrideParameters: AWSEncodableShape & AWSDecodableShape { /// A list of overrides for any Analysis resources that are present in the asset bundle that is imported. public let analyses: [AssetBundleImportJobAnalysisOverrideParameters]? @@ -3846,6 +3987,8 @@ extension QuickSight { public let dataSets: [AssetBundleImportJobDataSetOverrideParameters]? /// A list of overrides for any DataSource resources that are present in the asset bundle that is imported. public let dataSources: [AssetBundleImportJobDataSourceOverrideParameters]? + /// A list of overrides for any Folder resources that are present in the asset bundle that is imported. + public let folders: [AssetBundleImportJobFolderOverrideParameters]? /// A list of overrides for any RefreshSchedule resources that are present in the asset bundle that is imported. public let refreshSchedules: [AssetBundleImportJobRefreshScheduleOverrideParameters]? /// An optional structure that configures resource ID overrides to be applied within the import job. @@ -3856,11 +3999,12 @@ extension QuickSight { public let vpcConnections: [AssetBundleImportJobVPCConnectionOverrideParameters]? @inlinable - public init(analyses: [AssetBundleImportJobAnalysisOverrideParameters]? = nil, dashboards: [AssetBundleImportJobDashboardOverrideParameters]? = nil, dataSets: [AssetBundleImportJobDataSetOverrideParameters]? = nil, dataSources: [AssetBundleImportJobDataSourceOverrideParameters]? = nil, refreshSchedules: [AssetBundleImportJobRefreshScheduleOverrideParameters]? = nil, resourceIdOverrideConfiguration: AssetBundleImportJobResourceIdOverrideConfiguration? = nil, themes: [AssetBundleImportJobThemeOverrideParameters]? = nil, vpcConnections: [AssetBundleImportJobVPCConnectionOverrideParameters]? = nil) { + public init(analyses: [AssetBundleImportJobAnalysisOverrideParameters]? = nil, dashboards: [AssetBundleImportJobDashboardOverrideParameters]? = nil, dataSets: [AssetBundleImportJobDataSetOverrideParameters]? = nil, dataSources: [AssetBundleImportJobDataSourceOverrideParameters]? = nil, folders: [AssetBundleImportJobFolderOverrideParameters]? = nil, refreshSchedules: [AssetBundleImportJobRefreshScheduleOverrideParameters]? = nil, resourceIdOverrideConfiguration: AssetBundleImportJobResourceIdOverrideConfiguration? = nil, themes: [AssetBundleImportJobThemeOverrideParameters]? = nil, vpcConnections: [AssetBundleImportJobVPCConnectionOverrideParameters]? 
= nil) { self.analyses = analyses self.dashboards = dashboards self.dataSets = dataSets self.dataSources = dataSources + self.folders = folders self.refreshSchedules = refreshSchedules self.resourceIdOverrideConfiguration = resourceIdOverrideConfiguration self.themes = themes @@ -3888,6 +4032,11 @@ extension QuickSight { } try self.validate(self.dataSources, name: "dataSources", parent: name, max: 50) try self.validate(self.dataSources, name: "dataSources", parent: name, min: 1) + try self.folders?.forEach { + try $0.validate(name: "\(name).folders[]") + } + try self.validate(self.folders, name: "folders", parent: name, max: 50) + try self.validate(self.folders, name: "folders", parent: name, min: 1) try self.validate(self.refreshSchedules, name: "refreshSchedules", parent: name, max: 50) try self.validate(self.refreshSchedules, name: "refreshSchedules", parent: name, min: 1) try self.themes?.forEach { @@ -3907,6 +4056,7 @@ extension QuickSight { case dashboards = "Dashboards" case dataSets = "DataSets" case dataSources = "DataSources" + case folders = "Folders" case refreshSchedules = "RefreshSchedules" case resourceIdOverrideConfiguration = "ResourceIdOverrideConfiguration" case themes = "Themes" @@ -3923,15 +4073,18 @@ extension QuickSight { public let dataSets: [AssetBundleImportJobDataSetOverridePermissions]? /// A list of permissions overrides for any DataSource resources that are present in the asset bundle that is imported. public let dataSources: [AssetBundleImportJobDataSourceOverridePermissions]? + /// A list of permissions for the folders that you want to apply overrides to. + public let folders: [AssetBundleImportJobFolderOverridePermissions]? /// A list of permissions overrides for any Theme resources that are present in the asset bundle that is imported. public let themes: [AssetBundleImportJobThemeOverridePermissions]? @inlinable - public init(analyses: [AssetBundleImportJobAnalysisOverridePermissions]? = nil, dashboards: [AssetBundleImportJobDashboardOverridePermissions]? = nil, dataSets: [AssetBundleImportJobDataSetOverridePermissions]? = nil, dataSources: [AssetBundleImportJobDataSourceOverridePermissions]? = nil, themes: [AssetBundleImportJobThemeOverridePermissions]? = nil) { + public init(analyses: [AssetBundleImportJobAnalysisOverridePermissions]? = nil, dashboards: [AssetBundleImportJobDashboardOverridePermissions]? = nil, dataSets: [AssetBundleImportJobDataSetOverridePermissions]? = nil, dataSources: [AssetBundleImportJobDataSourceOverridePermissions]? = nil, folders: [AssetBundleImportJobFolderOverridePermissions]? = nil, themes: [AssetBundleImportJobThemeOverridePermissions]? 
= nil) { self.analyses = analyses self.dashboards = dashboards self.dataSets = dataSets self.dataSources = dataSources + self.folders = folders self.themes = themes } @@ -3956,6 +4109,11 @@ extension QuickSight { } try self.validate(self.dataSources, name: "dataSources", parent: name, max: 2) try self.validate(self.dataSources, name: "dataSources", parent: name, min: 1) + try self.folders?.forEach { + try $0.validate(name: "\(name).folders[]") + } + try self.validate(self.folders, name: "folders", parent: name, max: 2) + try self.validate(self.folders, name: "folders", parent: name, min: 1) try self.themes?.forEach { try $0.validate(name: "\(name).themes[]") } @@ -3968,6 +4126,7 @@ extension QuickSight { case dashboards = "Dashboards" case dataSets = "DataSets" case dataSources = "DataSources" + case folders = "Folders" case themes = "Themes" } } @@ -3981,17 +4140,20 @@ extension QuickSight { public let dataSets: [AssetBundleImportJobDataSetOverrideTags]? /// A list of tag overrides for any DataSource resources that are present in the asset bundle that is imported. public let dataSources: [AssetBundleImportJobDataSourceOverrideTags]? + /// A list of tag overrides for any Folder resources that are present in the asset bundle that is imported. + public let folders: [AssetBundleImportJobFolderOverrideTags]? /// A list of tag overrides for any Theme resources that are present in the asset bundle that is imported. public let themes: [AssetBundleImportJobThemeOverrideTags]? /// A list of tag overrides for any VPCConnection resources that are present in the asset bundle that is imported. public let vpcConnections: [AssetBundleImportJobVPCConnectionOverrideTags]? @inlinable - public init(analyses: [AssetBundleImportJobAnalysisOverrideTags]? = nil, dashboards: [AssetBundleImportJobDashboardOverrideTags]? = nil, dataSets: [AssetBundleImportJobDataSetOverrideTags]? = nil, dataSources: [AssetBundleImportJobDataSourceOverrideTags]? = nil, themes: [AssetBundleImportJobThemeOverrideTags]? = nil, vpcConnections: [AssetBundleImportJobVPCConnectionOverrideTags]? = nil) { + public init(analyses: [AssetBundleImportJobAnalysisOverrideTags]? = nil, dashboards: [AssetBundleImportJobDashboardOverrideTags]? = nil, dataSets: [AssetBundleImportJobDataSetOverrideTags]? = nil, dataSources: [AssetBundleImportJobDataSourceOverrideTags]? = nil, folders: [AssetBundleImportJobFolderOverrideTags]? = nil, themes: [AssetBundleImportJobThemeOverrideTags]? = nil, vpcConnections: [AssetBundleImportJobVPCConnectionOverrideTags]? 
= nil) { self.analyses = analyses self.dashboards = dashboards self.dataSets = dataSets self.dataSources = dataSources + self.folders = folders self.themes = themes self.vpcConnections = vpcConnections } @@ -4017,6 +4179,11 @@ extension QuickSight { } try self.validate(self.dataSources, name: "dataSources", parent: name, max: 5) try self.validate(self.dataSources, name: "dataSources", parent: name, min: 1) + try self.folders?.forEach { + try $0.validate(name: "\(name).folders[]") + } + try self.validate(self.folders, name: "folders", parent: name, max: 5) + try self.validate(self.folders, name: "folders", parent: name, min: 1) try self.themes?.forEach { try $0.validate(name: "\(name).themes[]") } @@ -4034,6 +4201,7 @@ extension QuickSight { case dashboards = "Dashboards" case dataSets = "DataSets" case dataSources = "DataSources" + case folders = "Folders" case themes = "Themes" case vpcConnections = "VPCConnections" } @@ -12052,13 +12220,16 @@ extension QuickSight { } public struct DefaultDateTimePickerControlOptions: AWSEncodableShape & AWSDecodableShape { + /// The visibility configuration of the Apply button on a DateTimePickerControl. + public let commitMode: CommitMode? /// The display options of a control. public let displayOptions: DateTimePickerControlDisplayOptions? /// The date time picker type of the DefaultDateTimePickerControlOptions. Choose one of the following options: SINGLE_VALUED: The filter condition is a fixed date. DATE_RANGE: The filter condition is a date time range. public let type: SheetControlDateTimePickerType? @inlinable - public init(displayOptions: DateTimePickerControlDisplayOptions? = nil, type: SheetControlDateTimePickerType? = nil) { + public init(commitMode: CommitMode? = nil, displayOptions: DateTimePickerControlDisplayOptions? = nil, type: SheetControlDateTimePickerType? = nil) { + self.commitMode = commitMode self.displayOptions = displayOptions self.type = type } @@ -12068,6 +12239,7 @@ extension QuickSight { } private enum CodingKeys: String, CodingKey { + case commitMode = "CommitMode" case displayOptions = "DisplayOptions" case type = "Type" } @@ -12146,6 +12318,8 @@ extension QuickSight { } public struct DefaultFilterDropDownControlOptions: AWSEncodableShape & AWSDecodableShape { + /// The visibility configuration of the Apply button on a FilterDropDownControl. + public let commitMode: CommitMode? /// The display options of a control. public let displayOptions: DropDownControlDisplayOptions? /// A list of selectable values that are used in a control. @@ -12154,7 +12328,8 @@ extension QuickSight { public let type: SheetControlListType? @inlinable - public init(displayOptions: DropDownControlDisplayOptions? = nil, selectableValues: FilterSelectableValues? = nil, type: SheetControlListType? = nil) { + public init(commitMode: CommitMode? = nil, displayOptions: DropDownControlDisplayOptions? = nil, selectableValues: FilterSelectableValues? = nil, type: SheetControlListType? 
= nil) { + self.commitMode = commitMode self.displayOptions = displayOptions self.selectableValues = selectableValues self.type = type @@ -12166,6 +12341,7 @@ extension QuickSight { } private enum CodingKeys: String, CodingKey { + case commitMode = "CommitMode" case displayOptions = "DisplayOptions" case selectableValues = "SelectableValues" case type = "Type" @@ -12304,11 +12480,14 @@ extension QuickSight { } public struct DefaultRelativeDateTimeControlOptions: AWSEncodableShape & AWSDecodableShape { + /// The visibility configuration of the Apply button on a RelativeDateTimeControl. + public let commitMode: CommitMode? /// The display options of a control. public let displayOptions: RelativeDateTimeControlDisplayOptions? @inlinable - public init(displayOptions: RelativeDateTimeControlDisplayOptions? = nil) { + public init(commitMode: CommitMode? = nil, displayOptions: RelativeDateTimeControlDisplayOptions? = nil) { + self.commitMode = commitMode self.displayOptions = displayOptions } @@ -12317,6 +12496,7 @@ extension QuickSight { } private enum CodingKeys: String, CodingKey { + case commitMode = "CommitMode" case displayOptions = "DisplayOptions" } } @@ -14531,6 +14711,10 @@ extension QuickSight { public let exportFormat: AssetBundleExportFormat? /// The include dependencies flag. public let includeAllDependencies: Bool? + /// A setting that determines whether folder members are included. + public let includeFolderMembers: IncludeFolderMembers? + /// The include folder memberships flag. + public let includeFolderMemberships: Bool? /// The include permissions flag. public let includePermissions: Bool? /// The include tags flag. @@ -14549,7 +14733,7 @@ extension QuickSight { public let warnings: [AssetBundleExportJobWarning]? @inlinable - public init(arn: String? = nil, assetBundleExportJobId: String? = nil, awsAccountId: String? = nil, cloudFormationOverridePropertyConfiguration: AssetBundleCloudFormationOverridePropertyConfiguration? = nil, createdTime: Date? = nil, downloadUrl: String? = nil, errors: [AssetBundleExportJobError]? = nil, exportFormat: AssetBundleExportFormat? = nil, includeAllDependencies: Bool? = nil, includePermissions: Bool? = nil, includeTags: Bool? = nil, jobStatus: AssetBundleExportJobStatus? = nil, requestId: String? = nil, resourceArns: [String]? = nil, status: Int? = nil, validationStrategy: AssetBundleExportJobValidationStrategy? = nil, warnings: [AssetBundleExportJobWarning]? = nil) { + public init(arn: String? = nil, assetBundleExportJobId: String? = nil, awsAccountId: String? = nil, cloudFormationOverridePropertyConfiguration: AssetBundleCloudFormationOverridePropertyConfiguration? = nil, createdTime: Date? = nil, downloadUrl: String? = nil, errors: [AssetBundleExportJobError]? = nil, exportFormat: AssetBundleExportFormat? = nil, includeAllDependencies: Bool? = nil, includeFolderMembers: IncludeFolderMembers? = nil, includeFolderMemberships: Bool? = nil, includePermissions: Bool? = nil, includeTags: Bool? = nil, jobStatus: AssetBundleExportJobStatus? = nil, requestId: String? = nil, resourceArns: [String]? = nil, status: Int? = nil, validationStrategy: AssetBundleExportJobValidationStrategy? = nil, warnings: [AssetBundleExportJobWarning]? 
= nil) { self.arn = arn self.assetBundleExportJobId = assetBundleExportJobId self.awsAccountId = awsAccountId @@ -14559,6 +14743,8 @@ extension QuickSight { self.errors = errors self.exportFormat = exportFormat self.includeAllDependencies = includeAllDependencies + self.includeFolderMembers = includeFolderMembers + self.includeFolderMemberships = includeFolderMemberships self.includePermissions = includePermissions self.includeTags = includeTags self.jobStatus = jobStatus @@ -14581,6 +14767,8 @@ extension QuickSight { self.errors = try container.decodeIfPresent([AssetBundleExportJobError].self, forKey: .errors) self.exportFormat = try container.decodeIfPresent(AssetBundleExportFormat.self, forKey: .exportFormat) self.includeAllDependencies = try container.decodeIfPresent(Bool.self, forKey: .includeAllDependencies) + self.includeFolderMembers = try container.decodeIfPresent(IncludeFolderMembers.self, forKey: .includeFolderMembers) + self.includeFolderMemberships = try container.decodeIfPresent(Bool.self, forKey: .includeFolderMemberships) self.includePermissions = try container.decodeIfPresent(Bool.self, forKey: .includePermissions) self.includeTags = try container.decodeIfPresent(Bool.self, forKey: .includeTags) self.jobStatus = try container.decodeIfPresent(AssetBundleExportJobStatus.self, forKey: .jobStatus) @@ -14601,6 +14789,8 @@ extension QuickSight { case errors = "Errors" case exportFormat = "ExportFormat" case includeAllDependencies = "IncludeAllDependencies" + case includeFolderMembers = "IncludeFolderMembers" + case includeFolderMemberships = "IncludeFolderMemberships" case includePermissions = "IncludePermissions" case includeTags = "IncludeTags" case jobStatus = "JobStatus" @@ -16170,6 +16360,59 @@ extension QuickSight { } } + public struct DescribeQPersonalizationConfigurationRequest: AWSEncodableShape { + /// The ID of the Amazon Web Services account that contains the personalization configuration that the user wants described. + public let awsAccountId: String + + @inlinable + public init(awsAccountId: String) { + self.awsAccountId = awsAccountId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.awsAccountId, key: "AwsAccountId") + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, max: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, min: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^[0-9]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DescribeQPersonalizationConfigurationResponse: AWSDecodableShape { + /// A value that indicates whether personalization is enabled or not. + public let personalizationMode: PersonalizationMode? + /// The Amazon Web Services request ID for this operation. + public let requestId: String? + /// The HTTP status of the request. + public let status: Int? + + @inlinable + public init(personalizationMode: PersonalizationMode? = nil, requestId: String? = nil, status: Int? = nil) { + self.personalizationMode = personalizationMode + self.requestId = requestId + self.status = status + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! 
ResponseDecodingContainer + let container = try decoder.container(keyedBy: CodingKeys.self) + self.personalizationMode = try container.decodeIfPresent(PersonalizationMode.self, forKey: .personalizationMode) + self.requestId = try container.decodeIfPresent(String.self, forKey: .requestId) + self.status = response.decodeStatus() + } + + private enum CodingKeys: String, CodingKey { + case personalizationMode = "PersonalizationMode" + case requestId = "RequestId" + } + } + public struct DescribeRefreshScheduleRequest: AWSEncodableShape { /// The Amazon Web Services account ID. public let awsAccountId: String @@ -18263,6 +18506,8 @@ extension QuickSight { } public struct FilterDateTimePickerControl: AWSEncodableShape & AWSDecodableShape { + /// The visibility configuration of the Apply button on a DateTimePickerControl. + public let commitMode: CommitMode? /// The display options of a control. public let displayOptions: DateTimePickerControlDisplayOptions? /// The ID of the FilterDateTimePickerControl. @@ -18275,7 +18520,8 @@ extension QuickSight { public let type: SheetControlDateTimePickerType? @inlinable - public init(displayOptions: DateTimePickerControlDisplayOptions? = nil, filterControlId: String, sourceFilterId: String, title: String, type: SheetControlDateTimePickerType? = nil) { + public init(commitMode: CommitMode? = nil, displayOptions: DateTimePickerControlDisplayOptions? = nil, filterControlId: String, sourceFilterId: String, title: String, type: SheetControlDateTimePickerType? = nil) { + self.commitMode = commitMode self.displayOptions = displayOptions self.filterControlId = filterControlId self.sourceFilterId = sourceFilterId @@ -18296,6 +18542,7 @@ extension QuickSight { } private enum CodingKeys: String, CodingKey { + case commitMode = "CommitMode" case displayOptions = "DisplayOptions" case filterControlId = "FilterControlId" case sourceFilterId = "SourceFilterId" @@ -18307,6 +18554,8 @@ extension QuickSight { public struct FilterDropDownControl: AWSEncodableShape & AWSDecodableShape { /// The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls. public let cascadingControlConfiguration: CascadingControlConfiguration? + /// The visibility configuration of the Apply button on a FilterDropDownControl. + public let commitMode: CommitMode? /// The display options of the FilterDropDownControl. public let displayOptions: DropDownControlDisplayOptions? /// The ID of the FilterDropDownControl. @@ -18321,8 +18570,9 @@ extension QuickSight { public let type: SheetControlListType? @inlinable - public init(cascadingControlConfiguration: CascadingControlConfiguration? = nil, displayOptions: DropDownControlDisplayOptions? = nil, filterControlId: String, selectableValues: FilterSelectableValues? = nil, sourceFilterId: String, title: String, type: SheetControlListType? = nil) { + public init(cascadingControlConfiguration: CascadingControlConfiguration? = nil, commitMode: CommitMode? = nil, displayOptions: DropDownControlDisplayOptions? = nil, filterControlId: String, selectableValues: FilterSelectableValues? = nil, sourceFilterId: String, title: String, type: SheetControlListType?
= nil) { self.cascadingControlConfiguration = cascadingControlConfiguration + self.commitMode = commitMode self.displayOptions = displayOptions self.filterControlId = filterControlId self.selectableValues = selectableValues @@ -18347,6 +18597,7 @@ extension QuickSight { private enum CodingKeys: String, CodingKey { case cascadingControlConfiguration = "CascadingControlConfiguration" + case commitMode = "CommitMode" case displayOptions = "DisplayOptions" case filterControlId = "FilterControlId" case selectableValues = "SelectableValues" @@ -18555,6 +18806,8 @@ extension QuickSight { } public struct FilterRelativeDateTimeControl: AWSEncodableShape & AWSDecodableShape { + /// The visibility configuration of the Apply button on a FilterRelativeDateTimeControl. + public let commitMode: CommitMode? /// The display options of a control. public let displayOptions: RelativeDateTimeControlDisplayOptions? /// The ID of the FilterTextAreaControl. @@ -18565,7 +18818,8 @@ extension QuickSight { public let title: String @inlinable - public init(displayOptions: RelativeDateTimeControlDisplayOptions? = nil, filterControlId: String, sourceFilterId: String, title: String) { + public init(commitMode: CommitMode? = nil, displayOptions: RelativeDateTimeControlDisplayOptions? = nil, filterControlId: String, sourceFilterId: String, title: String) { + self.commitMode = commitMode self.displayOptions = displayOptions self.filterControlId = filterControlId self.sourceFilterId = sourceFilterId @@ -18585,6 +18839,7 @@ extension QuickSight { } private enum CodingKeys: String, CodingKey { + case commitMode = "CommitMode" case displayOptions = "DisplayOptions" case filterControlId = "FilterControlId" case sourceFilterId = "SourceFilterId" @@ -23278,6 +23533,78 @@ extension QuickSight { } } + public struct ListFoldersForResourceRequest: AWSEncodableShape { + /// The ID for the Amazon Web Services account that contains the resource. + public let awsAccountId: String + /// The maximum number of results to be returned per request. + public let maxResults: Int? + /// The token for the next set of results, or null if there are no more results. + public let nextToken: String? + /// The Amazon Resource Name (ARN) of the resource whose folders you need to list. + public let resourceArn: String + + @inlinable + public init(awsAccountId: String, maxResults: Int? = nil, nextToken: String? = nil, resourceArn: String) { + self.awsAccountId = awsAccountId + self.maxResults = maxResults + self.nextToken = nextToken + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as!
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.awsAccountId, key: "AwsAccountId") + request.encodeQuery(self.maxResults, key: "max-results") + request.encodeQuery(self.nextToken, key: "next-token") + request.encodePath(self.resourceArn, key: "ResourceArn") + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, max: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, min: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^[0-9]{12}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListFoldersForResourceResponse: AWSDecodableShape { + /// A list that contains the Amazon Resource Names (ARNs) of all folders that the resource is a member of. + public let folders: [String]? + /// The token for the next set of results, or null if there are no more results. + public let nextToken: String? + /// The Amazon Web Services request ID for this operation. + public let requestId: String? + /// The HTTP status of the request. + public let status: Int? + + @inlinable + public init(folders: [String]? = nil, nextToken: String? = nil, requestId: String? = nil, status: Int? = nil) { + self.folders = folders + self.nextToken = nextToken + self.requestId = requestId + self.status = status + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer + let container = try decoder.container(keyedBy: CodingKeys.self) + self.folders = try container.decodeIfPresent([String].self, forKey: .folders) + self.nextToken = try container.decodeIfPresent(String.self, forKey: .nextToken) + self.requestId = try container.decodeIfPresent(String.self, forKey: .requestId) + self.status = response.decodeStatus() + } + + private enum CodingKeys: String, CodingKey { + case folders = "Folders" + case nextToken = "NextToken" + case requestId = "RequestId" + } + } + public struct ListFoldersRequest: AWSEncodableShape { /// The ID for the Amazon Web Services account that contains the folder. public let awsAccountId: String @@ -26350,6 +26677,8 @@ extension QuickSight { public struct ParameterDropDownControl: AWSEncodableShape & AWSDecodableShape { /// The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls. public let cascadingControlConfiguration: CascadingControlConfiguration? + /// The visibility configuration of the Apply button on a ParameterDropDownControl. + public let commitMode: CommitMode? /// The display options of a control. public let displayOptions: DropDownControlDisplayOptions? /// The ID of the ParameterDropDownControl. @@ -26364,8 +26693,9 @@ extension QuickSight { public let type: SheetControlListType? @inlinable - public init(cascadingControlConfiguration: CascadingControlConfiguration? = nil, displayOptions: DropDownControlDisplayOptions? = nil, parameterControlId: String, selectableValues: ParameterSelectableValues? = nil, sourceParameterName: String, title: String, type: SheetControlListType? = nil) { + public init(cascadingControlConfiguration: CascadingControlConfiguration? = nil, commitMode: CommitMode? = nil, displayOptions: DropDownControlDisplayOptions? 
= nil, parameterControlId: String, selectableValues: ParameterSelectableValues? = nil, sourceParameterName: String, title: String, type: SheetControlListType? = nil) { self.cascadingControlConfiguration = cascadingControlConfiguration + self.commitMode = commitMode self.displayOptions = displayOptions self.parameterControlId = parameterControlId self.selectableValues = selectableValues @@ -26390,6 +26720,7 @@ extension QuickSight { private enum CodingKeys: String, CodingKey { case cascadingControlConfiguration = "CascadingControlConfiguration" + case commitMode = "CommitMode" case displayOptions = "DisplayOptions" case parameterControlId = "ParameterControlId" case selectableValues = "SelectableValues" @@ -31791,6 +32122,10 @@ extension QuickSight { public let exportFormat: AssetBundleExportFormat /// A Boolean that determines whether all dependencies of each resource ARN are recursively exported with the job. For example, say you provided a Dashboard ARN to the ResourceArns parameter. If you set IncludeAllDependencies to TRUE, any theme, dataset, and data source resource that is a dependency of the dashboard is also exported. public let includeAllDependencies: Bool? + /// A setting that indicates whether you want to include folder assets. You can also use this setting to recursively include all subfolders of an exported folder. + public let includeFolderMembers: IncludeFolderMembers? + /// A Boolean that determines if the exported asset carries over information about the folders that the asset is a member of. + public let includeFolderMemberships: Bool? /// A Boolean that determines whether all permissions for each resource ARN are exported with the job. If you set IncludePermissions to TRUE, any permissions associated with each resource are exported. public let includePermissions: Bool? /// A Boolean that determines whether all tags for each resource ARN are exported with the job. If you set IncludeTags to TRUE, any tags associated with each resource are exported. @@ -31801,12 +32136,14 @@ extension QuickSight { public let validationStrategy: AssetBundleExportJobValidationStrategy? @inlinable - public init(assetBundleExportJobId: String, awsAccountId: String, cloudFormationOverridePropertyConfiguration: AssetBundleCloudFormationOverridePropertyConfiguration? = nil, exportFormat: AssetBundleExportFormat, includeAllDependencies: Bool? = nil, includePermissions: Bool? = nil, includeTags: Bool? = nil, resourceArns: [String], validationStrategy: AssetBundleExportJobValidationStrategy? = nil) { + public init(assetBundleExportJobId: String, awsAccountId: String, cloudFormationOverridePropertyConfiguration: AssetBundleCloudFormationOverridePropertyConfiguration? = nil, exportFormat: AssetBundleExportFormat, includeAllDependencies: Bool? = nil, includeFolderMembers: IncludeFolderMembers? = nil, includeFolderMemberships: Bool? = nil, includePermissions: Bool? = nil, includeTags: Bool? = nil, resourceArns: [String], validationStrategy: AssetBundleExportJobValidationStrategy?
= nil) { self.assetBundleExportJobId = assetBundleExportJobId self.awsAccountId = awsAccountId self.cloudFormationOverridePropertyConfiguration = cloudFormationOverridePropertyConfiguration self.exportFormat = exportFormat self.includeAllDependencies = includeAllDependencies + self.includeFolderMembers = includeFolderMembers + self.includeFolderMemberships = includeFolderMemberships self.includePermissions = includePermissions self.includeTags = includeTags self.resourceArns = resourceArns @@ -31821,6 +32158,8 @@ extension QuickSight { try container.encodeIfPresent(self.cloudFormationOverridePropertyConfiguration, forKey: .cloudFormationOverridePropertyConfiguration) try container.encode(self.exportFormat, forKey: .exportFormat) try container.encodeIfPresent(self.includeAllDependencies, forKey: .includeAllDependencies) + try container.encodeIfPresent(self.includeFolderMembers, forKey: .includeFolderMembers) + try container.encodeIfPresent(self.includeFolderMemberships, forKey: .includeFolderMemberships) try container.encodeIfPresent(self.includePermissions, forKey: .includePermissions) try container.encodeIfPresent(self.includeTags, forKey: .includeTags) try container.encode(self.resourceArns, forKey: .resourceArns) @@ -31844,6 +32183,8 @@ extension QuickSight { case cloudFormationOverridePropertyConfiguration = "CloudFormationOverridePropertyConfiguration" case exportFormat = "ExportFormat" case includeAllDependencies = "IncludeAllDependencies" + case includeFolderMembers = "IncludeFolderMembers" + case includeFolderMemberships = "IncludeFolderMemberships" case includePermissions = "IncludePermissions" case includeTags = "IncludeTags" case resourceArns = "ResourceArns" @@ -34549,6 +34890,20 @@ extension QuickSight { } } + public struct TopicConfigOptions: AWSEncodableShape & AWSDecodableShape { + /// Enables Amazon Q Business Insights for a Topic. + public let qBusinessInsightsEnabled: Bool? + + @inlinable + public init(qBusinessInsightsEnabled: Bool? = nil) { + self.qBusinessInsightsEnabled = qBusinessInsightsEnabled + } + + private enum CodingKeys: String, CodingKey { + case qBusinessInsightsEnabled = "QBusinessInsightsEnabled" + } + } + public struct TopicConstantValue: AWSEncodableShape & AWSDecodableShape { /// The constant type of a TopicConstantValue. public let constantType: ConstantType? @@ -34612,6 +34967,8 @@ extension QuickSight { } public struct TopicDetails: AWSEncodableShape & AWSDecodableShape { + /// Configuration options for a Topic. + public let configOptions: TopicConfigOptions? /// The data sets that the topic is associated with. public let dataSets: [DatasetMetadata]? /// The description of the topic. @@ -34622,7 +34979,8 @@ extension QuickSight { public let userExperienceVersion: TopicUserExperienceVersion? @inlinable - public init(dataSets: [DatasetMetadata]? = nil, description: String? = nil, name: String? = nil, userExperienceVersion: TopicUserExperienceVersion? = nil) { + public init(configOptions: TopicConfigOptions? = nil, dataSets: [DatasetMetadata]? = nil, description: String? = nil, name: String? = nil, userExperienceVersion: TopicUserExperienceVersion? 
= nil) { + self.configOptions = configOptions self.dataSets = dataSets self.description = description self.name = name @@ -34639,6 +34997,7 @@ extension QuickSight { } private enum CodingKeys: String, CodingKey { + case configOptions = "ConfigOptions" case dataSets = "DataSets" case description = "Description" case name = "Name" @@ -37782,6 +38141,65 @@ extension QuickSight { } } + public struct UpdateQPersonalizationConfigurationRequest: AWSEncodableShape { + /// The ID of the Amazon Web Services account that contains the personalization configuration that the user wants to update. + public let awsAccountId: String + /// An option to allow Amazon QuickSight to customize data stories with user specific metadata, specifically location and job information, in your IAM Identity Center instance. + public let personalizationMode: PersonalizationMode + + @inlinable + public init(awsAccountId: String, personalizationMode: PersonalizationMode) { + self.awsAccountId = awsAccountId + self.personalizationMode = personalizationMode + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.awsAccountId, key: "AwsAccountId") + try container.encode(self.personalizationMode, forKey: .personalizationMode) + } + + public func validate(name: String) throws { + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, max: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, min: 12) + try self.validate(self.awsAccountId, name: "awsAccountId", parent: name, pattern: "^[0-9]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case personalizationMode = "PersonalizationMode" + } + } + + public struct UpdateQPersonalizationConfigurationResponse: AWSDecodableShape { + /// The personalization mode that is used for the personalization configuration. + public let personalizationMode: PersonalizationMode? + /// The Amazon Web Services request ID for this operation. + public let requestId: String? + /// The HTTP status of the request. + public let status: Int? + + @inlinable + public init(personalizationMode: PersonalizationMode? = nil, requestId: String? = nil, status: Int? = nil) { + self.personalizationMode = personalizationMode + self.requestId = requestId + self.status = status + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer + let container = try decoder.container(keyedBy: CodingKeys.self) + self.personalizationMode = try container.decodeIfPresent(PersonalizationMode.self, forKey: .personalizationMode) + self.requestId = try container.decodeIfPresent(String.self, forKey: .requestId) + self.status = response.decodeStatus() + } + + private enum CodingKeys: String, CodingKey { + case personalizationMode = "PersonalizationMode" + case requestId = "RequestId" + } + } + + public struct UpdateRefreshScheduleRequest: AWSEncodableShape { /// The Amazon Web Services account ID. public let awsAccountId: String diff --git a/Sources/Soto/Services/RDS/RDS_api.swift b/Sources/Soto/Services/RDS/RDS_api.swift index 3cf2dc39f3..4a2ca22231 100644 --- a/Sources/Soto/Services/RDS/RDS_api.swift +++ b/Sources/Soto/Services/RDS/RDS_api.swift @@ -247,7 +247,7 @@ public struct RDS: AWSService { /// Applies a pending maintenance action to a resource (for example, to a DB instance).
/// /// Parameters: - /// - applyAction: The pending maintenance action to apply to this resource. Valid Values: ca-certificate-rotation db-upgrade hardware-maintenance os-upgrade system-update For more information about these actions, see Maintenance actions for Amazon Aurora or Maintenance actions for Amazon RDS. + /// - applyAction: The pending maintenance action to apply to this resource. Valid Values: system-update, db-upgrade, hardware-maintenance, ca-certificate-rotation /// - optInType: A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate can't be undone. Valid Values: immediate - Apply the maintenance action immediately. next-maintenance - Apply the maintenance action during the next maintenance window for the resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests. /// - resourceIdentifier: The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to. For information about creating an ARN, see Constructing an RDS Amazon Resource Name (ARN). /// - logger: Logger use during operation @@ -717,6 +717,7 @@ public struct RDS: AWSService { /// - backupRetentionPeriod: The number of days for which automated backups are retained. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Default: 1 Constraints: Must be a value from 1 to 35. /// - caCertificateIdentifier: The CA certificate identifier to use for the DB cluster's server certificate. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide. Valid for Cluster Type: Multi-AZ DB clusters /// - characterSetName: The name of the character set (CharacterSet) to associate the DB cluster with. Valid for Cluster Type: Aurora DB clusters only + /// - clusterScalabilityType: Specifies the scalability mode of the Aurora DB cluster. When set to limitless, the cluster operates as an Aurora Limitless Database. When set to standard (the default), the cluster uses normal DB instance creation. Valid for: Aurora DB clusters only You can't modify this setting after you create the DB cluster. /// - copyTagsToSnapshot: Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters /// - databaseName: The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters /// - dbClusterIdentifier: The identifier for this DB cluster. This parameter is stored as a lowercase string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must contain from 1 to 63 (for Aurora DB clusters) or 1 to 52 (for Multi-AZ DB clusters) letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. Example: my-cluster1 @@ -774,6 +775,7 @@ public struct RDS: AWSService { backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? = nil, + clusterScalabilityType: ClusterScalabilityType? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? = nil, dbClusterIdentifier: String? 
= nil, @@ -831,6 +833,7 @@ public struct RDS: AWSService { backupRetentionPeriod: backupRetentionPeriod, caCertificateIdentifier: caCertificateIdentifier, characterSetName: characterSetName, + clusterScalabilityType: clusterScalabilityType, copyTagsToSnapshot: copyTagsToSnapshot, databaseName: databaseName, dbClusterIdentifier: dbClusterIdentifier, @@ -1049,7 +1052,7 @@ public struct RDS: AWSService { /// - engineVersion: The version number of the database engine to use. This setting doesn't apply to Amazon Aurora DB instances. The version number of the database engine the DB instance uses is managed by the DB cluster. For a list of valid engine versions, use the DescribeDBEngineVersions operation. The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region. Amazon RDS Custom for Oracle A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: 19.customized_string. A valid CEV name is 19.my_cev1. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide. Amazon RDS Custom for SQL Server See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide. RDS for Db2 For information, see Db2 on Amazon RDS versions in the Amazon RDS User Guide. RDS for MariaDB For information, see MariaDB on Amazon RDS versions in the Amazon RDS User Guide. RDS for Microsoft SQL Server For information, see Microsoft SQL Server versions on Amazon RDS in the Amazon RDS User Guide. RDS for MySQL For information, see MySQL on Amazon RDS versions in the Amazon RDS User Guide. RDS for Oracle For information, see Oracle Database Engine release notes in the Amazon RDS User Guide. RDS for PostgreSQL For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide. /// - iops: The amount of Provisioned IOPS (input/output operations per second) to initially allocate for the DB instance. For information about valid IOPS values, see Amazon RDS DB instance storage in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster. Constraints: For RDS for Db2, MariaDB, MySQL, Oracle, and PostgreSQL - Must be a multiple between .5 and 50 of the storage amount for the DB instance. For RDS for SQL Server - Must be a multiple between 1 and 50 of the storage amount for the DB instance. /// - kmsKeyId: The Amazon Web Services KMS key identifier for an encrypted DB instance. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. This setting doesn't apply to Amazon Aurora DB instances. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster. If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. For Amazon RDS Custom, a KMS key is required for DB instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted, the engine uses the default KMS key. 
However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key. - /// - licenseModel: The license model information for this DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. The default for RDS for Db2 is bring-your-own-license. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license + /// - licenseModel: The license model information for this DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. The default for RDS for Db2 is bring-your-own-license. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license /// - manageMasterUserPassword: Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. /// - masterUsername: The name for the master user. This setting doesn't apply to Amazon Aurora DB instances. The name for the master user is managed by the DB cluster. This setting is required for RDS DB instances. Constraints: Must be 1 to 16 letters, numbers, or underscores. First character must be a letter. Can't be a reserved word for the chosen database engine. /// - masterUserPassword: The password for the master user. This setting doesn't apply to Amazon Aurora DB instances. The password for the master user is managed by the DB cluster. Constraints: Can't be specified if ManageMasterUserPassword is turned on. Can include any printable ASCII character except "/", """, or "@". For RDS for Oracle, can't include the "&" (ampersand) or the "'" (single quotes) character. Length Constraints: RDS for Db2 - Must contain from 8 to 255 characters. RDS for MariaDB - Must contain from 8 to 41 characters. RDS for Microsoft SQL Server - Must contain from 8 to 128 characters. RDS for MySQL - Must contain from 8 to 41 characters. RDS for Oracle - Must contain from 8 to 30 characters. RDS for PostgreSQL - Must contain from 8 to 128 characters. 
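Aside from the diff itself: the hunks above thread a new clusterScalabilityType option through createDBCluster, and the following is a minimal, illustrative Swift sketch of how that parameter might be called from Soto. It is not part of this change; it assumes Soto's defaulted AWSClient initializer and async shutdown, and the cluster identifier, engine, and credential handling shown are placeholder assumptions.

import SotoRDS

// Hypothetical setup: default credential chain and region chosen for illustration.
let client = AWSClient()
let rds = RDS(client: client, region: .useast1)

// Create an Aurora PostgreSQL cluster that runs as an Aurora Limitless Database.
// clusterScalabilityType can't be changed after the cluster is created.
_ = try await rds.createDBCluster(
    clusterScalabilityType: .limitless,
    dbClusterIdentifier: "my-limitless-cluster",   // placeholder identifier
    engine: "aurora-postgresql",
    manageMasterUserPassword: true,                // let Secrets Manager handle the password
    masterUsername: "adminuser"
)
try await client.shutdown()
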
@@ -1238,7 +1241,7 @@ public struct RDS: AWSService { /// - customIamInstanceProfile: The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance. The instance profile must meet the following requirements: The profile must exist in your account. The profile must have an IAM role that Amazon EC2 has permissions to assume. The instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom. For the list of permissions required for the IAM role, see Configure IAM and your VPC in the Amazon RDS User Guide. This setting is required for RDS Custom DB instances. /// - dbInstanceClass: The compute and memory capacity of the read replica, for example db.m4.large. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide. Default: Inherits the value from the source DB instance. /// - dbInstanceIdentifier: The DB instance identifier of the read replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. - /// - dbParameterGroupName: The name of the DB parameter group to associate with this DB instance. If you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. + /// - dbParameterGroupName: The name of the DB parameter group to associate with this read replica DB instance. For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup. Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. /// - dbSubnetGroupName: A DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC. Constraints: If supplied, must match the name of an existing DB subnet group. The specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running. 
All read replicas in one Amazon Web Services Region that are created from the same source DB instance must either: Specify DB subnet groups from the same VPC. All these read replicas are created in the same VPC. Not specify a DB subnet group. All these read replicas are created outside of any VPC. Example: mydbsubnetgroup /// - dedicatedLogVolume: Indicates whether the DB instance has a dedicated log volume (DLV) enabled. /// - deletionProtection: Specifies whether to enable deletion protection for the DB instance. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see Deleting a DB Instance. @@ -1564,12 +1567,13 @@ public struct RDS: AWSService { /// Creates a new DB shard group for Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only /// /// Parameters: - /// - computeRedundancy: Specifies whether to create standby instances for the DB shard group. Valid values are the following: 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. + /// - computeRedundancy: Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: 0 - Creates a DB shard group without a standby DB shard group. This is the default value. 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. /// - dbClusterIdentifier: The name of the primary DB cluster for the DB shard group. /// - dbShardGroupIdentifier: The name of the DB shard group. /// - maxACU: The maximum capacity of the DB shard group in Aurora capacity units (ACUs). /// - minACU: The minimum capacity of the DB shard group in Aurora capacity units (ACUs). /// - publiclyAccessible: Specifies whether the DB shard group is publicly accessible. When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. Access to the DB shard group is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB shard group doesn't permit it. When the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB shard group is private. If the default VPC in the target Region has an internet gateway attached to it, the DB shard group is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB shard group is private. 
If the subnets are part of a VPC that has an internet gateway attached to it, the DB shard group is public. + /// - tags: /// - logger: Logger use during operation @inlinable public func createDBShardGroup( @@ -1579,6 +1583,7 @@ public struct RDS: AWSService { maxACU: Double? = nil, minACU: Double? = nil, publiclyAccessible: Bool? = nil, + tags: [Tag]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> DBShardGroup { let input = CreateDBShardGroupMessage( @@ -1587,7 +1592,8 @@ public struct RDS: AWSService { dbShardGroupIdentifier: dbShardGroupIdentifier, maxACU: maxACU, minACU: minACU, - publiclyAccessible: publiclyAccessible + publiclyAccessible: publiclyAccessible, + tags: tags ) return try await self.createDBShardGroup(input, logger: logger) } @@ -1736,6 +1742,7 @@ public struct RDS: AWSService { /// - globalClusterIdentifier: The cluster identifier for this global database cluster. This parameter is stored as a lowercase string. /// - sourceDBClusterIdentifier: The Amazon Resource Name (ARN) to use as the primary cluster of the global database. If you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster: DatabaseName Engine EngineVersion StorageEncrypted /// - storageEncrypted: Specifies whether to enable storage encryption for the new global database cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the setting from the source DB cluster. + /// - tags: Tags to assign to the global cluster. /// - logger: Logger use during operation @inlinable public func createGlobalCluster( @@ -1747,6 +1754,7 @@ public struct RDS: AWSService { globalClusterIdentifier: String? = nil, sourceDBClusterIdentifier: String? = nil, storageEncrypted: Bool? = nil, + tags: [Tag]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateGlobalClusterResult { let input = CreateGlobalClusterMessage( @@ -1757,7 +1765,8 @@ public struct RDS: AWSService { engineVersion: engineVersion, globalClusterIdentifier: globalClusterIdentifier, sourceDBClusterIdentifier: sourceDBClusterIdentifier, - storageEncrypted: storageEncrypted + storageEncrypted: storageEncrypted, + tags: tags ) return try await self.createGlobalCluster(input, logger: logger) } @@ -4603,7 +4612,7 @@ public struct RDS: AWSService { return try await self.failoverGlobalCluster(input, logger: logger) } - /// Lists all tags on an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide. + /// Lists all tags on an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide. @Sendable @inlinable public func listTagsForResource(_ input: ListTagsForResourceMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> TagListMessage { @@ -4616,7 +4625,7 @@ public struct RDS: AWSService { logger: logger ) } - /// Lists all tags on an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide. + /// Lists all tags on an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide. 
/// /// Parameters: /// - filters: This parameter isn't currently supported. @@ -5459,18 +5468,21 @@ public struct RDS: AWSService { /// Modifies the settings of an Aurora Limitless Database DB shard group. You can change one or more settings by specifying these parameters and the new values in the request. /// /// Parameters: + /// - computeRedundancy: Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: 0 - Creates a DB shard group without a standby DB shard group. This is the default value. 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. /// - dbShardGroupIdentifier: The name of the DB shard group to modify. /// - maxACU: The maximum capacity of the DB shard group in Aurora capacity units (ACUs). /// - minACU: The minimum capacity of the DB shard group in Aurora capacity units (ACUs). /// - logger: Logger use during operation @inlinable public func modifyDBShardGroup( + computeRedundancy: Int? = nil, dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil, minACU: Double? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> DBShardGroup { let input = ModifyDBShardGroupMessage( + computeRedundancy: computeRedundancy, dbShardGroupIdentifier: dbShardGroupIdentifier, maxACU: maxACU, minACU: minACU @@ -6146,7 +6158,7 @@ public struct RDS: AWSService { return try await self.removeSourceIdentifierFromSubscription(input, logger: logger) } - /// Removes metadata tags from an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide. + /// Removes metadata tags from an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide. @Sendable @inlinable public func removeTagsFromResource(_ input: RemoveTagsFromResourceMessage, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -6159,7 +6171,7 @@ public struct RDS: AWSService { logger: logger ) } - /// Removes metadata tags from an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide. + /// Removes metadata tags from an Amazon RDS resource. For an overview on tagging an Amazon RDS resource, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide. /// /// Parameters: /// - resourceName: The Amazon RDS resource that the tags are removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide. @@ -6617,7 +6629,7 @@ public struct RDS: AWSService { return try await self.restoreDBClusterToPointInTime(input, logger: logger) } - /// Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. 
If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. + /// Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading a RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, Upgrading a PostgreSQL DB snapshot engine version. This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. @Sendable @inlinable public func restoreDBInstanceFromDBSnapshot(_ input: RestoreDBInstanceFromDBSnapshotMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> RestoreDBInstanceFromDBSnapshotResult { @@ -6630,7 +6642,7 @@ public struct RDS: AWSService { logger: logger ) } - /// Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. 
After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. + /// Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the source's original configuration, including the default security group and DB parameter group. By default, the new DB instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment. If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original DB instance with the DB instance created from the snapshot. If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB snapshot. To restore from a DB snapshot with an unsupported engine version, you must first upgrade the engine version of the snapshot. For more information about upgrading a RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. For more information about upgrading a RDS for PostgreSQL DB snapshot engine version, Upgrading a PostgreSQL DB snapshot engine version. This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot. /// /// Parameters: /// - allocatedStorage: The amount of storage (in gibibytes) to allocate initially for the DB instance. Follow the allocation rules specified in CreateDBInstance. Be sure to allocate enough storage for your new DB instance so that the restore operation can succeed. You can also allocate additional storage for future growth. @@ -6661,7 +6673,7 @@ public struct RDS: AWSService { /// - engine: The database engine to use for the new instance. This setting doesn't apply to RDS Custom. Default: The same as source Constraint: Must be compatible with the engine of the source. For example, you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. Valid Values: db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web /// - engineLifecycleSupport: The life cycle type for this DB instance. By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. 
In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide. This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster. Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support /// - iops: Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter isn't specified, the IOPS value is taken from the backup. If this parameter is set to 0, the new instance is converted to a non-PIOPS instance. The conversion takes additional time, though your DB instance is available for connections before the conversion starts. The provisioned IOPS value must follow the requirements for your database engine. For more information, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be an integer greater than 1000. - /// - licenseModel: License model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. + /// - licenseModel: License model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. /// - multiAZ: Specifies whether the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. /// - networkType: The network type of the DB instance. Valid Values: IPV4 DUAL The network type is determined by the DBSubnetGroup specified for the DB instance. 
A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. /// - optionGroupName: The name of the option group to be used for the restored DB instance. Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance after it is associated with a DB instance. This setting doesn't apply to RDS Custom. @@ -6989,7 +7001,7 @@ public struct RDS: AWSService { /// - engine: The database engine to use for the new instance. This setting doesn't apply to RDS Custom. Valid Values: db2-ae db2-se mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web Default: The same as source Constraints: Must be compatible with the engine of the source. /// - engineLifecycleSupport: The life cycle type for this DB instance. By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Using Amazon RDS Extended Support in the Amazon RDS User Guide. This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster. Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support /// - iops: The amount of Provisioned IOPS (input/output operations per second) to initially allocate for the DB instance. This setting doesn't apply to SQL Server. Constraints: Must be an integer greater than 1000. - /// - licenseModel: The license model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. + /// - licenseModel: The license model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. 
For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. /// - maxAllocatedStorage: The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. /// - multiAZ: Specifies whether the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom. Constraints: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. /// - networkType: The network type of the DB instance. The network type is determined by the DBSubnetGroup specified for the DB instance. A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 protocols (DUAL). For more information, see Working with a DB instance in a VPC in the Amazon RDS User Guide. Valid Values: IPV4 DUAL diff --git a/Sources/Soto/Services/RDS/RDS_shapes.swift b/Sources/Soto/Services/RDS/RDS_shapes.swift index c3f58fdd23..85fc1569b3 100644 --- a/Sources/Soto/Services/RDS/RDS_shapes.swift +++ b/Sources/Soto/Services/RDS/RDS_shapes.swift @@ -79,6 +79,12 @@ extension RDS { public var description: String { return self.rawValue } } + public enum ClusterScalabilityType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case limitless = "limitless" + case standard = "standard" + public var description: String { return self.rawValue } + } + public enum CustomEngineVersionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case available = "available" case inactive = "inactive" @@ -376,7 +382,7 @@ extension RDS { } public struct ApplyPendingMaintenanceActionMessage: AWSEncodableShape { - /// The pending maintenance action to apply to this resource. Valid Values: ca-certificate-rotation db-upgrade hardware-maintenance os-upgrade system-update For more information about these actions, see Maintenance actions for Amazon Aurora or Maintenance actions for Amazon RDS. + /// The pending maintenance action to apply to this resource. Valid Values: system-update, db-upgrade, hardware-maintenance, ca-certificate-rotation public let applyAction: String? /// A value that specifies the type of opt-in request, or undoes an opt-in request. An opt-in request of type immediate can't be undone. Valid Values: immediate - Apply the maintenance action immediately. next-maintenance - Apply the maintenance action during the next maintenance window for the resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests. public let optInType: String? @@ -1317,6 +1323,8 @@ extension RDS { public let caCertificateIdentifier: String? /// The name of the character set (CharacterSet) to associate the DB cluster with. Valid for Cluster Type: Aurora DB clusters only public let characterSetName: String? + /// Specifies the scalability mode of the Aurora DB cluster. When set to limitless, the cluster operates as an Aurora Limitless Database.
When set to standard (the default), the cluster uses normal DB instance creation. Valid for: Aurora DB clusters only You can't modify this setting after you create the DB cluster. + public let clusterScalabilityType: ClusterScalabilityType? /// Specifies whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters public let copyTagsToSnapshot: Bool? /// The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters @@ -1415,7 +1423,7 @@ extension RDS { public var vpcSecurityGroupIds: [String]? @inlinable - public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableGlobalWriteForwarding: Bool? = nil, enableHttpEndpoint: Bool? = nil, enableIAMDatabaseAuthentication: Bool? = nil, enableLimitlessDatabase: Bool? = nil, enableLocalWriteForwarding: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, preSignedUrl: String? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, replicationSourceIdentifier: String? = nil, scalingConfiguration: ScalingConfiguration? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { + public init(allocatedStorage: Int? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, caCertificateIdentifier: String? = nil, characterSetName: String? = nil, clusterScalabilityType: ClusterScalabilityType? = nil, copyTagsToSnapshot: Bool? = nil, databaseName: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterParameterGroupName: String? = nil, dbSubnetGroupName: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domain: String? = nil, domainIAMRoleName: String? = nil, enableCloudwatchLogsExports: [String]? = nil, enableGlobalWriteForwarding: Bool? = nil, enableHttpEndpoint: Bool? 
= nil, enableIAMDatabaseAuthentication: Bool? = nil, enableLimitlessDatabase: Bool? = nil, enableLocalWriteForwarding: Bool? = nil, enablePerformanceInsights: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, iops: Int? = nil, kmsKeyId: String? = nil, manageMasterUserPassword: Bool? = nil, masterUsername: String? = nil, masterUserPassword: String? = nil, masterUserSecretKmsKeyId: String? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, networkType: String? = nil, optionGroupName: String? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, preSignedUrl: String? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, replicationSourceIdentifier: String? = nil, scalingConfiguration: ScalingConfiguration? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfiguration? = nil, storageEncrypted: Bool? = nil, storageType: String? = nil, tags: [Tag]? = nil, vpcSecurityGroupIds: [String]? = nil) { self.allocatedStorage = allocatedStorage self.autoMinorVersionUpgrade = autoMinorVersionUpgrade self.availabilityZones = availabilityZones @@ -1423,6 +1431,7 @@ extension RDS { self.backupRetentionPeriod = backupRetentionPeriod self.caCertificateIdentifier = caCertificateIdentifier self.characterSetName = characterSetName + self.clusterScalabilityType = clusterScalabilityType self.copyTagsToSnapshot = copyTagsToSnapshot self.databaseName = databaseName self.dbClusterIdentifier = dbClusterIdentifier @@ -1480,6 +1489,7 @@ extension RDS { case backupRetentionPeriod = "BackupRetentionPeriod" case caCertificateIdentifier = "CACertificateIdentifier" case characterSetName = "CharacterSetName" + case clusterScalabilityType = "ClusterScalabilityType" case copyTagsToSnapshot = "CopyTagsToSnapshot" case databaseName = "DatabaseName" case dbClusterIdentifier = "DBClusterIdentifier" @@ -1700,7 +1710,7 @@ extension RDS { public let iops: Int? /// The Amazon Web Services KMS key identifier for an encrypted DB instance. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN. This setting doesn't apply to Amazon Aurora DB instances. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster. If StorageEncrypted is enabled, and you do not specify a value for the KmsKeyId parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region. For Amazon RDS Custom, a KMS key is required for DB instances. For most RDS engines, if you leave this parameter empty while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key. public let kmsKeyId: String? - /// The license model information for this DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. 
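The new ClusterScalabilityType enum and its wiring into CreateDBClusterMessage can be exercised as in the following hedged sketch. This is not code from the diff: it assumes an already constructed RDS service object, and the cluster identifier, engine, and credentials are placeholder values.

import SotoRDS

// Minimal sketch: opt a new Aurora cluster into Aurora Limitless Database by setting
// the new clusterScalabilityType field. All identifiers and credentials are placeholders.
func createLimitlessCluster(rds: RDS) async throws {
    let request = RDS.CreateDBClusterMessage(
        clusterScalabilityType: .limitless,          // .standard is the default behaviour
        dbClusterIdentifier: "my-limitless-cluster",
        engine: "aurora-postgresql",
        masterUsername: "adminuser",
        masterUserPassword: "change-me-please"
    )
    let result = try await rds.createDBCluster(request)
    // The setting is echoed back on the DBCluster shape returned by the service.
    print(result.dbCluster?.clusterScalabilityType ?? .standard)
}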
The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. The default for RDS for Db2 is bring-your-own-license. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license + /// The license model information for this DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. The default for RDS for Db2 is bring-your-own-license. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license public let licenseModel: String? /// Specifies whether to manage the master user password with Amazon Web Services Secrets Manager. For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide. Constraints: Can't manage the master user password with Amazon Web Services Secrets Manager if MasterUserPassword is specified. public let manageMasterUserPassword: Bool? @@ -1917,7 +1927,7 @@ extension RDS { public let dbInstanceClass: String? /// The DB instance identifier of the read replica. This identifier is the unique key that identifies a DB instance. This parameter is stored as a lowercase string. public let dbInstanceIdentifier: String? - /// The name of the DB parameter group to associate with this DB instance. If you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. + /// The name of the DB parameter group to associate with this read replica DB instance. For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the DBParameterGroup of the source DB instance for a same Region read replica, or the default DBParameterGroup for the specified DB engine for a cross-Region read replica. For Multi-AZ DB cluster same Region read replica instances, if you don't specify a value for DBParameterGroupName, then Amazon RDS uses the default DBParameterGroup. 
Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas, for Multi-AZ DB cluster read replica instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First character must be a letter. Can't end with a hyphen or contain two consecutive hyphens. public let dbParameterGroupName: String? /// A DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC. Constraints: If supplied, must match the name of an existing DB subnet group. The specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running. All read replicas in one Amazon Web Services Region that are created from the same source DB instance must either: Specify DB subnet groups from the same VPC. All these read replicas are created in the same VPC. Not specify a DB subnet group. All these read replicas are created outside of any VPC. Example: mydbsubnetgroup public let dbSubnetGroupName: String? @@ -2332,7 +2342,9 @@ extension RDS { } public struct CreateDBShardGroupMessage: AWSEncodableShape { - /// Specifies whether to create standby instances for the DB shard group. Valid values are the following: 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. + public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } + + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: 0 - Creates a DB shard group without a standby DB shard group. This is the default value. 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. public let computeRedundancy: Int? /// The name of the primary DB cluster for the DB shard group. public let dbClusterIdentifier: String? @@ -2344,15 +2356,18 @@ extension RDS { public let minACU: Double? /// Specifies whether the DB shard group is publicly accessible. When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. Access to the DB shard group is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB shard group doesn't permit it. When the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address. Default: The default behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies: If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB shard group is private. 
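The computeRedundancy values described above can be illustrated with a hedged sketch (not part of the diff; the RDS service object, identifiers, and ACU limits are assumed placeholders):

import SotoRDS

// Minimal sketch: create a DB shard group for an Aurora Limitless primary cluster with one
// standby DB shard group in a different AZ. Identifiers and capacity values are placeholders.
func createShardGroup(rds: RDS) async throws {
    let request = RDS.CreateDBShardGroupMessage(
        computeRedundancy: 1,                        // 0 = no standby, 1 = one standby AZ, 2 = two
        dbClusterIdentifier: "my-limitless-cluster",
        dbShardGroupIdentifier: "my-shard-group",
        maxACU: 768,
        minACU: 48,
        publiclyAccessible: false
    )
    let shardGroup = try await rds.createDBShardGroup(request)
    print(shardGroup)
}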
If the default VPC in the target Region has an internet gateway attached to it, the DB shard group is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies: If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB shard group is private. If the subnets are part of a VPC that has an internet gateway attached to it, the DB shard group is public. public let publiclyAccessible: Bool? + @OptionalCustomCoding<ArrayCoder<_TagsEncoding, Tag>> + public var tags: [Tag]? @inlinable - public init(computeRedundancy: Int? = nil, dbClusterIdentifier: String? = nil, dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil, minACU: Double? = nil, publiclyAccessible: Bool? = nil) { + public init(computeRedundancy: Int? = nil, dbClusterIdentifier: String? = nil, dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil, minACU: Double? = nil, publiclyAccessible: Bool? = nil, tags: [Tag]? = nil) { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier self.dbShardGroupIdentifier = dbShardGroupIdentifier self.maxACU = maxACU self.minACU = minACU self.publiclyAccessible = publiclyAccessible + self.tags = tags } private enum CodingKeys: String, CodingKey { @@ -2362,6 +2377,7 @@ extension RDS { case maxACU = "MaxACU" case minACU = "MinACU" case publiclyAccessible = "PubliclyAccessible" + case tags = "Tags" } } @@ -2504,6 +2520,8 @@ extension RDS { } public struct CreateGlobalClusterMessage: AWSEncodableShape { + public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } + /// The name for your database of up to 64 alphanumeric characters. If you don't specify a name, Amazon Aurora doesn't create a database in the global database cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the database name from the source DB cluster. public let databaseName: String? /// Specifies whether to enable deletion protection for the new global database cluster. The global database can't be deleted when deletion protection is enabled. @@ -2520,9 +2538,12 @@ extension RDS { public let sourceDBClusterIdentifier: String? /// Specifies whether to enable storage encryption for the new global database cluster. Constraints: Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the setting from the source DB cluster. public let storageEncrypted: Bool? + /// Tags to assign to the global cluster. + @OptionalCustomCoding<ArrayCoder<_TagsEncoding, Tag>> + public var tags: [Tag]? @inlinable - public init(databaseName: String? = nil, deletionProtection: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, sourceDBClusterIdentifier: String? = nil, storageEncrypted: Bool? = nil) { + public init(databaseName: String? = nil, deletionProtection: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, globalClusterIdentifier: String? = nil, sourceDBClusterIdentifier: String? = nil, storageEncrypted: Bool? = nil, tags: [Tag]?
= nil) { self.databaseName = databaseName self.deletionProtection = deletionProtection self.engine = engine @@ -2531,6 +2552,7 @@ extension RDS { self.globalClusterIdentifier = globalClusterIdentifier self.sourceDBClusterIdentifier = sourceDBClusterIdentifier self.storageEncrypted = storageEncrypted + self.tags = tags } private enum CodingKeys: String, CodingKey { @@ -2542,6 +2564,7 @@ extension RDS { case globalClusterIdentifier = "GlobalClusterIdentifier" case sourceDBClusterIdentifier = "SourceDBClusterIdentifier" case storageEncrypted = "StorageEncrypted" + case tags = "Tags" } } @@ -2784,6 +2807,8 @@ extension RDS { public let cloneGroupId: String? /// The time when the DB cluster was created, in Universal Coordinated Time (UTC). public let clusterCreateTime: Date? + /// The scalability mode of the Aurora DB cluster. When set to limitless, the cluster operates as an Aurora Limitless Database. When set to standard (the default), the cluster uses normal DB instance creation. + public let clusterScalabilityType: ClusterScalabilityType? /// Indicates whether tags are copied from the DB cluster to snapshots of the DB cluster. public let copyTagsToSnapshot: Bool? /// Indicates whether the DB cluster is a clone of a DB cluster owned by a different Amazon Web Services account. @@ -2916,7 +2941,7 @@ extension RDS { public var vpcSecurityGroups: [VpcSecurityGroupMembership]? @inlinable - public init(activityStreamKinesisStreamName: String? = nil, activityStreamKmsKeyId: String? = nil, activityStreamMode: ActivityStreamMode? = nil, activityStreamStatus: ActivityStreamStatus? = nil, allocatedStorage: Int? = nil, associatedRoles: [DBClusterRole]? = nil, automaticRestartTime: Date? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, awsBackupRecoveryPointArn: String? = nil, backtrackConsumedChangeRecords: Int64? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, capacity: Int? = nil, certificateDetails: CertificateDetails? = nil, characterSetName: String? = nil, cloneGroupId: String? = nil, clusterCreateTime: Date? = nil, copyTagsToSnapshot: Bool? = nil, crossAccountClone: Bool? = nil, customEndpoints: [String]? = nil, databaseName: String? = nil, dbClusterArn: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterMembers: [DBClusterMember]? = nil, dbClusterOptionGroupMemberships: [DBClusterOptionGroupStatus]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domainMemberships: [DomainMembership]? = nil, earliestBacktrackTime: Date? = nil, earliestRestorableTime: Date? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, globalWriteForwardingRequested: Bool? = nil, globalWriteForwardingStatus: WriteForwardingStatus? = nil, hostedZoneId: String? = nil, httpEndpointEnabled: Bool? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, ioOptimizedNextAllowedModificationTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, limitlessDatabase: LimitlessDatabase? = nil, localWriteForwardingStatus: LocalWriteForwardingStatus? = nil, masterUsername: String? = nil, masterUserSecret: MasterUserSecret? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? 
= nil, networkType: String? = nil, pendingModifiedValues: ClusterPendingModifiedValues? = nil, percentProgress: String? = nil, performanceInsightsEnabled: Bool? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? = nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, replicationSourceIdentifier: String? = nil, scalingConfigurationInfo: ScalingConfigurationInfo? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfigurationInfo? = nil, status: String? = nil, statusInfos: [DBClusterStatusInfo]? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { + public init(activityStreamKinesisStreamName: String? = nil, activityStreamKmsKeyId: String? = nil, activityStreamMode: ActivityStreamMode? = nil, activityStreamStatus: ActivityStreamStatus? = nil, allocatedStorage: Int? = nil, associatedRoles: [DBClusterRole]? = nil, automaticRestartTime: Date? = nil, autoMinorVersionUpgrade: Bool? = nil, availabilityZones: [String]? = nil, awsBackupRecoveryPointArn: String? = nil, backtrackConsumedChangeRecords: Int64? = nil, backtrackWindow: Int64? = nil, backupRetentionPeriod: Int? = nil, capacity: Int? = nil, certificateDetails: CertificateDetails? = nil, characterSetName: String? = nil, cloneGroupId: String? = nil, clusterCreateTime: Date? = nil, clusterScalabilityType: ClusterScalabilityType? = nil, copyTagsToSnapshot: Bool? = nil, crossAccountClone: Bool? = nil, customEndpoints: [String]? = nil, databaseName: String? = nil, dbClusterArn: String? = nil, dbClusterIdentifier: String? = nil, dbClusterInstanceClass: String? = nil, dbClusterMembers: [DBClusterMember]? = nil, dbClusterOptionGroupMemberships: [DBClusterOptionGroupStatus]? = nil, dbClusterParameterGroup: String? = nil, dbClusterResourceId: String? = nil, dbSubnetGroup: String? = nil, dbSystemId: String? = nil, deletionProtection: Bool? = nil, domainMemberships: [DomainMembership]? = nil, earliestBacktrackTime: Date? = nil, earliestRestorableTime: Date? = nil, enabledCloudwatchLogsExports: [String]? = nil, endpoint: String? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineMode: String? = nil, engineVersion: String? = nil, globalWriteForwardingRequested: Bool? = nil, globalWriteForwardingStatus: WriteForwardingStatus? = nil, hostedZoneId: String? = nil, httpEndpointEnabled: Bool? = nil, iamDatabaseAuthenticationEnabled: Bool? = nil, ioOptimizedNextAllowedModificationTime: Date? = nil, iops: Int? = nil, kmsKeyId: String? = nil, latestRestorableTime: Date? = nil, limitlessDatabase: LimitlessDatabase? = nil, localWriteForwardingStatus: LocalWriteForwardingStatus? = nil, masterUsername: String? = nil, masterUserSecret: MasterUserSecret? = nil, monitoringInterval: Int? = nil, monitoringRoleArn: String? = nil, multiAZ: Bool? = nil, networkType: String? = nil, pendingModifiedValues: ClusterPendingModifiedValues? = nil, percentProgress: String? = nil, performanceInsightsEnabled: Bool? = nil, performanceInsightsKMSKeyId: String? = nil, performanceInsightsRetentionPeriod: Int? = nil, port: Int? = nil, preferredBackupWindow: String? = nil, preferredMaintenanceWindow: String? = nil, publiclyAccessible: Bool? 
= nil, rdsCustomClusterConfiguration: RdsCustomClusterConfiguration? = nil, readerEndpoint: String? = nil, readReplicaIdentifiers: [String]? = nil, replicationSourceIdentifier: String? = nil, scalingConfigurationInfo: ScalingConfigurationInfo? = nil, serverlessV2ScalingConfiguration: ServerlessV2ScalingConfigurationInfo? = nil, status: String? = nil, statusInfos: [DBClusterStatusInfo]? = nil, storageEncrypted: Bool? = nil, storageThroughput: Int? = nil, storageType: String? = nil, tagList: [Tag]? = nil, vpcSecurityGroups: [VpcSecurityGroupMembership]? = nil) { self.activityStreamKinesisStreamName = activityStreamKinesisStreamName self.activityStreamKmsKeyId = activityStreamKmsKeyId self.activityStreamMode = activityStreamMode @@ -2935,6 +2960,7 @@ extension RDS { self.characterSetName = characterSetName self.cloneGroupId = cloneGroupId self.clusterCreateTime = clusterCreateTime + self.clusterScalabilityType = clusterScalabilityType self.copyTagsToSnapshot = copyTagsToSnapshot self.crossAccountClone = crossAccountClone self.customEndpoints = customEndpoints @@ -3018,6 +3044,7 @@ extension RDS { case characterSetName = "CharacterSetName" case cloneGroupId = "CloneGroupId" case clusterCreateTime = "ClusterCreateTime" + case clusterScalabilityType = "ClusterScalabilityType" case copyTagsToSnapshot = "CopyTagsToSnapshot" case crossAccountClone = "CrossAccountClone" case customEndpoints = "CustomEndpoints" @@ -5071,10 +5098,14 @@ extension RDS { } public struct DBShardGroup: AWSDecodableShape { - /// Specifies whether to create standby instances for the DB shard group. Valid values are the following: 0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview. 1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. + public struct _TagListEncoding: ArrayCoderProperties { public static let member = "Tag" } + + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: 0 - Creates a DB shard group without a standby DB shard group. This is the default value. 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ). 2 - Creates a DB shard group with two standby DB shard groups in two different AZs. public let computeRedundancy: Int? /// The name of the primary DB cluster for the DB shard group. public let dbClusterIdentifier: String? + /// The Amazon Resource Name (ARN) for the DB shard group. + public let dbShardGroupArn: String? /// The name of the DB shard group. public let dbShardGroupIdentifier: String? /// The Amazon Web Services Region-unique, immutable identifier for the DB shard group. @@ -5089,11 +5120,14 @@ extension RDS { public let publiclyAccessible: Bool? /// The status of the DB shard group. public let status: String? + @OptionalCustomCoding<ArrayCoder<_TagListEncoding, Tag>> + public var tagList: [Tag]? @inlinable - public init(computeRedundancy: Int? = nil, dbClusterIdentifier: String? = nil, dbShardGroupIdentifier: String? = nil, dbShardGroupResourceId: String? = nil, endpoint: String? = nil, maxACU: Double? = nil, minACU: Double? = nil, publiclyAccessible: Bool? = nil, status: String? = nil) { + public init(computeRedundancy: Int? = nil, dbClusterIdentifier: String? = nil, dbShardGroupArn: String? = nil, dbShardGroupIdentifier: String? = nil, dbShardGroupResourceId: String?
= nil, endpoint: String? = nil, maxACU: Double? = nil, minACU: Double? = nil, publiclyAccessible: Bool? = nil, status: String? = nil, tagList: [Tag]? = nil) { self.computeRedundancy = computeRedundancy self.dbClusterIdentifier = dbClusterIdentifier + self.dbShardGroupArn = dbShardGroupArn self.dbShardGroupIdentifier = dbShardGroupIdentifier self.dbShardGroupResourceId = dbShardGroupResourceId self.endpoint = endpoint @@ -5101,11 +5135,13 @@ extension RDS { self.minACU = minACU self.publiclyAccessible = publiclyAccessible self.status = status + self.tagList = tagList } private enum CodingKeys: String, CodingKey { case computeRedundancy = "ComputeRedundancy" case dbClusterIdentifier = "DBClusterIdentifier" + case dbShardGroupArn = "DBShardGroupArn" case dbShardGroupIdentifier = "DBShardGroupIdentifier" case dbShardGroupResourceId = "DBShardGroupResourceId" case endpoint = "Endpoint" @@ -5113,6 +5149,7 @@ extension RDS { case minACU = "MinACU" case publiclyAccessible = "PubliclyAccessible" case status = "Status" + case tagList = "TagList" } } @@ -8487,6 +8524,7 @@ extension RDS { public struct GlobalCluster: AWSDecodableShape { public struct _GlobalClusterMembersEncoding: ArrayCoderProperties { public static let member = "GlobalClusterMember" } + public struct _TagListEncoding: ArrayCoderProperties { public static let member = "Tag" } /// The default database name within the new global database cluster. public let databaseName: String? @@ -8513,9 +8551,11 @@ extension RDS { public let status: String? /// The storage encryption setting for the global database cluster. public let storageEncrypted: Bool? + @OptionalCustomCoding<ArrayCoder<_TagListEncoding, Tag>> + public var tagList: [Tag]? @inlinable - public init(databaseName: String? = nil, deletionProtection: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, failoverState: FailoverState? = nil, globalClusterArn: String? = nil, globalClusterIdentifier: String? = nil, globalClusterMembers: [GlobalClusterMember]? = nil, globalClusterResourceId: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil) { + public init(databaseName: String? = nil, deletionProtection: Bool? = nil, engine: String? = nil, engineLifecycleSupport: String? = nil, engineVersion: String? = nil, failoverState: FailoverState? = nil, globalClusterArn: String? = nil, globalClusterIdentifier: String? = nil, globalClusterMembers: [GlobalClusterMember]? = nil, globalClusterResourceId: String? = nil, status: String? = nil, storageEncrypted: Bool? = nil, tagList: [Tag]? = nil) { self.databaseName = databaseName self.deletionProtection = deletionProtection self.engine = engine @@ -8528,6 +8568,7 @@ extension RDS { self.globalClusterResourceId = globalClusterResourceId self.status = status self.storageEncrypted = storageEncrypted + self.tagList = tagList } private enum CodingKeys: String, CodingKey { @@ -8543,6 +8584,7 @@ extension RDS { case globalClusterResourceId = "GlobalClusterResourceId" case status = "Status" case storageEncrypted = "StorageEncrypted" + case tagList = "TagList" } } @@ -9770,6 +9812,8 @@ extension RDS { } public struct ModifyDBShardGroupMessage: AWSEncodableShape { + /// Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following: 0 - Creates a DB shard group without a standby DB shard group. This is the default value. 1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).
2 - Creates a DB shard group with two standby DB shard groups in two different AZs. + public let computeRedundancy: Int? /// The name of the DB shard group to modify. public let dbShardGroupIdentifier: String? /// The maximum capacity of the DB shard group in Aurora capacity units (ACUs). @@ -9778,7 +9822,8 @@ extension RDS { public let minACU: Double? @inlinable - public init(dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil, minACU: Double? = nil) { + public init(computeRedundancy: Int? = nil, dbShardGroupIdentifier: String? = nil, maxACU: Double? = nil, minACU: Double? = nil) { + self.computeRedundancy = computeRedundancy self.dbShardGroupIdentifier = dbShardGroupIdentifier self.maxACU = maxACU self.minACU = minACU @@ -9791,6 +9836,7 @@ extension RDS { } private enum CodingKeys: String, CodingKey { + case computeRedundancy = "ComputeRedundancy" case dbShardGroupIdentifier = "DBShardGroupIdentifier" case maxACU = "MaxACU" case minACU = "MinACU" @@ -10804,7 +10850,7 @@ extension RDS { } public struct PendingMaintenanceAction: AWSDecodableShape { - /// The type of pending maintenance action that is available for the resource. For more information about maintenance actions, see Maintaining a DB instance. Valid Values: ca-certificate-rotation db-upgrade hardware-maintenance os-upgrade system-update For more information about these actions, see Maintenance actions for Amazon Aurora or Maintenance actions for Amazon RDS. + /// The type of pending maintenance action that is available for the resource. For more information about maintenance actions, see Maintaining a DB instance. Valid Values: system-update | db-upgrade | hardware-maintenance | ca-certificate-rotation public let action: String? /// The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. public let autoAppliedAfterDate: Date? @@ -12334,7 +12380,7 @@ extension RDS { public let engineLifecycleSupport: String? /// Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O operations per second. If this parameter isn't specified, the IOPS value is taken from the backup. If this parameter is set to 0, the new instance is converted to a non-PIOPS instance. The conversion takes additional time, though your DB instance is available for connections before the conversion starts. The provisioned IOPS value must follow the requirements for your database engine. For more information, see Amazon RDS Provisioned IOPS storage in the Amazon RDS User Guide. Constraints: Must be an integer greater than 1000. public let iops: Int? - /// License model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. + /// License model information for the restored DB instance. 
License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. public let licenseModel: String? /// Specifies whether the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom. Constraint: You can't specify the AvailabilityZone parameter if the DB instance is a Multi-AZ deployment. public let multiAZ: Bool? @@ -12764,7 +12810,7 @@ extension RDS { public let engineLifecycleSupport: String? /// The amount of Provisioned IOPS (input/output operations per second) to initially allocate for the DB instance. This setting doesn't apply to SQL Server. Constraints: Must be an integer greater than 1000. public let iops: Int? - /// The license model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. + /// The license model information for the restored DB instance. License models for RDS for Db2 require additional configuration. The Bring Your Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more information, see Amazon RDS for Db2 licensing options in the Amazon RDS User Guide. This setting doesn't apply to Amazon Aurora or RDS Custom DB instances. Valid Values: RDS for Db2 - bring-your-own-license | marketplace-license RDS for MariaDB - general-public-license RDS for Microsoft SQL Server - license-included RDS for MySQL - general-public-license RDS for Oracle - bring-your-own-license | license-included RDS for PostgreSQL - postgresql-license Default: Same as the source. public let licenseModel: String? /// The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance. For more information about this setting, including limitations that apply to it, see Managing capacity automatically with Amazon RDS storage autoscaling in the Amazon RDS User Guide. This setting doesn't apply to RDS Custom. public let maxAllocatedStorage: Int? 
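A hedged sketch of the Db2 licensing note above (not part of the diff; the snapshot, instance, and parameter group names are placeholders, and BYOL additionally assumes an Amazon Web Services License Manager self-managed license is already in place):

import SotoRDS

// Minimal sketch: restore an RDS for Db2 instance from a snapshot under the
// bring-your-own-license model, supplying the custom parameter group that BYOL requires.
func restoreDb2FromSnapshot(rds: RDS) async throws {
    let request = RDS.RestoreDBInstanceFromDBSnapshotMessage(
        dbInstanceIdentifier: "restored-db2-instance",
        dbParameterGroupName: "my-db2-byol-params",   // custom parameter group required for BYOL
        dbSnapshotIdentifier: "my-db2-snapshot",
        licenseModel: "bring-your-own-license"        // or "marketplace-license"
    )
    _ = try await rds.restoreDBInstanceFromDBSnapshot(request)
}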
@@ -14128,7 +14174,6 @@ public struct RDSErrorType: AWSErrorType { case invalidExportTaskStateFault = "InvalidExportTaskStateFault" case invalidGlobalClusterStateFault = "InvalidGlobalClusterStateFault" case invalidIntegrationStateFault = "InvalidIntegrationStateFault" - case invalidMaxAcuFault = "InvalidMaxAcu" case invalidOptionGroupStateFault = "InvalidOptionGroupStateFault" case invalidResourceStateFault = "InvalidResourceStateFault" case invalidRestoreFault = "InvalidRestoreFault" @@ -14398,8 +14443,6 @@ public struct RDSErrorType: AWSErrorType { public static var invalidGlobalClusterStateFault: Self { .init(.invalidGlobalClusterStateFault) } /// The integration is in an invalid state and can't perform the requested operation. public static var invalidIntegrationStateFault: Self { .init(.invalidIntegrationStateFault) } - /// The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs). - public static var invalidMaxAcuFault: Self { .init(.invalidMaxAcuFault) } /// The option group isn't in the available state. public static var invalidOptionGroupStateFault: Self { .init(.invalidOptionGroupStateFault) } /// The operation can't be performed because another operation is in progress. diff --git a/Sources/Soto/Services/RDSData/RDSData_api.swift b/Sources/Soto/Services/RDSData/RDSData_api.swift index 5c0f2f8b33..3bc9f1cb0f 100644 --- a/Sources/Soto/Services/RDSData/RDSData_api.swift +++ b/Sources/Soto/Services/RDSData/RDSData_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS RDSData service. /// -/// RDS Data API Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora DB cluster. To run these statements, you use the RDS Data API (Data API). Data API is available with the following types of Aurora databases: Aurora PostgreSQL - Serverless v2, Serverless v1, and provisioned Aurora MySQL - Serverless v1 only For more information about the Data API, see Using RDS Data API in the Amazon Aurora User Guide. +/// RDS Data API Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora DB cluster. To run these statements, you use the RDS Data API (Data API). Data API is available with the following types of Aurora databases: Aurora PostgreSQL - Serverless v2, provisioned, and Serverless v1 Aurora MySQL - Serverless v2, provisioned, and Serverless v1 For more information about the Data API, see Using RDS Data API in the Amazon Aurora User Guide. public struct RDSData: AWSService { // MARK: Member variables @@ -136,7 +136,7 @@ public struct RDSData: AWSService { return try await self.batchExecuteStatement(input, logger: logger) } - /// Starts a SQL transaction. A transaction can run for a maximum of 24 hours. A transaction is terminated and rolled back automatically after 24 hours. A transaction times out if no calls use its transaction ID in three minutes. If a transaction times out before it's committed, it's rolled back automatically. DDL statements inside a transaction cause an implicit commit. We recommend that you run each DDL statement in a separate ExecuteStatement call with continueAfterTimeout enabled. + /// Starts a SQL transaction. A transaction can run for a maximum of 24 hours. A transaction is terminated and rolled back automatically after 24 hours. A transaction times out if no calls use its transaction ID in three minutes. If a transaction times out before it's committed, it's rolled back automatically. 
For Aurora MySQL, DDL statements inside a transaction cause an implicit commit. We recommend that you run each MySQL DDL statement in a separate ExecuteStatement call with continueAfterTimeout enabled. @Sendable @inlinable public func beginTransaction(_ input: BeginTransactionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BeginTransactionResponse { @@ -149,7 +149,7 @@ public struct RDSData: AWSService { logger: logger ) } - /// Starts a SQL transaction. A transaction can run for a maximum of 24 hours. A transaction is terminated and rolled back automatically after 24 hours. A transaction times out if no calls use its transaction ID in three minutes. If a transaction times out before it's committed, it's rolled back automatically. DDL statements inside a transaction cause an implicit commit. We recommend that you run each DDL statement in a separate ExecuteStatement call with continueAfterTimeout enabled. + /// Starts a SQL transaction. A transaction can run for a maximum of 24 hours. A transaction is terminated and rolled back automatically after 24 hours. A transaction times out if no calls use its transaction ID in three minutes. If a transaction times out before it's committed, it's rolled back automatically. For Aurora MySQL, DDL statements inside a transaction cause an implicit commit. We recommend that you run each MySQL DDL statement in a separate ExecuteStatement call with continueAfterTimeout enabled. /// /// Parameters: /// - database: The name of the database. @@ -209,7 +209,7 @@ public struct RDSData: AWSService { return try await self.commitTransaction(input, logger: logger) } - /// Runs one or more SQL statements. This operation isn't supported for Aurora PostgreSQL Serverless v2 and provisioned DB clusters, and for Aurora Serverless v1 DB clusters, the operation is deprecated. Use the BatchExecuteStatement or ExecuteStatement operation. + /// Runs one or more SQL statements. This operation isn't supported for Aurora Serverless v2 and provisioned DB clusters. For Aurora Serverless v1 DB clusters, the operation is deprecated. Use the BatchExecuteStatement or ExecuteStatement operation. @available(*, deprecated, message: "The ExecuteSql API is deprecated, please use the ExecuteStatement API.") @Sendable @inlinable @@ -223,7 +223,7 @@ public struct RDSData: AWSService { logger: logger ) } - /// Runs one or more SQL statements. This operation isn't supported for Aurora PostgreSQL Serverless v2 and provisioned DB clusters, and for Aurora Serverless v1 DB clusters, the operation is deprecated. Use the BatchExecuteStatement or ExecuteStatement operation. + /// Runs one or more SQL statements. This operation isn't supported for Aurora Serverless v2 and provisioned DB clusters. For Aurora Serverless v1 DB clusters, the operation is deprecated. Use the BatchExecuteStatement or ExecuteStatement operation. /// /// Parameters: /// - awsSecretStoreArn: The Amazon Resource Name (ARN) of the secret that enables access to the DB cluster. Enter the database user name and password for the credentials in the secret. For information about creating the secret, see Create a database secret. 
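A hedged usage sketch of the transaction guidance above (not from the diff; the cluster and secret ARNs, database name, and SQL text are placeholders, and an Aurora MySQL target is assumed):

import SotoRDSData

// Minimal sketch: run a DML statement inside a Data API transaction, then send a MySQL DDL
// statement in its own ExecuteStatement call with continueAfterTimeout enabled, outside the
// transaction, to avoid the implicit commit described above.
func runTransaction(dataAPI: RDSData, clusterArn: String, secretArn: String) async throws {
    let begin = try await dataAPI.beginTransaction(
        database: "mydb",
        resourceArn: clusterArn,
        secretArn: secretArn
    )
    _ = try await dataAPI.executeStatement(
        database: "mydb",
        resourceArn: clusterArn,
        secretArn: secretArn,
        sql: "INSERT INTO accounts (id, balance) VALUES (1, 100)",
        transactionId: begin.transactionId
    )
    _ = try await dataAPI.commitTransaction(
        resourceArn: clusterArn,
        secretArn: secretArn,
        transactionId: begin.transactionId
    )
    // DDL runs separately, not inside the transaction.
    _ = try await dataAPI.executeStatement(
        continueAfterTimeout: true,
        database: "mydb",
        resourceArn: clusterArn,
        secretArn: secretArn,
        sql: "ALTER TABLE accounts ADD COLUMN note VARCHAR(64)"
    )
}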
diff --git a/Sources/Soto/Services/Redshift/Redshift_api.swift b/Sources/Soto/Services/Redshift/Redshift_api.swift index 36976f90e3..73d64b602a 100644 --- a/Sources/Soto/Services/Redshift/Redshift_api.swift +++ b/Sources/Soto/Services/Redshift/Redshift_api.swift @@ -585,7 +585,7 @@ public struct Redshift: AWSService { /// - masterUsername: The user name associated with the admin user account for the cluster that is being created. Constraints: Must be 1 - 128 alphanumeric characters or hyphens. The user name can't be PUBLIC. Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen. The first character must be a letter. Must not contain a colon (:) or a slash (/). Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide. /// - masterUserPassword: The password associated with the admin user account for the cluster that is being created. You can't use MasterUserPassword if ManageMasterPassword is true. Constraints: Must be between 8 and 64 characters in length. Must contain at least one uppercase letter. Must contain at least one lowercase letter. Must contain one number. Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), " (double quote), \, /, or @. /// - multiAZ: If true, Amazon Redshift will deploy the cluster in two Availability Zones (AZ). - /// - nodeType: The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge + /// - nodeType: The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.large | ra3.xlplus | ra3.4xlarge | ra3.16xlarge /// - numberOfNodes: The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. For information about determining how many nodes you need, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. If you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster. Default: 1 Constraints: Value must be at least 1 and no more than 100. /// - port: The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default: 5439 Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with dc2 nodes - Select a port within the range 1150-65535. /// - preferredMaintenanceWindow: The weekly time range (in UTC) during which automated cluster maintenance can occur. Format: ddd:hh24:mi-ddd:hh24:mi Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week. For more information about the time blocks for each region, see Maintenance Windows in Amazon Redshift Cluster Management Guide. Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun Constraints: Minimum 30-minute window. 
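A hedged sketch of provisioning on the newly listed ra3.large node type (not from the diff; the Redshift service object, identifiers, and password are placeholder assumptions):

import SotoRedshift

// Minimal sketch: create a two-node ra3.large cluster. The password must meet the
// constraints described above; every value here is a placeholder.
func createRa3Cluster(redshift: Redshift) async throws {
    _ = try await redshift.createCluster(
        clusterIdentifier: "analytics-cluster",
        clusterType: "multi-node",
        masterUsername: "awsuser",
        masterUserPassword: "Change-me-1",
        nodeType: "ra3.large",
        numberOfNodes: 2,
        port: 5439
    )
}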
@@ -1060,6 +1060,53 @@ public struct Redshift: AWSService { return try await self.createHsmConfiguration(input, logger: logger) } + /// Creates a zero-ETL integration with Amazon Redshift. + @Sendable + @inlinable + public func createIntegration(_ input: CreateIntegrationMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> Integration { + try await self.client.execute( + operation: "CreateIntegration", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a zero-ETL integration with Amazon Redshift. + /// + /// Parameters: + /// - additionalEncryptionContext: An optional set of non-secret key–value pairs that contains additional contextual information about the data. For more information, see Encryption context in the Amazon Web Services Key Management Service Developer Guide. You can only include this parameter if you specify the KMSKeyId parameter. + /// - description: A description of the integration. + /// - integrationName: The name of the integration. + /// - kmsKeyId: An Key Management Service (KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, the default Amazon Web Services owned key is used. + /// - sourceArn: The Amazon Resource Name (ARN) of the database to use as the source for replication. + /// - tagList: A list of tags. + /// - targetArn: The Amazon Resource Name (ARN) of the Amazon Redshift data warehouse to use as the target for replication. + /// - logger: Logger use during operation + @inlinable + public func createIntegration( + additionalEncryptionContext: [String: String]? = nil, + description: String? = nil, + integrationName: String? = nil, + kmsKeyId: String? = nil, + sourceArn: String? = nil, + tagList: [Tag]? = nil, + targetArn: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> Integration { + let input = CreateIntegrationMessage( + additionalEncryptionContext: additionalEncryptionContext, + description: description, + integrationName: integrationName, + kmsKeyId: kmsKeyId, + sourceArn: sourceArn, + tagList: tagList, + targetArn: targetArn + ) + return try await self.createIntegration(input, logger: logger) + } + /// Creates an Amazon Redshift application for use with IAM Identity Center. @Sendable @inlinable @@ -1705,6 +1752,35 @@ public struct Redshift: AWSService { return try await self.deleteHsmConfiguration(input, logger: logger) } + /// Deletes a zero-ETL integration with Amazon Redshift. + @Sendable + @inlinable + public func deleteIntegration(_ input: DeleteIntegrationMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> Integration { + try await self.client.execute( + operation: "DeleteIntegration", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Deletes a zero-ETL integration with Amazon Redshift. + /// + /// Parameters: + /// - integrationArn: The unique identifier of the integration to delete. + /// - logger: Logger use during operation + @inlinable + public func deleteIntegration( + integrationArn: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> Integration { + let input = DeleteIntegrationMessage( + integrationArn: integrationArn + ) + return try await self.deleteIntegration(input, logger: logger) + } + /// Deletes a partner integration from a cluster. Data can still flow to the cluster until the integration is deleted at the partner's website. 
@Sendable @inlinable @@ -2924,6 +3000,44 @@ public struct Redshift: AWSService { return try await self.describeInboundIntegrations(input, logger: logger) } + /// Describes one or more zero-ETL integrations with Amazon Redshift. + @Sendable + @inlinable + public func describeIntegrations(_ input: DescribeIntegrationsMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> IntegrationsMessage { + try await self.client.execute( + operation: "DescribeIntegrations", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Describes one or more zero-ETL integrations with Amazon Redshift. + /// + /// Parameters: + /// - filters: A filter that specifies one or more resources to return. + /// - integrationArn: The unique identifier of the integration. + /// - marker: An optional pagination token provided by a previous DescribeIntegrations request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. + /// - maxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100 Constraints: minimum 20, maximum 100. + /// - logger: Logger use during operation + @inlinable + public func describeIntegrations( + filters: [DescribeIntegrationsFilter]? = nil, + integrationArn: String? = nil, + marker: String? = nil, + maxRecords: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> IntegrationsMessage { + let input = DescribeIntegrationsMessage( + filters: filters, + integrationArn: integrationArn, + marker: marker, + maxRecords: maxRecords + ) + return try await self.describeIntegrations(input, logger: logger) + } + /// Describes whether information, such as queries and connection attempts, is being logged for the specified Amazon Redshift cluster. @Sendable @inlinable @@ -3474,7 +3588,7 @@ public struct Redshift: AWSService { /// - marker: A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the marker parameter and retrying the command. If the marker field is empty, all response records have been retrieved for the request. /// - maxRecords: The maximum number or response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. /// - resourceName: The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1. - /// - resourceType: The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide. + /// - resourceType: The type of resource with which you want to view tags. 
Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant Integration (zero-ETL integration) To describe the tags associated with an integration, don't specify ResourceType, instead specify the ResourceName of the integration. For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide. /// - tagKeys: A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them. /// - tagValues: A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them. /// - logger: Logger use during operation @@ -3662,7 +3776,7 @@ public struct Redshift: AWSService { /// - clusterIdentifier: The identifier of the cluster on which logging is to be started. Example: examplecluster /// - logDestinationType: The log destination type. An enum with possible values of s3 and cloudwatch. /// - logExports: The collection of exported log types. Possible values are connectionlog, useractivitylog, and userlog. - /// - s3KeyPrefix: The prefix applied to the log file names. Constraints: Cannot exceed 512 characters Cannot contain spaces( ), double quotes ("), single quotes ('), a backslash (\), or control characters. The hexadecimal codes for invalid characters are: x00 to x20 x22 x27 x5c x7f or larger + /// - s3KeyPrefix: The prefix applied to the log file names. Valid characters are any letter from any language, any whitespace character, any numeric character, and the following characters: underscore (_), period (.), colon (:), slash (/), equal (=), plus (+), backslash (\), hyphen (-), at symbol (@). /// - logger: Logger use during operation @inlinable public func enableLogging( @@ -4535,6 +4649,41 @@ public struct Redshift: AWSService { return try await self.modifyEventSubscription(input, logger: logger) } + /// Modifies a zero-ETL integration with Amazon Redshift. + @Sendable + @inlinable + public func modifyIntegration(_ input: ModifyIntegrationMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> Integration { + try await self.client.execute( + operation: "ModifyIntegration", + path: "/", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Modifies a zero-ETL integration with Amazon Redshift. + /// + /// Parameters: + /// - description: A new description for the integration. + /// - integrationArn: The unique identifier of the integration to modify. + /// - integrationName: A new name for the integration. + /// - logger: Logger use during operation + @inlinable + public func modifyIntegration( + description: String? = nil, + integrationArn: String? = nil, + integrationName: String? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> Integration { + let input = ModifyIntegrationMessage( + description: description, + integrationArn: integrationArn, + integrationName: integrationName + ) + return try await self.modifyIntegration(input, logger: logger) + } + /// Changes an existing Amazon Redshift IAM Identity Center application. @Sendable @inlinable @@ -4929,7 +5078,7 @@ public struct Redshift: AWSService { return try await self.resetClusterParameterGroup(input, logger: logger) } - /// Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster. + /// Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.large ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster. @Sendable @inlinable public func resizeCluster(_ input: ResizeClusterMessage, logger: Logger = AWSClient.loggingDisabled) async throws -> ResizeClusterResult { @@ -4942,7 +5091,7 @@ public struct Redshift: AWSService { logger: logger ) } - /// Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster. + /// Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.large ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster. /// /// Parameters: /// - classic: A boolean value indicating whether the resize operation is using the classic resize process. If you don't provide this parameter or set the value to false, the resize type is elastic. @@ -6308,6 +6457,46 @@ extension Redshift { return self.describeInboundIntegrationsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``describeIntegrations(_:logger:)``. 
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func describeIntegrationsPaginator( + _ input: DescribeIntegrationsMessage, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.describeIntegrations, + inputKey: \DescribeIntegrationsMessage.marker, + outputKey: \IntegrationsMessage.marker, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``describeIntegrations(_:logger:)``. + /// + /// - Parameters: + /// - filters: A filter that specifies one or more resources to return. + /// - integrationArn: The unique identifier of the integration. + /// - maxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100 Constraints: minimum 20, maximum 100. + /// - logger: Logger used for logging + @inlinable + public func describeIntegrationsPaginator( + filters: [DescribeIntegrationsFilter]? = nil, + integrationArn: String? = nil, + maxRecords: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = DescribeIntegrationsMessage( + filters: filters, + integrationArn: integrationArn, + maxRecords: maxRecords + ) + return self.describeIntegrationsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``describeNodeConfigurationOptions(_:logger:)``. /// /// - Parameters: @@ -6755,7 +6944,7 @@ extension Redshift { /// - Parameters: /// - maxRecords: The maximum number or response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. /// - resourceName: The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1. - /// - resourceType: The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide. + /// - resourceType: The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant Integration (zero-ETL integration) To describe the tags associated with an integration, don't specify ResourceType, instead specify the ResourceName of the integration. For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide. /// - tagKeys: A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. 
If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them. /// - tagValues: A tag value or values for which you want to return all matching resources that are associated with the specified value or values. For example, suppose that you have resources tagged with values called admin and test. If you specify both of these tag values in the request, Amazon Redshift returns a response with all resources that have either or both of these tag values associated with them. /// - logger: Logger used for logging @@ -7218,6 +7407,18 @@ extension Redshift.DescribeInboundIntegrationsMessage: AWSPaginateToken { } } +extension Redshift.DescribeIntegrationsMessage: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Redshift.DescribeIntegrationsMessage { + return .init( + filters: self.filters, + integrationArn: self.integrationArn, + marker: token, + maxRecords: self.maxRecords + ) + } +} + extension Redshift.DescribeNodeConfigurationOptionsMessage: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> Redshift.DescribeNodeConfigurationOptionsMessage { diff --git a/Sources/Soto/Services/Redshift/Redshift_shapes.swift b/Sources/Soto/Services/Redshift/Redshift_shapes.swift index 019d5a8713..4b29a3aefe 100644 --- a/Sources/Soto/Services/Redshift/Redshift_shapes.swift +++ b/Sources/Soto/Services/Redshift/Redshift_shapes.swift @@ -78,6 +78,14 @@ extension Redshift { public var description: String { return self.rawValue } } + public enum DescribeIntegrationsFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case integrationArn = "integration-arn" + case sourceArn = "source-arn" + case sourceTypes = "source-types" + case status = "status" + public var description: String { return self.rawValue } + } + public enum ImpactRankingType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case high = "HIGH" case low = "LOW" @@ -1740,7 +1748,7 @@ extension Redshift { public let masterUserPassword: String? /// If true, Amazon Redshift will deploy the cluster in two Availability Zones (AZ). public let multiAZ: Bool? - /// The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge + /// The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.large | ra3.xlplus | ra3.4xlarge | ra3.16xlarge public let nodeType: String? /// The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. For information about determining how many nodes you need, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. If you don't specify this parameter, you get a single-node cluster. When requesting a multi-node cluster, you must specify the number of nodes that you want in the cluster. Default: 1 Constraints: Value must be at least 1 and no more than 100. public let numberOfNodes: Int? 
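As a usage sketch (not part of the patch), the new DescribeIntegrations operation, its paginator, and integration tagging via the existing DescribeTags call can be exercised as below; the integration ARN and the filter value are placeholders, and `redshift` is assumed to be an already-configured service object.

import SotoRedshift

// Illustrative only: `redshift` is an already-configured Redshift service object,
// and the ARN below is a placeholder.
func printZeroETLIntegrations(_ redshift: Redshift) async throws {
    // One page, via the convenience overload added above.
    let page = try await redshift.describeIntegrations(maxRecords: 20)
    for integration in page.integrations ?? [] {
        print(integration.integrationName ?? "?", integration.integrationArn ?? "?")
    }

    // Every page, via the new paginator (filter value is a placeholder).
    let filter = Redshift.DescribeIntegrationsFilter(name: .status, values: ["active"])
    for try await integrationPage in redshift.describeIntegrationsPaginator(filters: [filter]) {
        print("page with \(integrationPage.integrations?.count ?? 0) integrations")
    }

    // Tags on an integration: pass only ResourceName, not ResourceType,
    // per the updated DescribeTags documentation.
    _ = try await redshift.describeTags(
        resourceName: "arn:aws:redshift:us-east-2:123456789012:integration:00000000-0000-0000-0000-000000000000"
    )
}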
@@ -2385,6 +2393,66 @@ extension Redshift { } } + public struct CreateIntegrationMessage: AWSEncodableShape { + public struct _TagListEncoding: ArrayCoderProperties { public static let member = "Tag" } + + /// An optional set of non-secret key–value pairs that contains additional contextual information about the data. For more information, see Encryption context in the Amazon Web Services Key Management Service Developer Guide. You can only include this parameter if you specify the KMSKeyId parameter. + @OptionalCustomCoding> + public var additionalEncryptionContext: [String: String]? + /// A description of the integration. + public let description: String? + /// The name of the integration. + public let integrationName: String? + /// An Key Management Service (KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, the default Amazon Web Services owned key is used. + public let kmsKeyId: String? + /// The Amazon Resource Name (ARN) of the database to use as the source for replication. + public let sourceArn: String? + /// A list of tags. + @OptionalCustomCoding> + public var tagList: [Tag]? + /// The Amazon Resource Name (ARN) of the Amazon Redshift data warehouse to use as the target for replication. + public let targetArn: String? + + @inlinable + public init(additionalEncryptionContext: [String: String]? = nil, description: String? = nil, integrationName: String? = nil, kmsKeyId: String? = nil, sourceArn: String? = nil, tagList: [Tag]? = nil, targetArn: String? = nil) { + self.additionalEncryptionContext = additionalEncryptionContext + self.description = description + self.integrationName = integrationName + self.kmsKeyId = kmsKeyId + self.sourceArn = sourceArn + self.tagList = tagList + self.targetArn = targetArn + } + + public func validate(name: String) throws { + try self.additionalEncryptionContext?.forEach { + try validate($0.key, name: "additionalEncryptionContext.key", parent: name, max: 2147483647) + try validate($0.value, name: "additionalEncryptionContext[\"\($0.key)\"]", parent: name, max: 2147483647) + } + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.validate(self.description, name: "description", parent: name, pattern: "^.*$") + try self.validate(self.integrationName, name: "integrationName", parent: name, max: 63) + try self.validate(self.integrationName, name: "integrationName", parent: name, min: 1) + try self.validate(self.integrationName, name: "integrationName", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$") + try self.validate(self.kmsKeyId, name: "kmsKeyId", parent: name, max: 2147483647) + try self.validate(self.sourceArn, name: "sourceArn", parent: name, max: 2147483647) + try self.tagList?.forEach { + try $0.validate(name: "\(name).tagList[]") + } + try self.validate(self.targetArn, name: "targetArn", parent: name, max: 2147483647) + } + + private enum CodingKeys: String, CodingKey { + case additionalEncryptionContext = "AdditionalEncryptionContext" + case description = "Description" + case integrationName = "IntegrationName" + case kmsKeyId = "KMSKeyId" + case sourceArn = "SourceArn" + case tagList = "TagList" + case targetArn = "TargetArn" + } + } + public struct CreateRedshiftIdcApplicationMessage: AWSEncodableShape { /// The token issuer list for the Amazon Redshift IAM Identity Center application instance. 
@OptionalCustomCoding> @@ -3151,6 +3219,26 @@ extension Redshift { } } + public struct DeleteIntegrationMessage: AWSEncodableShape { + /// The unique identifier of the integration to delete. + public let integrationArn: String? + + @inlinable + public init(integrationArn: String? = nil) { + self.integrationArn = integrationArn + } + + public func validate(name: String) throws { + try self.validate(self.integrationArn, name: "integrationArn", parent: name, max: 255) + try self.validate(self.integrationArn, name: "integrationArn", parent: name, min: 1) + try self.validate(self.integrationArn, name: "integrationArn", parent: name, pattern: "^arn:aws[a-z\\-]*:redshift:[a-z0-9\\-]*:[0-9]*:integration:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case integrationArn = "IntegrationArn" + } + } + public struct DeleteRedshiftIdcApplicationMessage: AWSEncodableShape { /// The ARN for a deleted Amazon Redshift IAM Identity Center application. public let redshiftIdcApplicationArn: String? @@ -4260,6 +4348,72 @@ extension Redshift { } } + public struct DescribeIntegrationsFilter: AWSEncodableShape { + public struct _ValuesEncoding: ArrayCoderProperties { public static let member = "Value" } + + /// Specifies the type of integration filter. + public let name: DescribeIntegrationsFilterName? + /// Specifies the values to filter on. + @OptionalCustomCoding> + public var values: [String]? + + @inlinable + public init(name: DescribeIntegrationsFilterName? = nil, values: [String]? = nil) { + self.name = name + self.values = values + } + + public func validate(name: String) throws { + try self.values?.forEach { + try validate($0, name: "values[]", parent: name, max: 2147483647) + } + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + case values = "Values" + } + } + + public struct DescribeIntegrationsMessage: AWSEncodableShape { + public struct _FiltersEncoding: ArrayCoderProperties { public static let member = "DescribeIntegrationsFilter" } + + /// A filter that specifies one or more resources to return. + @OptionalCustomCoding> + public var filters: [DescribeIntegrationsFilter]? + /// The unique identifier of the integration. + public let integrationArn: String? + /// An optional pagination token provided by a previous DescribeIntegrations request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords. + public let marker: String? + /// The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100 Constraints: minimum 20, maximum 100. + public let maxRecords: Int? + + @inlinable + public init(filters: [DescribeIntegrationsFilter]? = nil, integrationArn: String? = nil, marker: String? = nil, maxRecords: Int? 
= nil) { + self.filters = filters + self.integrationArn = integrationArn + self.marker = marker + self.maxRecords = maxRecords + } + + public func validate(name: String) throws { + try self.filters?.forEach { + try $0.validate(name: "\(name).filters[]") + } + try self.validate(self.integrationArn, name: "integrationArn", parent: name, max: 255) + try self.validate(self.integrationArn, name: "integrationArn", parent: name, min: 1) + try self.validate(self.integrationArn, name: "integrationArn", parent: name, pattern: "^arn:aws[a-z\\-]*:redshift:[a-z0-9\\-]*:[0-9]*:integration:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.marker, name: "marker", parent: name, max: 2147483647) + } + + private enum CodingKeys: String, CodingKey { + case filters = "Filters" + case integrationArn = "IntegrationArn" + case marker = "Marker" + case maxRecords = "MaxRecords" + } + } + public struct DescribeLoggingStatusMessage: AWSEncodableShape { /// The identifier of the cluster from which to get the logging status. Example: examplecluster public let clusterIdentifier: String? @@ -4805,7 +4959,7 @@ extension Redshift { public let maxRecords: Int? /// The Amazon Resource Name (ARN) for which you want to describe the tag or tags. For example, arn:aws:redshift:us-east-2:123456789:cluster:t1. public let resourceName: String? - /// The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide. + /// The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant Integration (zero-ETL integration) To describe the tags associated with an integration, don't specify ResourceType, instead specify the ResourceName of the integration. For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide. public let resourceType: String? /// A tag key or keys for which you want to return all matching resources that are associated with the specified key or keys. For example, suppose that you have resources tagged with keys called owner and environment. If you specify both of these tag keys in the request, Amazon Redshift returns a response with all resources that have either or both of these tag keys associated with them. @OptionalCustomCoding> @@ -5039,7 +5193,7 @@ extension Redshift { /// The collection of exported log types. Possible values are connectionlog, useractivitylog, and userlog. @OptionalCustomCoding> public var logExports: [String]? - /// The prefix applied to the log file names. Constraints: Cannot exceed 512 characters Cannot contain spaces( ), double quotes ("), single quotes ('), a backslash (\), or control characters. The hexadecimal codes for invalid characters are: x00 to x20 x22 x27 x5c x7f or larger + /// The prefix applied to the log file names. 
Valid characters are any letter from any language, any whitespace character, any numeric character, and the following characters: underscore (_), period (.), colon (:), slash (/), equal (=), plus (+), backslash (\), hyphen (-), at symbol (@). public let s3KeyPrefix: String? @inlinable @@ -5057,7 +5211,8 @@ extension Redshift { try self.logExports?.forEach { try validate($0, name: "logExports[]", parent: name, max: 2147483647) } - try self.validate(self.s3KeyPrefix, name: "s3KeyPrefix", parent: name, max: 2147483647) + try self.validate(self.s3KeyPrefix, name: "s3KeyPrefix", parent: name, max: 256) + try self.validate(self.s3KeyPrefix, name: "s3KeyPrefix", parent: name, pattern: "^[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$") } private enum CodingKeys: String, CodingKey { @@ -5949,6 +6104,66 @@ extension Redshift { } } + public struct Integration: AWSDecodableShape { + public struct _ErrorsEncoding: ArrayCoderProperties { public static let member = "IntegrationError" } + public struct _TagsEncoding: ArrayCoderProperties { public static let member = "Tag" } + + /// The encryption context for the integration. For more information, see Encryption context in the Amazon Web Services Key Management Service Developer Guide. + @OptionalCustomCoding> + public var additionalEncryptionContext: [String: String]? + /// The time (UTC) when the integration was created. + public let createTime: Date? + /// The description of the integration. + public let description: String? + /// Any errors associated with the integration. + @OptionalCustomCoding> + public var errors: [IntegrationError]? + /// The Amazon Resource Name (ARN) of the integration. + public let integrationArn: String? + /// The name of the integration. + public let integrationName: String? + /// The Key Management Service (KMS) key identifier for the key used to encrypt the integration. + public let kmsKeyId: String? + /// The Amazon Resource Name (ARN) of the database used as the source for replication. + public let sourceArn: String? + /// The current status of the integration. + public let status: ZeroETLIntegrationStatus? + /// The list of tags associated with the integration. + @OptionalCustomCoding> + public var tags: [Tag]? + /// The Amazon Resource Name (ARN) of the Amazon Redshift data warehouse to use as the target for replication. + public let targetArn: String? + + @inlinable + public init(additionalEncryptionContext: [String: String]? = nil, createTime: Date? = nil, description: String? = nil, errors: [IntegrationError]? = nil, integrationArn: String? = nil, integrationName: String? = nil, kmsKeyId: String? = nil, sourceArn: String? = nil, status: ZeroETLIntegrationStatus? = nil, tags: [Tag]? = nil, targetArn: String? 
= nil) { + self.additionalEncryptionContext = additionalEncryptionContext + self.createTime = createTime + self.description = description + self.errors = errors + self.integrationArn = integrationArn + self.integrationName = integrationName + self.kmsKeyId = kmsKeyId + self.sourceArn = sourceArn + self.status = status + self.tags = tags + self.targetArn = targetArn + } + + private enum CodingKeys: String, CodingKey { + case additionalEncryptionContext = "AdditionalEncryptionContext" + case createTime = "CreateTime" + case description = "Description" + case errors = "Errors" + case integrationArn = "IntegrationArn" + case integrationName = "IntegrationName" + case kmsKeyId = "KMSKeyId" + case sourceArn = "SourceArn" + case status = "Status" + case tags = "Tags" + case targetArn = "TargetArn" + } + } + public struct IntegrationError: AWSDecodableShape { /// The error code of an inbound integration error. public let errorCode: String? @@ -5967,6 +6182,27 @@ extension Redshift { } } + public struct IntegrationsMessage: AWSDecodableShape { + public struct _IntegrationsEncoding: ArrayCoderProperties { public static let member = "Integration" } + + /// List of integrations that are described. + @OptionalCustomCoding> + public var integrations: [Integration]? + /// A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request. + public let marker: String? + + @inlinable + public init(integrations: [Integration]? = nil, marker: String? = nil) { + self.integrations = integrations + self.marker = marker + } + + private enum CodingKeys: String, CodingKey { + case integrations = "Integrations" + case marker = "Marker" + } + } + public struct LakeFormationQuery: AWSEncodableShape & AWSDecodableShape { /// Determines whether the query scope is enabled or disabled. public let authorization: ServiceAuthorization? @@ -6380,7 +6616,7 @@ extension Redshift { /// The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter. /// For more information about resizing clusters, go to /// Resizing Clusters in Amazon Redshift - /// in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge + /// in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.large | ra3.xlplus | ra3.4xlarge | ra3.16xlarge public let nodeType: String? /// The new number of nodes of the cluster. If you specify a new number of nodes, you must also specify the node type parameter. /// For more information about resizing clusters, go to @@ -6795,6 +7031,39 @@ extension Redshift { } } + public struct ModifyIntegrationMessage: AWSEncodableShape { + /// A new description for the integration. + public let description: String? + /// The unique identifier of the integration to modify. + public let integrationArn: String? + /// A new name for the integration. + public let integrationName: String? + + @inlinable + public init(description: String? = nil, integrationArn: String? = nil, integrationName: String? 
= nil) { + self.description = description + self.integrationArn = integrationArn + self.integrationName = integrationName + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 1000) + try self.validate(self.description, name: "description", parent: name, pattern: "^.*$") + try self.validate(self.integrationArn, name: "integrationArn", parent: name, max: 255) + try self.validate(self.integrationArn, name: "integrationArn", parent: name, min: 1) + try self.validate(self.integrationArn, name: "integrationArn", parent: name, pattern: "^arn:aws[a-z\\-]*:redshift:[a-z0-9\\-]*:[0-9]*:integration:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$") + try self.validate(self.integrationName, name: "integrationName", parent: name, max: 63) + try self.validate(self.integrationName, name: "integrationName", parent: name, min: 1) + try self.validate(self.integrationName, name: "integrationName", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$") + } + + private enum CodingKeys: String, CodingKey { + case description = "Description" + case integrationArn = "IntegrationArn" + case integrationName = "IntegrationName" + } + } + public struct ModifyRedshiftIdcApplicationMessage: AWSEncodableShape { /// The authorized token issuer list for the Amazon Redshift IAM Identity Center application to change. @OptionalCustomCoding> @@ -9603,7 +9872,13 @@ public struct RedshiftErrorType: AWSErrorType { case incompatibleOrderableOptions = "IncompatibleOrderableOptions" case insufficientClusterCapacityFault = "InsufficientClusterCapacity" case insufficientS3BucketPolicyFault = "InsufficientS3BucketPolicyFault" + case integrationAlreadyExistsFault = "IntegrationAlreadyExistsFault" + case integrationConflictOperationFault = "IntegrationConflictOperationFault" + case integrationConflictStateFault = "IntegrationConflictStateFault" case integrationNotFoundFault = "IntegrationNotFoundFault" + case integrationQuotaExceededFault = "IntegrationQuotaExceededFault" + case integrationSourceNotFoundFault = "IntegrationSourceNotFoundFault" + case integrationTargetNotFoundFault = "IntegrationTargetNotFoundFault" case invalidAuthenticationProfileRequestFault = "InvalidAuthenticationProfileRequestFault" case invalidAuthorizationStateFault = "InvalidAuthorizationState" case invalidClusterParameterGroupStateFault = "InvalidClusterParameterGroupState" @@ -9828,8 +10103,20 @@ public struct RedshiftErrorType: AWSErrorType { public static var insufficientClusterCapacityFault: Self { .init(.insufficientClusterCapacityFault) } /// The cluster does not have read bucket or put object permissions on the S3 bucket specified when enabling logging. public static var insufficientS3BucketPolicyFault: Self { .init(.insufficientS3BucketPolicyFault) } + /// The integration you are trying to create already exists. + public static var integrationAlreadyExistsFault: Self { .init(.integrationAlreadyExistsFault) } + /// A conflicting conditional operation is currently in progress against this resource. This typically occurs when there are multiple requests being made to the same resource at the same time, and these requests conflict with each other. + public static var integrationConflictOperationFault: Self { .init(.integrationConflictOperationFault) } + /// The integration is in an invalid state and can't perform the requested operation. 
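The new integration error cases pair naturally with ModifyIntegration. A hedged sketch, not part of the patch: the ARN and names are placeholders, and the equality-based catches assume the Equatable comparison Soto generates for its service error types.

import SotoRedshift

// Illustrative only: the ARN is a placeholder, and the `where error == ...`
// clauses assume Soto's usual generated Equatable conformance on error types.
func renameIntegration(_ redshift: Redshift, integrationArn: String) async throws {
    do {
        let integration = try await redshift.modifyIntegration(
            description: "Orders zero-ETL feed",
            integrationArn: integrationArn,
            integrationName: "orders-zero-etl"
        )
        print("modified:", integration.integrationName ?? "?")
    } catch let error as RedshiftErrorType where error == .integrationNotFoundFault {
        print("no integration found for \(integrationArn)")
    } catch let error as RedshiftErrorType where error == .integrationConflictStateFault {
        print("integration is in a conflicting state; retry later")
    }
}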
+ public static var integrationConflictStateFault: Self { .init(.integrationConflictStateFault) } /// The integration can't be found. public static var integrationNotFoundFault: Self { .init(.integrationNotFoundFault) } + /// You can't create any more zero-ETL integrations because the quota has been reached. + public static var integrationQuotaExceededFault: Self { .init(.integrationQuotaExceededFault) } + /// The specified integration source can't be found. + public static var integrationSourceNotFoundFault: Self { .init(.integrationSourceNotFoundFault) } + /// The specified integration target can't be found. + public static var integrationTargetNotFoundFault: Self { .init(.integrationTargetNotFoundFault) } /// The authentication profile request is not valid. The profile name can't be null or empty. The authentication profile API operation must be available in the Amazon Web Services Region. public static var invalidAuthenticationProfileRequestFault: Self { .init(.invalidAuthenticationProfileRequestFault) } /// The status of the authorization is not valid. diff --git a/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift index 315c1ff5e5..e4a6c1f5ba 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_api.swift @@ -200,6 +200,7 @@ public struct Resiliencehub: AWSService { /// /// Parameters: /// - assessmentSchedule: Assessment execution schedule with 'Daily' or 'Disabled' values. + /// - awsApplicationArn: Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, /// - clientToken: Used for an idempotency token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. /// - description: The optional description for an app. /// - eventSubscriptions: The list of events you would like to subscribe and get notification for. Currently, Resilience Hub supports only Drift detected and Scheduled assessment failure events notification. @@ -211,6 +212,7 @@ public struct Resiliencehub: AWSService { @inlinable public func createApp( assessmentSchedule: AppAssessmentScheduleType? = nil, + awsApplicationArn: String? = nil, clientToken: String? = CreateAppRequest.idempotencyToken(), description: String? = nil, eventSubscriptions: [EventSubscription]? = nil, @@ -222,6 +224,7 @@ public struct Resiliencehub: AWSService { ) async throws -> CreateAppResponse { let input = CreateAppRequest( assessmentSchedule: assessmentSchedule, + awsApplicationArn: awsApplicationArn, clientToken: clientToken, description: description, eventSubscriptions: eventSubscriptions, @@ -1089,7 +1092,7 @@ public struct Resiliencehub: AWSService { return try await self.listAlarmRecommendations(input, logger: logger) } - /// List of compliance drifts that were detected while running an assessment. + /// Indicates the list of compliance drifts that were detected while running an assessment. @Sendable @inlinable public func listAppAssessmentComplianceDrifts(_ input: ListAppAssessmentComplianceDriftsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListAppAssessmentComplianceDriftsResponse { @@ -1102,7 +1105,7 @@ public struct Resiliencehub: AWSService { logger: logger ) } - /// List of compliance drifts that were detected while running an assessment. + /// Indicates the list of compliance drifts that were detected while running an assessment. 
/// /// Parameters: /// - assessmentArn: Amazon Resource Name (ARN) of the assessment. The format for this ARN is: @@ -1492,6 +1495,7 @@ public struct Resiliencehub: AWSService { /// /// Parameters: /// - appArn: Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// - awsApplicationArn: Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, /// - fromLastAssessmentTime: Indicates the lower limit of the range that is used to filter applications based on their last assessment times. /// - maxResults: Maximum number of results to include in the response. If more results exist than the specified /// - name: The name for the one of the listed applications. @@ -1502,6 +1506,7 @@ public struct Resiliencehub: AWSService { @inlinable public func listApps( appArn: String? = nil, + awsApplicationArn: String? = nil, fromLastAssessmentTime: Date? = nil, maxResults: Int? = nil, name: String? = nil, @@ -1512,6 +1517,7 @@ public struct Resiliencehub: AWSService { ) async throws -> ListAppsResponse { let input = ListAppsRequest( appArn: appArn, + awsApplicationArn: awsApplicationArn, fromLastAssessmentTime: fromLastAssessmentTime, maxResults: maxResults, name: name, @@ -2819,6 +2825,7 @@ extension Resiliencehub { /// /// - Parameters: /// - appArn: Amazon Resource Name (ARN) of the Resilience Hub application. The format for this ARN is: + /// - awsApplicationArn: Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, /// - fromLastAssessmentTime: Indicates the lower limit of the range that is used to filter applications based on their last assessment times. /// - maxResults: Maximum number of results to include in the response. If more results exist than the specified /// - name: The name for the one of the listed applications. @@ -2828,6 +2835,7 @@ extension Resiliencehub { @inlinable public func listAppsPaginator( appArn: String? = nil, + awsApplicationArn: String? = nil, fromLastAssessmentTime: Date? = nil, maxResults: Int? = nil, name: String? 
= nil, @@ -2837,6 +2845,7 @@ extension Resiliencehub { ) -> AWSClient.PaginatorSequence { let input = ListAppsRequest( appArn: appArn, + awsApplicationArn: awsApplicationArn, fromLastAssessmentTime: fromLastAssessmentTime, maxResults: maxResults, name: name, @@ -3259,6 +3268,7 @@ extension Resiliencehub.ListAppsRequest: AWSPaginateToken { public func usingPaginationToken(_ token: String) -> Resiliencehub.ListAppsRequest { return .init( appArn: self.appArn, + awsApplicationArn: self.awsApplicationArn, fromLastAssessmentTime: self.fromLastAssessmentTime, maxResults: self.maxResults, name: self.name, diff --git a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift index ca438a0a5c..b9c5bd5ada 100644 --- a/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift +++ b/Sources/Soto/Services/Resiliencehub/Resiliencehub_shapes.swift @@ -72,7 +72,7 @@ extension Resiliencehub { public enum AssessmentStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case failed = "Failed" - case inProgress = "InProgress" + case inprogress = "InProgress" case pending = "Pending" case success = "Success" public var description: String { return self.rawValue } @@ -87,8 +87,8 @@ extension Resiliencehub { } public enum ConfigRecommendationOptimizationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case bestAZRecovery = "BestAZRecovery" case bestAttainable = "BestAttainable" + case bestAzRecovery = "BestAZRecovery" case bestRegionRecovery = "BestRegionRecovery" case leastChange = "LeastChange" case leastCost = "LeastCost" @@ -191,7 +191,7 @@ extension Resiliencehub { } public enum PermissionModelType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case legacyIAMUser = "LegacyIAMUser" + case legacyIamUser = "LegacyIAMUser" case roleBased = "RoleBased" public var description: String { return self.rawValue } } @@ -516,6 +516,9 @@ extension Resiliencehub { public let appArn: String /// Assessment execution schedule with 'Daily' or 'Disabled' values. public let assessmentSchedule: AppAssessmentScheduleType? + /// Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let awsApplicationArn: String? /// Current status of compliance for the resiliency policy. public let complianceStatus: AppComplianceStatusType? /// Date and time when the app was created. @@ -553,9 +556,10 @@ extension Resiliencehub { public let tags: [String: String]? @inlinable - public init(appArn: String, assessmentSchedule: AppAssessmentScheduleType? = nil, complianceStatus: AppComplianceStatusType? = nil, creationTime: Date, description: String? = nil, driftStatus: AppDriftStatusType? = nil, eventSubscriptions: [EventSubscription]? = nil, lastAppComplianceEvaluationTime: Date? = nil, lastDriftEvaluationTime: Date? = nil, lastResiliencyScoreEvaluationTime: Date? = nil, name: String, permissionModel: PermissionModel? = nil, policyArn: String? = nil, resiliencyScore: Double? = nil, rpoInSecs: Int? = nil, rtoInSecs: Int? = nil, status: AppStatusType? = nil, tags: [String: String]? = nil) { + public init(appArn: String, assessmentSchedule: AppAssessmentScheduleType? = nil, awsApplicationArn: String? = nil, complianceStatus: AppComplianceStatusType? = nil, creationTime: Date, description: String? 
= nil, driftStatus: AppDriftStatusType? = nil, eventSubscriptions: [EventSubscription]? = nil, lastAppComplianceEvaluationTime: Date? = nil, lastDriftEvaluationTime: Date? = nil, lastResiliencyScoreEvaluationTime: Date? = nil, name: String, permissionModel: PermissionModel? = nil, policyArn: String? = nil, resiliencyScore: Double? = nil, rpoInSecs: Int? = nil, rtoInSecs: Int? = nil, status: AppStatusType? = nil, tags: [String: String]? = nil) { self.appArn = appArn self.assessmentSchedule = assessmentSchedule + self.awsApplicationArn = awsApplicationArn self.complianceStatus = complianceStatus self.creationTime = creationTime self.description = description @@ -577,6 +581,7 @@ extension Resiliencehub { private enum CodingKeys: String, CodingKey { case appArn = "appArn" case assessmentSchedule = "assessmentSchedule" + case awsApplicationArn = "awsApplicationArn" case complianceStatus = "complianceStatus" case creationTime = "creationTime" case description = "description" @@ -633,7 +638,7 @@ extension Resiliencehub { public let resourceErrorsDetails: ResourceErrorsDetails? /// Starting time for the action. public let startTime: Date? - /// Indicates a concise summary that provides an overview of the Resilience Hub assessment. + /// Indicates the AI-generated summary for the Resilience Hub assessment, providing a concise overview that highlights the top risks and recommendations. This property is available only in the US East (N. Virginia) Region. public let summary: AssessmentSummary? /// Tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services resource. /// Each tag consists of a key/value pair. @@ -859,6 +864,9 @@ extension Resiliencehub { public let appArn: String /// Assessment execution schedule with 'Daily' or 'Disabled' values. public let assessmentSchedule: AppAssessmentScheduleType? + /// Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let awsApplicationArn: String? /// The current status of compliance for the resiliency policy. public let complianceStatus: AppComplianceStatusType? /// Date and time when the app was created. @@ -881,9 +889,10 @@ extension Resiliencehub { public let status: AppStatusType? @inlinable - public init(appArn: String, assessmentSchedule: AppAssessmentScheduleType? = nil, complianceStatus: AppComplianceStatusType? = nil, creationTime: Date, description: String? = nil, driftStatus: AppDriftStatusType? = nil, lastAppComplianceEvaluationTime: Date? = nil, name: String, resiliencyScore: Double? = nil, rpoInSecs: Int? = nil, rtoInSecs: Int? = nil, status: AppStatusType? = nil) { + public init(appArn: String, assessmentSchedule: AppAssessmentScheduleType? = nil, awsApplicationArn: String? = nil, complianceStatus: AppComplianceStatusType? = nil, creationTime: Date, description: String? = nil, driftStatus: AppDriftStatusType? = nil, lastAppComplianceEvaluationTime: Date? = nil, name: String, resiliencyScore: Double? = nil, rpoInSecs: Int? = nil, rtoInSecs: Int? = nil, status: AppStatusType? 
= nil) { self.appArn = appArn self.assessmentSchedule = assessmentSchedule + self.awsApplicationArn = awsApplicationArn self.complianceStatus = complianceStatus self.creationTime = creationTime self.description = description @@ -899,6 +908,7 @@ extension Resiliencehub { private enum CodingKeys: String, CodingKey { case appArn = "appArn" case assessmentSchedule = "assessmentSchedule" + case awsApplicationArn = "awsApplicationArn" case complianceStatus = "complianceStatus" case creationTime = "creationTime" case description = "description" @@ -1057,12 +1067,12 @@ extension Resiliencehub { /// Indicates the reason for excluding an operational recommendation. public let excludeReason: ExcludeRecommendationReason? /// The operational recommendation item. - public let item: UpdateRecommendationStatusItem + public let item: UpdateRecommendationStatusItem? /// Reference identifier of the operational recommendation. public let referenceId: String @inlinable - public init(entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem, referenceId: String) { + public init(entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? = nil, referenceId: String) { self.entryId = entryId self.excluded = excluded self.excludeReason = excludeReason @@ -1226,6 +1236,9 @@ extension Resiliencehub { public struct CreateAppRequest: AWSEncodableShape { /// Assessment execution schedule with 'Daily' or 'Disabled' values. public let assessmentSchedule: AppAssessmentScheduleType? + /// Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let awsApplicationArn: String? /// Used for an idempotency token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. /// You should not reuse the same client token for other API requests. public let clientToken: String? @@ -1246,8 +1259,9 @@ extension Resiliencehub { public let tags: [String: String]? @inlinable - public init(assessmentSchedule: AppAssessmentScheduleType? = nil, clientToken: String? = CreateAppRequest.idempotencyToken(), description: String? = nil, eventSubscriptions: [EventSubscription]? = nil, name: String, permissionModel: PermissionModel? = nil, policyArn: String? = nil, tags: [String: String]? = nil) { + public init(assessmentSchedule: AppAssessmentScheduleType? = nil, awsApplicationArn: String? = nil, clientToken: String? = CreateAppRequest.idempotencyToken(), description: String? = nil, eventSubscriptions: [EventSubscription]? = nil, name: String, permissionModel: PermissionModel? = nil, policyArn: String? = nil, tags: [String: String]? 
= nil) { self.assessmentSchedule = assessmentSchedule + self.awsApplicationArn = awsApplicationArn self.clientToken = clientToken self.description = description self.eventSubscriptions = eventSubscriptions @@ -1258,6 +1272,7 @@ extension Resiliencehub { } public func validate(name: String) throws { + try self.validate(self.awsApplicationArn, name: "awsApplicationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") try self.validate(self.clientToken, name: "clientToken", parent: name, max: 63) try self.validate(self.clientToken, name: "clientToken", parent: name, min: 1) try self.validate(self.clientToken, name: "clientToken", parent: name, pattern: "^[A-za-z0-9_.-]{0,63}$") @@ -1282,6 +1297,7 @@ extension Resiliencehub { private enum CodingKeys: String, CodingKey { case assessmentSchedule = "assessmentSchedule" + case awsApplicationArn = "awsApplicationArn" case clientToken = "clientToken" case description = "description" case eventSubscriptions = "eventSubscriptions" @@ -2415,7 +2431,9 @@ extension Resiliencehub { public let appArn: String /// The version of the application. public let appVersion: String - /// The returned error message for the request. + /// List of errors that were encountered while importing resources. + public let errorDetails: [ErrorDetail]? + /// The error message returned for the resource request. public let errorMessage: String? /// Status of the action. public let status: ResourceImportStatusType @@ -2423,9 +2441,10 @@ extension Resiliencehub { public let statusChangeTime: Date @inlinable - public init(appArn: String, appVersion: String, errorMessage: String? = nil, status: ResourceImportStatusType, statusChangeTime: Date) { + public init(appArn: String, appVersion: String, errorDetails: [ErrorDetail]? = nil, errorMessage: String? = nil, status: ResourceImportStatusType, statusChangeTime: Date) { self.appArn = appArn self.appVersion = appVersion + self.errorDetails = errorDetails self.errorMessage = errorMessage self.status = status self.statusChangeTime = statusChangeTime @@ -2434,6 +2453,7 @@ extension Resiliencehub { private enum CodingKeys: String, CodingKey { case appArn = "appArn" case appVersion = "appVersion" + case errorDetails = "errorDetails" case errorMessage = "errorMessage" case status = "status" case statusChangeTime = "statusChangeTime" @@ -2628,6 +2648,20 @@ extension Resiliencehub { } } + public struct ErrorDetail: AWSDecodableShape { + /// Provides additional information about the error. + public let errorMessage: String? + + @inlinable + public init(errorMessage: String? = nil) { + self.errorMessage = errorMessage + } + + private enum CodingKeys: String, CodingKey { + case errorMessage = "errorMessage" + } + } + public struct EventSubscription: AWSEncodableShape & AWSDecodableShape { /// The type of event you would like to subscribe and get notification for. Currently, Resilience Hub supports notifications only for Drift detected (DriftDetected) and Scheduled assessment failure (ScheduledAssessmentFailure) events. public let eventType: EventType @@ -3504,6 +3538,9 @@ extension Resiliencehub { /// arn:partition:resiliencehub:region:account:app/app-id. For more information about ARNs, /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. public let appArn: String? 
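On the Resilience Hub side, the new awsApplicationArn parameter threads through CreateApp, ListApps, and the ListApps paginator. A minimal sketch under those assumptions; the group ARN is a placeholder, and the response members are referenced as they appear in the existing generated shapes.

import SotoResiliencehub

// Illustrative only: `hub` is an already-configured Resiliencehub service object
// and `groupArn` is a placeholder AppRegistry-integrated Resource Groups group ARN.
func appsForAppRegistryGroup(_ hub: Resiliencehub, groupArn: String) async throws {
    // Associate a new application with the AppRegistry-integrated group.
    let created = try await hub.createApp(
        awsApplicationArn: groupArn,
        description: "Payments stack",
        name: "payments-app"
    )
    print("created:", created.app.appArn)

    // List only the applications tied to that group, page by page.
    for try await page in hub.listAppsPaginator(awsApplicationArn: groupArn) {
        for summary in page.appSummaries {
            print(summary.name, summary.awsApplicationArn ?? "-")
        }
    }
}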
+ /// Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, + /// see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference guide. + public let awsApplicationArn: String? /// Indicates the lower limit of the range that is used to filter applications based on their last assessment times. public let fromLastAssessmentTime: Date? /// Maximum number of results to include in the response. If more results exist than the specified @@ -3519,8 +3556,9 @@ extension Resiliencehub { public let toLastAssessmentTime: Date? @inlinable - public init(appArn: String? = nil, fromLastAssessmentTime: Date? = nil, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil, reverseOrder: Bool? = nil, toLastAssessmentTime: Date? = nil) { + public init(appArn: String? = nil, awsApplicationArn: String? = nil, fromLastAssessmentTime: Date? = nil, maxResults: Int? = nil, name: String? = nil, nextToken: String? = nil, reverseOrder: Bool? = nil, toLastAssessmentTime: Date? = nil) { self.appArn = appArn + self.awsApplicationArn = awsApplicationArn self.fromLastAssessmentTime = fromLastAssessmentTime self.maxResults = maxResults self.name = name @@ -3533,6 +3571,7 @@ extension Resiliencehub { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) request.encodeQuery(self.appArn, key: "appArn") + request.encodeQuery(self.awsApplicationArn, key: "awsApplicationArn") request.encodeQuery(self.fromLastAssessmentTime, key: "fromLastAssessmentTime") request.encodeQuery(self.maxResults, key: "maxResults") request.encodeQuery(self.name, key: "name") @@ -3543,6 +3582,7 @@ extension Resiliencehub { public func validate(name: String) throws { try self.validate(self.appArn, name: "appArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") + try self.validate(self.awsApplicationArn, name: "awsApplicationArn", parent: name, pattern: "^arn:(aws|aws-cn|aws-iso|aws-iso-[a-z]{1}|aws-us-gov):[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:([a-z]{2}-((iso[a-z]{0,1}-)|(gov-)){0,1}[a-z]+-[0-9]):[0-9]{12}:[A-Za-z0-9/][A-Za-z0-9:_/+.-]{0,1023}$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9][A-Za-z0-9_\\-]{1,59}$") @@ -5520,12 +5560,12 @@ extension Resiliencehub { /// Indicates the reason for excluding an operational recommendation. public let excludeReason: ExcludeRecommendationReason? /// The operational recommendation item. - public let item: UpdateRecommendationStatusItem + public let item: UpdateRecommendationStatusItem? /// Reference identifier of the operational recommendation item. public let referenceId: String @inlinable - public init(entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem, referenceId: String) { + public init(entryId: String, excluded: Bool, excludeReason: ExcludeRecommendationReason? = nil, item: UpdateRecommendationStatusItem? 
= nil, referenceId: String) { self.entryId = entryId self.excluded = excluded self.excludeReason = excludeReason @@ -5536,7 +5576,7 @@ extension Resiliencehub { public func validate(name: String) throws { try self.validate(self.entryId, name: "entryId", parent: name, max: 255) try self.validate(self.entryId, name: "entryId", parent: name, min: 1) - try self.item.validate(name: "\(name).item") + try self.item?.validate(name: "\(name).item") try self.validate(self.referenceId, name: "referenceId", parent: name, max: 500) try self.validate(self.referenceId, name: "referenceId", parent: name, min: 1) } diff --git a/Sources/Soto/Services/ResourceExplorer2/ResourceExplorer2_api.swift b/Sources/Soto/Services/ResourceExplorer2/ResourceExplorer2_api.swift index 1f96ffe727..9f644c4057 100644 --- a/Sources/Soto/Services/ResourceExplorer2/ResourceExplorer2_api.swift +++ b/Sources/Soto/Services/ResourceExplorer2/ResourceExplorer2_api.swift @@ -65,6 +65,7 @@ public struct ResourceExplorer2: AWSService { serviceProtocol: .restjson, apiVersion: "2022-07-28", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: ResourceExplorer2ErrorType.self, middleware: middleware, timeout: timeout, @@ -76,6 +77,25 @@ public struct ResourceExplorer2: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.dualstack, .fips]: .init(endpoints: [ + "ca-central-1": "resource-explorer-2-fips.ca-central-1.api.aws", + "ca-west-1": "resource-explorer-2-fips.ca-west-1.api.aws", + "us-east-1": "resource-explorer-2-fips.us-east-1.api.aws", + "us-east-2": "resource-explorer-2-fips.us-east-2.api.aws", + "us-west-1": "resource-explorer-2-fips.us-west-1.api.aws", + "us-west-2": "resource-explorer-2-fips.us-west-2.api.aws" + ]), + [.fips]: .init(endpoints: [ + "ca-central-1": "resource-explorer-2-fips.ca-central-1.amazonaws.com", + "ca-west-1": "resource-explorer-2-fips.ca-west-1.amazonaws.com", + "us-east-1": "resource-explorer-2-fips.us-east-1.amazonaws.com", + "us-east-2": "resource-explorer-2-fips.us-east-2.amazonaws.com", + "us-west-1": "resource-explorer-2-fips.us-west-1.amazonaws.com", + "us-west-2": "resource-explorer-2-fips.us-west-2.amazonaws.com" + ]) + ]} // MARK: API Calls @@ -284,7 +304,7 @@ public struct ResourceExplorer2: AWSService { ) } - /// Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account or a delegated administrator with service access enabled can invoke this API call. + /// Retrieves the status of your account's Amazon Web Services service access, and validates the service linked role required to access the multi-account search feature. Only the management account can invoke this API call. @Sendable @inlinable public func getAccountLevelServiceConfiguration(logger: Logger = AWSClient.loggingDisabled) async throws -> GetAccountLevelServiceConfigurationOutput { @@ -425,6 +445,44 @@ public struct ResourceExplorer2: AWSService { return try await self.listIndexesForMembers(input, logger: logger) } + /// Returns a list of resources and their details that match the specified criteria. This query must use a view. If you don’t explicitly specify a view, then Resource Explorer uses the default view for the Amazon Web Services Region in which you call this operation. 
+ @Sendable + @inlinable + public func listResources(_ input: ListResourcesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListResourcesOutput { + try await self.client.execute( + operation: "ListResources", + path: "/ListResources", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of resources and their details that match the specified criteria. This query must use a view. If you don’t explicitly specify a view, then Resource Explorer uses the default view for the Amazon Web Services Region in which you call this operation. + /// + /// Parameters: + /// - filters: + /// - maxResults: The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. + /// - nextToken: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from. The pagination tokens expire after 24 hours. + /// - viewArn: Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the Amazon Web Services Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a 401 Unauthorized exception. + /// - logger: Logger use during operation + @inlinable + public func listResources( + filters: SearchFilter? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + viewArn: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListResourcesOutput { + let input = ListResourcesInput( + filters: filters, + maxResults: maxResults, + nextToken: nextToken, + viewArn: viewArn + ) + return try await self.listResources(input, logger: logger) + } + /// Retrieves a list of all resource types currently supported by Amazon Web Services Resource Explorer. @Sendable @inlinable @@ -786,6 +844,46 @@ extension ResourceExplorer2 { return self.listIndexesForMembersPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listResources(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listResourcesPaginator( + _ input: ListResourcesInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listResources, + inputKey: \ListResourcesInput.nextToken, + outputKey: \ListResourcesOutput.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listResources(_:logger:)``. 
+ /// + /// - Parameters: + /// - filters: + /// - maxResults: The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. + /// - viewArn: Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the Amazon Web Services Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a 401 Unauthorized exception. + /// - logger: Logger used for logging + @inlinable + public func listResourcesPaginator( + filters: SearchFilter? = nil, + maxResults: Int? = nil, + viewArn: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListResourcesInput( + filters: filters, + maxResults: maxResults, + viewArn: viewArn + ) + return self.listResourcesPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listSupportedResourceTypes(_:logger:)``. /// /// - Parameters: @@ -918,6 +1016,18 @@ extension ResourceExplorer2.ListIndexesInput: AWSPaginateToken { } } +extension ResourceExplorer2.ListResourcesInput: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> ResourceExplorer2.ListResourcesInput { + return .init( + filters: self.filters, + maxResults: self.maxResults, + nextToken: token, + viewArn: self.viewArn + ) + } +} + extension ResourceExplorer2.ListSupportedResourceTypesInput: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> ResourceExplorer2.ListSupportedResourceTypesInput { diff --git a/Sources/Soto/Services/ResourceExplorer2/ResourceExplorer2_shapes.swift b/Sources/Soto/Services/ResourceExplorer2/ResourceExplorer2_shapes.swift index 95d98d133f..14665460ad 100644 --- a/Sources/Soto/Services/ResourceExplorer2/ResourceExplorer2_shapes.swift +++ b/Sources/Soto/Services/ResourceExplorer2/ResourceExplorer2_shapes.swift @@ -519,6 +519,53 @@ extension ResourceExplorer2 { } } + public struct ListResourcesInput: AWSEncodableShape { + public let filters: SearchFilter? + /// The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. + public let maxResults: Int? + /// The parameter for receiving additional results if you receive a NextToken response in a previous request. 
A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from. The pagination tokens expire after 24 hours. + public let nextToken: String? + /// Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't specify a value for this parameter, then the operation automatically uses the default view for the Amazon Web Services Region in which you called this operation. If the Region either doesn't have a default view or if you don't have permission to use the default view, then the operation fails with a 401 Unauthorized exception. + public let viewArn: String? + + @inlinable + public init(filters: SearchFilter? = nil, maxResults: Int? = nil, nextToken: String? = nil, viewArn: String? = nil) { + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + self.viewArn = viewArn + } + + private enum CodingKeys: String, CodingKey { + case filters = "Filters" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case viewArn = "ViewArn" + } + } + + public struct ListResourcesOutput: AWSDecodableShape { + /// If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. The pagination tokens expire after 24 hours. + public let nextToken: String? + /// The list of structures that describe the resources that match the query. + public let resources: [Resource]? + /// The Amazon resource name (ARN) of the view that this operation used to perform the search. + public let viewArn: String? + + @inlinable + public init(nextToken: String? = nil, resources: [Resource]? = nil, viewArn: String? = nil) { + self.nextToken = nextToken + self.resources = resources + self.viewArn = viewArn + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case resources = "Resources" + case viewArn = "ViewArn" + } + } + public struct ListSupportedResourceTypesInput: AWSEncodableShape { /// The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results. public let maxResults: Int? @@ -680,7 +727,7 @@ extension ResourceExplorer2 { public let region: String? /// The type of the resource. public let resourceType: String? - /// The Amazon Web Service that owns the resource and is responsible for creating and updating it. + /// The Amazon Web Servicesservice that owns the resource and is responsible for creating and updating it. public let service: String? 
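The new ListResources operation and its paginator added above wrap the NextToken handling that the doc comments describe. A minimal usage sketch, assuming an already-configured AWSClient and an illustrative region and page size; only the method and property names (listResourcesPaginator, resources, resourceType, region, service) are taken from this diff, everything else is placeholder:

import SotoResourceExplorer2

/// Pages through every resource visible through the Region's default view.
/// The 100-per-page limit and us-east-1 are illustrative choices, not requirements.
func printAllResources(using client: AWSClient) async throws {
    let explorer = ResourceExplorer2(client: client, region: .useast1)
    // No viewArn is passed, so the default view of the calling Region is used,
    // as described in the listResources documentation above.
    for try await page in explorer.listResourcesPaginator(maxResults: 100) {
        for resource in page.resources ?? [] {
            print("\(resource.resourceType ?? "unknown") in \(resource.region ?? "-") (\(resource.service ?? "-"))")
        }
    }
}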
@inlinable @@ -778,7 +825,7 @@ extension ResourceExplorer2 { } public func validate(name: String) throws { - try self.validate(self.queryString, name: "queryString", parent: name, max: 1011) + try self.validate(self.queryString, name: "queryString", parent: name, max: 1280) } private enum CodingKeys: String, CodingKey { @@ -818,7 +865,7 @@ extension ResourceExplorer2 { public struct SupportedResourceType: AWSDecodableShape { /// The unique identifier of the resource type. public let resourceType: String? - /// The Amazon Web Service that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type. + /// The Amazon Web Servicesservice that is associated with the resource type. This is the primary service that lets you create and interact with resources of this type. public let service: String? @inlinable diff --git a/Sources/Soto/Services/ResourceGroups/ResourceGroups_api.swift b/Sources/Soto/Services/ResourceGroups/ResourceGroups_api.swift index 10dcc1b9c4..775d73804f 100644 --- a/Sources/Soto/Services/ResourceGroups/ResourceGroups_api.swift +++ b/Sources/Soto/Services/ResourceGroups/ResourceGroups_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS ResourceGroups service. /// -/// Resource Groups lets you organize Amazon Web Services resources such as Amazon Elastic Compute Cloud instances, Amazon Relational Database Service databases, and Amazon Simple Storage Service buckets into groups using criteria that you define as tags. A resource group is a collection of resources that match the resource types specified in a query, and share one or more tags or portions of tags. You can create a group of resources based on their roles in your cloud infrastructure, lifecycle stages, regions, application layers, or virtually any criteria. Resource Groups enable you to automate management tasks, such as those in Amazon Web Services Systems Manager Automation documents, on tag-related resources in Amazon Web Services Systems Manager. Groups of tagged resources also let you quickly view a custom console in Amazon Web Services Systems Manager that shows Config compliance and other monitoring data about member resources. To create a resource group, build a resource query, and specify tags that identify the criteria that members of the group have in common. Tags are key-value pairs. For more information about Resource Groups, see the Resource Groups User Guide. Resource Groups uses a REST-compliant API that you can use to perform the following types of operations. Create, Read, Update, and Delete (CRUD) operations on resource groups and resource query entities Applying, editing, and removing tags from resource groups Resolving resource group member ARNs so they can be returned as search results Getting data about resources that are members of a group Searching Amazon Web Services resources based on a resource query +/// Resource Groups lets you organize Amazon Web Services resources such as Amazon Elastic Compute Cloud instances, Amazon Relational Database Service databases, and Amazon Simple Storage Service buckets into groups using criteria that you define as tags. A resource group is a collection of resources that match the resource types specified in a query, and share one or more tags or portions of tags. You can create a group of resources based on their roles in your cloud infrastructure, lifecycle stages, regions, application layers, or virtually any criteria. 
Resource Groups enable you to automate management tasks, such as those in Amazon Web Services Systems Manager Automation documents, on tag-related resources in Amazon Web Services Systems Manager. Groups of tagged resources also let you quickly view a custom console in Amazon Web Services Systems Manager that shows Config compliance and other monitoring data about member resources. To create a resource group, build a resource query, and specify tags that identify the criteria that members of the group have in common. Tags are key-value pairs. For more information about Resource Groups, see the Resource Groups User Guide. Resource Groups uses a REST-compliant API that you can use to perform the following types of operations. Create, Read, Update, and Delete (CRUD) operations on resource groups and resource query entities Applying, editing, and removing tags from resource groups Resolving resource group member Amazon resource names (ARN)s so they can be returned as search results Getting data about resources that are members of a group Searching Amazon Web Services resources based on a resource query public struct ResourceGroups: AWSService { // MARK: Member variables @@ -91,6 +91,35 @@ public struct ResourceGroups: AWSService { // MARK: API Calls + /// Cancels the specified tag-sync task. Minimum permissions To run this command, you must have the following permissions: resource-groups:CancelTagSyncTask on the application group resource-groups:DeleteGroup + @Sendable + @inlinable + public func cancelTagSyncTask(_ input: CancelTagSyncTaskInput, logger: Logger = AWSClient.loggingDisabled) async throws { + try await self.client.execute( + operation: "CancelTagSyncTask", + path: "/cancel-tag-sync-task", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Cancels the specified tag-sync task. Minimum permissions To run this command, you must have the following permissions: resource-groups:CancelTagSyncTask on the application group resource-groups:DeleteGroup + /// + /// Parameters: + /// - taskArn: The Amazon resource name (ARN) of the tag-sync task. + /// - logger: Logger use during operation + @inlinable + public func cancelTagSyncTask( + taskArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws { + let input = CancelTagSyncTaskInput( + taskArn: taskArn + ) + return try await self.cancelTagSyncTask(input, logger: logger) + } + /// Creates a resource group with the specified name and description. You can optionally include either a resource query or a service configuration. For more information about constructing a resource query, see Build queries and groups in Resource Groups in the Resource Groups User Guide. For more information about service-linked groups and service configurations, see Service configurations for Resource Groups. Minimum permissions To run this command, you must have the following permissions: resource-groups:CreateGroup @Sendable @inlinable @@ -108,24 +137,33 @@ public struct ResourceGroups: AWSService { /// /// Parameters: /// - configuration: A configuration associates the resource group with an Amazon Web Services service and specifies how the service can interact with the resources in the group. A configuration is an array of GroupConfigurationItem elements. For details about the syntax of service configurations, see Service configurations for Resource Groups. A resource group can contain either a Configuration or a ResourceQuery, but not both. 
+ /// - criticality: The critical rank of the application group on a scale of 1 to 10, with a rank of 1 being the most critical, and a rank of 10 being least critical. /// - description: The description of the resource group. Descriptions can consist of letters, numbers, hyphens, underscores, periods, and spaces. + /// - displayName: The name of the application group, which you can change at any time. /// - name: The name of the group, which is the identifier of the group in other operations. You can't change the name of a resource group after you create it. A resource group name can consist of letters, numbers, hyphens, periods, and underscores. The name cannot start with AWS, aws, or any other possible capitalization; these are reserved. A resource group name must be unique within each Amazon Web Services Region in your Amazon Web Services account. + /// - owner: A name, email address or other identifier for the person or group who is considered as the owner of this application group within your organization. /// - resourceQuery: The resource query that determines which Amazon Web Services resources are members of this group. For more information about resource queries, see Create a tag-based group in Resource Groups. A resource group can contain either a ResourceQuery or a Configuration, but not both. /// - tags: The tags to add to the group. A tag is key-value pair string. /// - logger: Logger use during operation @inlinable public func createGroup( configuration: [GroupConfigurationItem]? = nil, + criticality: Int? = nil, description: String? = nil, + displayName: String? = nil, name: String, + owner: String? = nil, resourceQuery: ResourceQuery? = nil, tags: [String: String]? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateGroupOutput { let input = CreateGroupInput( configuration: configuration, + criticality: criticality, description: description, + displayName: displayName, name: name, + owner: owner, resourceQuery: resourceQuery, tags: tags ) @@ -148,7 +186,7 @@ public struct ResourceGroups: AWSService { /// Deletes the specified resource group. Deleting a resource group does not delete any resources that are members of the group; it only deletes the group structure. Minimum permissions To run this command, you must have the following permissions: resource-groups:DeleteGroup /// /// Parameters: - /// - group: The name or the ARN of the resource group to delete. + /// - group: The name or the Amazon resource name (ARN) of the resource group to delete. /// - logger: Logger use during operation @inlinable public func deleteGroup( @@ -190,7 +228,7 @@ public struct ResourceGroups: AWSService { /// Returns information about a specified resource group. Minimum permissions To run this command, you must have the following permissions: resource-groups:GetGroup /// /// Parameters: - /// - group: The name or the ARN of the resource group to retrieve. + /// - group: The name or the Amazon resource name (ARN) of the resource group to retrieve. /// - logger: Logger use during operation @inlinable public func getGroup( @@ -219,7 +257,7 @@ public struct ResourceGroups: AWSService { /// Retrieves the service configuration associated with the specified resource group. For details about the service configuration syntax, see Service configurations for Resource Groups. 
Minimum permissions To run this command, you must have the following permissions: resource-groups:GetGroupConfiguration /// /// Parameters: - /// - group: The name or the ARN of the resource group for which you want to retrive the service configuration. + /// - group: The name or the Amazon resource name (ARN) of the resource group for which you want to retrive the service configuration. /// - logger: Logger use during operation @inlinable public func getGroupConfiguration( @@ -248,7 +286,7 @@ public struct ResourceGroups: AWSService { /// Retrieves the resource query associated with the specified resource group. For more information about resource queries, see Create a tag-based group in Resource Groups. Minimum permissions To run this command, you must have the following permissions: resource-groups:GetGroupQuery /// /// Parameters: - /// - group: The name or the ARN of the resource group to query. + /// - group: The name or the Amazon resource name (ARN) of the resource group to query. /// - logger: Logger use during operation @inlinable public func getGroupQuery( @@ -261,7 +299,36 @@ public struct ResourceGroups: AWSService { return try await self.getGroupQuery(input, logger: logger) } - /// Returns a list of tags that are associated with a resource group, specified by an ARN. Minimum permissions To run this command, you must have the following permissions: resource-groups:GetTags + /// Returns information about a specified tag-sync task. Minimum permissions To run this command, you must have the following permissions: resource-groups:GetTagSyncTask on the application group + @Sendable + @inlinable + public func getTagSyncTask(_ input: GetTagSyncTaskInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTagSyncTaskOutput { + try await self.client.execute( + operation: "GetTagSyncTask", + path: "/get-tag-sync-task", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns information about a specified tag-sync task. Minimum permissions To run this command, you must have the following permissions: resource-groups:GetTagSyncTask on the application group + /// + /// Parameters: + /// - taskArn: The Amazon resource name (ARN) of the tag-sync task. + /// - logger: Logger use during operation + @inlinable + public func getTagSyncTask( + taskArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetTagSyncTaskOutput { + let input = GetTagSyncTaskInput( + taskArn: taskArn + ) + return try await self.getTagSyncTask(input, logger: logger) + } + + /// Returns a list of tags that are associated with a resource group, specified by an Amazon resource name (ARN). Minimum permissions To run this command, you must have the following permissions: resource-groups:GetTags @Sendable @inlinable public func getTags(_ input: GetTagsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetTagsOutput { @@ -274,10 +341,10 @@ public struct ResourceGroups: AWSService { logger: logger ) } - /// Returns a list of tags that are associated with a resource group, specified by an ARN. Minimum permissions To run this command, you must have the following permissions: resource-groups:GetTags + /// Returns a list of tags that are associated with a resource group, specified by an Amazon resource name (ARN). Minimum permissions To run this command, you must have the following permissions: resource-groups:GetTags /// /// Parameters: - /// - arn: The ARN of the resource group whose tags you want to retrieve. 
+ /// - arn: The Amazon resource name (ARN) of the resource group whose tags you want to retrieve. /// - logger: Logger use during operation @inlinable public func getTags( @@ -290,7 +357,7 @@ public struct ResourceGroups: AWSService { return try await self.getTags(input, logger: logger) } - /// Adds the specified resources to the specified group. You can use this operation with only resource groups that are configured with the following types: AWS::EC2::HostManagement AWS::EC2::CapacityReservationPool Other resource group type and resource types aren't currently supported by this operation. Minimum permissions To run this command, you must have the following permissions: resource-groups:GroupResources + /// Adds the specified resources to the specified group. You can only use this operation with the following groups: AWS::EC2::HostManagement AWS::EC2::CapacityReservationPool AWS::ResourceGroups::ApplicationGroup Other resource group types and resource types are not currently supported by this operation. Minimum permissions To run this command, you must have the following permissions: resource-groups:GroupResources @Sendable @inlinable public func groupResources(_ input: GroupResourcesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GroupResourcesOutput { @@ -303,11 +370,11 @@ public struct ResourceGroups: AWSService { logger: logger ) } - /// Adds the specified resources to the specified group. You can use this operation with only resource groups that are configured with the following types: AWS::EC2::HostManagement AWS::EC2::CapacityReservationPool Other resource group type and resource types aren't currently supported by this operation. Minimum permissions To run this command, you must have the following permissions: resource-groups:GroupResources + /// Adds the specified resources to the specified group. You can only use this operation with the following groups: AWS::EC2::HostManagement AWS::EC2::CapacityReservationPool AWS::ResourceGroups::ApplicationGroup Other resource group types and resource types are not currently supported by this operation. Minimum permissions To run this command, you must have the following permissions: resource-groups:GroupResources /// /// Parameters: - /// - group: The name or the ARN of the resource group to add resources to. - /// - resourceArns: The list of ARNs of the resources to be added to the group. + /// - group: The name or the Amazon resource name (ARN) of the resource group to add resources to. + /// - resourceArns: The list of Amazon resource names (ARNs) of the resources to be added to the group. /// - logger: Logger use during operation @inlinable public func groupResources( @@ -322,7 +389,7 @@ public struct ResourceGroups: AWSService { return try await self.groupResources(input, logger: logger) } - /// Returns a list of ARNs of the resources that are members of a specified resource group. Minimum permissions To run this command, you must have the following permissions: resource-groups:ListGroupResources cloudformation:DescribeStacks cloudformation:ListStackResources tag:GetResources + /// Returns a list of Amazon resource names (ARNs) of the resources that are members of a specified resource group. 
Minimum permissions To run this command, you must have the following permissions: resource-groups:ListGroupResources cloudformation:DescribeStacks cloudformation:ListStackResources tag:GetResources @Sendable @inlinable public func listGroupResources(_ input: ListGroupResourcesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListGroupResourcesOutput { @@ -335,11 +402,11 @@ public struct ResourceGroups: AWSService { logger: logger ) } - /// Returns a list of ARNs of the resources that are members of a specified resource group. Minimum permissions To run this command, you must have the following permissions: resource-groups:ListGroupResources cloudformation:DescribeStacks cloudformation:ListStackResources tag:GetResources + /// Returns a list of Amazon resource names (ARNs) of the resources that are members of a specified resource group. Minimum permissions To run this command, you must have the following permissions: resource-groups:ListGroupResources cloudformation:DescribeStacks cloudformation:ListStackResources tag:GetResources /// /// Parameters: /// - filters: Filters, formatted as ResourceFilter objects, that you want to apply to a ListGroupResources operation. Filters the results to include only those of the specified resource types. resource-type - Filter resources by their type. Specify up to five resource types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, or AWS::S3::Bucket. When you specify a resource-type filter for ListGroupResources, Resource Groups validates your filter resource types against the types that are defined in the query associated with the group. For example, if a group contains only S3 buckets because its query specifies only that resource type, but your resource-type filter includes EC2 instances, AWS Resource Groups does not filter for EC2 instances. In this case, a ListGroupResources request returns a BadRequestException error with a message similar to the following: The resource types specified as filters in the request are not valid. The error includes a list of resource types that failed the validation because they are not part of the query associated with the group. This validation doesn't occur when the group query specifies AWS::AllSupported, because a group based on such a query can contain any of the allowed resource types for the query type (tag-based or Amazon CloudFront stack-based queries). - /// - group: The name or the ARN of the resource group + /// - group: The name or the Amazon resource name (ARN) of the resource group. /// - maxResults: The total number of results that you want included on each page of the /// - nextToken: The parameter for receiving additional results if you receive a /// - logger: Logger use during operation @@ -360,6 +427,44 @@ public struct ResourceGroups: AWSService { return try await self.listGroupResources(input, logger: logger) } + /// Returns the status of the last grouping or ungrouping action for each resource in the specified application group. + @Sendable + @inlinable + public func listGroupingStatuses(_ input: ListGroupingStatusesInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListGroupingStatusesOutput { + try await self.client.execute( + operation: "ListGroupingStatuses", + path: "/list-grouping-statuses", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns the status of the last grouping or ungrouping action for each resource in the specified application group. 
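The Resource Groups changes above extend CreateGroup with application-group metadata (criticality, displayName, owner) and add per-resource grouping status reporting via ListGroupingStatuses. A minimal sketch of creating such a group, assuming an existing AWSClient; the parameter names come from the generated createGroup signature in this diff, while the group name, owner string, and region are placeholders, and whether the service additionally requires an AWS::ResourceGroups::ApplicationGroup configuration is not shown here:

import SotoResourceGroups

/// Creates a resource group using the new application-group metadata fields.
/// Per the docs above, criticality ranges from 1 (most critical) to 10 (least critical).
func createPaymentsGroup(using client: AWSClient) async throws {
    let resourceGroups = ResourceGroups(client: client, region: .useast1)
    _ = try await resourceGroups.createGroup(
        criticality: 1,
        description: "Payment processing application",
        displayName: "Payments",
        name: "payments-app",                  // placeholder group name
        owner: "payments-team@example.com"     // placeholder owner identifier
    )
}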
+ /// + /// Parameters: + /// - filters: The filter name and value pair that is used to return more specific results from a list of resources. + /// - group: The application group identifier, expressed as an Amazon resource name (ARN) or the application group name. + /// - maxResults: The maximum number of resources and their statuses returned in the response. + /// - nextToken: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value provided by a previous call's NextToken response to indicate where the output should continue from. + /// - logger: Logger use during operation + @inlinable + public func listGroupingStatuses( + filters: [ListGroupingStatusesFilter]? = nil, + group: String, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListGroupingStatusesOutput { + let input = ListGroupingStatusesInput( + filters: filters, + group: group, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listGroupingStatuses(input, logger: logger) + } + /// Returns a list of existing Resource Groups in your account. Minimum permissions To run this command, you must have the following permissions: resource-groups:ListGroups @Sendable @inlinable @@ -376,7 +481,7 @@ public struct ResourceGroups: AWSService { /// Returns a list of existing Resource Groups in your account. Minimum permissions To run this command, you must have the following permissions: resource-groups:ListGroups /// /// Parameters: - /// - filters: Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups operation. resource-type - Filter the results to include only those resource groups that have the specified resource type in their ResourceTypeFilter. For example, AWS::EC2::Instance would return any resource group with a ResourceTypeFilter that includes AWS::EC2::Instance. configuration-type - Filter the results to include only those groups that have the specified configuration types attached. The current supported values are: AWS::AppRegistry::Application AWS::AppRegistry::ApplicationResourceGroups AWS::CloudFormation::Stack AWS::EC2::CapacityReservationPool AWS::EC2::HostManagement AWS::NetworkFirewall::RuleGroup + /// - filters: Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups operation. resource-type - Filter the results to include only those resource groups that have the specified resource type in their ResourceTypeFilter. For example, AWS::EC2::Instance would return any resource group with a ResourceTypeFilter that includes AWS::EC2::Instance. configuration-type - Filter the results to include only those groups that have the specified configuration types attached. The current supported values are: AWS::ResourceGroups::ApplicationGroup AWS::AppRegistry::Application AWS::AppRegistry::ApplicationResourceGroups AWS::CloudFormation::Stack AWS::EC2::CapacityReservationPool AWS::EC2::HostManagement AWS::NetworkFirewall::RuleGroup /// - maxResults: The total number of results that you want included on each page of the /// - nextToken: The parameter for receiving additional results if you receive a /// - logger: Logger use during operation @@ -395,6 +500,41 @@ public struct ResourceGroups: AWSService { return try await self.listGroups(input, logger: logger) } + /// Returns a list of tag-sync tasks. 
Minimum permissions To run this command, you must have the following permissions: resource-groups:ListTagSyncTasks with the group passed in the filters as the resource or * if using no filters + @Sendable + @inlinable + public func listTagSyncTasks(_ input: ListTagSyncTasksInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagSyncTasksOutput { + try await self.client.execute( + operation: "ListTagSyncTasks", + path: "/list-tag-sync-tasks", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns a list of tag-sync tasks. Minimum permissions To run this command, you must have the following permissions: resource-groups:ListTagSyncTasks with the group passed in the filters as the resource or * if using no filters + /// + /// Parameters: + /// - filters: The Amazon resource name (ARN) or name of the application group for which you want to return a list of tag-sync tasks. + /// - maxResults: The maximum number of results to be included in the response. + /// - nextToken: The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value provided by a previous call's NextToken response to indicate where the output should continue from. + /// - logger: Logger use during operation + @inlinable + public func listTagSyncTasks( + filters: [ListTagSyncTasksFilter]? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListTagSyncTasksOutput { + let input = ListTagSyncTasksInput( + filters: filters, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listTagSyncTasks(input, logger: logger) + } + /// Attaches a service configuration to the specified group. This occurs asynchronously, and can take time to complete. You can use GetGroupConfiguration to check the status of the update. Minimum permissions To run this command, you must have the following permissions: resource-groups:PutGroupConfiguration @Sendable @inlinable @@ -412,7 +552,7 @@ public struct ResourceGroups: AWSService { /// /// Parameters: /// - configuration: The new configuration to associate with the specified group. A configuration associates the resource group with an Amazon Web Services service and specifies how the service can interact with the resources in the group. A configuration is an array of GroupConfigurationItem elements. For information about the syntax of a service configuration, see Service configurations for Resource Groups. A resource group can contain either a Configuration or a ResourceQuery, but not both. - /// - group: The name or ARN of the resource group with the configuration that you want to update. + /// - group: The name or Amazon resource name (ARN) of the resource group with the configuration that you want to update. /// - logger: Logger use during operation @inlinable public func putGroupConfiguration( @@ -462,7 +602,45 @@ public struct ResourceGroups: AWSService { return try await self.searchResources(input, logger: logger) } - /// Adds tags to a resource group with the specified ARN. Existing tags on a resource group are not changed if they are not specified in the request parameters. Do not store personally identifiable information (PII) or other confidential or sensitive information in tags. We use tags to provide you with billing and administration services. 
Tags are not intended to be used for private or sensitive data. Minimum permissions To run this command, you must have the following permissions: resource-groups:Tag + /// Creates a new tag-sync task to onboard and sync resources tagged with a specific tag key-value pair to an application. Minimum permissions To run this command, you must have the following permissions: resource-groups:StartTagSyncTask on the application group resource-groups:CreateGroup iam:PassRole on the role provided in the request + @Sendable + @inlinable + public func startTagSyncTask(_ input: StartTagSyncTaskInput, logger: Logger = AWSClient.loggingDisabled) async throws -> StartTagSyncTaskOutput { + try await self.client.execute( + operation: "StartTagSyncTask", + path: "/start-tag-sync-task", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Creates a new tag-sync task to onboard and sync resources tagged with a specific tag key-value pair to an application. Minimum permissions To run this command, you must have the following permissions: resource-groups:StartTagSyncTask on the application group resource-groups:CreateGroup iam:PassRole on the role provided in the request + /// + /// Parameters: + /// - group: The Amazon resource name (ARN) or name of the application group for which you want to create a tag-sync task. + /// - roleArn: The Amazon resource name (ARN) of the role assumed by the service to tag and untag resources on your behalf. + /// - tagKey: The tag key. Resources tagged with this tag key-value pair will be added to the application. If a resource with this tag is later untagged, the tag-sync task removes the resource from the application. + /// - tagValue: The tag value. Resources tagged with this tag key-value pair will be added to the application. If a resource with this tag is later untagged, the tag-sync task removes the resource from the application. + /// - logger: Logger use during operation + @inlinable + public func startTagSyncTask( + group: String, + roleArn: String, + tagKey: String, + tagValue: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> StartTagSyncTaskOutput { + let input = StartTagSyncTaskInput( + group: group, + roleArn: roleArn, + tagKey: tagKey, + tagValue: tagValue + ) + return try await self.startTagSyncTask(input, logger: logger) + } + + /// Adds tags to a resource group with the specified Amazon resource name (ARN). Existing tags on a resource group are not changed if they are not specified in the request parameters. Do not store personally identifiable information (PII) or other confidential or sensitive information in tags. We use tags to provide you with billing and administration services. Tags are not intended to be used for private or sensitive data. Minimum permissions To run this command, you must have the following permissions: resource-groups:Tag @Sendable @inlinable public func tag(_ input: TagInput, logger: Logger = AWSClient.loggingDisabled) async throws -> TagOutput { @@ -475,10 +653,10 @@ public struct ResourceGroups: AWSService { logger: logger ) } - /// Adds tags to a resource group with the specified ARN. Existing tags on a resource group are not changed if they are not specified in the request parameters. Do not store personally identifiable information (PII) or other confidential or sensitive information in tags. We use tags to provide you with billing and administration services. Tags are not intended to be used for private or sensitive data. 
Minimum permissions To run this command, you must have the following permissions: resource-groups:Tag + /// Adds tags to a resource group with the specified Amazon resource name (ARN). Existing tags on a resource group are not changed if they are not specified in the request parameters. Do not store personally identifiable information (PII) or other confidential or sensitive information in tags. We use tags to provide you with billing and administration services. Tags are not intended to be used for private or sensitive data. Minimum permissions To run this command, you must have the following permissions: resource-groups:Tag /// /// Parameters: - /// - arn: The ARN of the resource group to which to add tags. + /// - arn: The Amazon resource name (ARN) of the resource group to which to add tags. /// - tags: The tags to add to the specified resource group. A tag is a string-to-string map of key-value pairs. /// - logger: Logger use during operation @inlinable @@ -510,8 +688,8 @@ public struct ResourceGroups: AWSService { /// Removes the specified resources from the specified group. This operation works only with static groups that you populated using the GroupResources operation. It doesn't work with any resource groups that are automatically populated by tag-based or CloudFormation stack-based queries. Minimum permissions To run this command, you must have the following permissions: resource-groups:UngroupResources /// /// Parameters: - /// - group: The name or the ARN of the resource group from which to remove the resources. - /// - resourceArns: The ARNs of the resources to be removed from the group. + /// - group: The name or the Amazon resource name (ARN) of the resource group from which to remove the resources. + /// - resourceArns: The Amazon resource names (ARNs) of the resources to be removed from the group. /// - logger: Logger use during operation @inlinable public func ungroupResources( @@ -542,7 +720,7 @@ public struct ResourceGroups: AWSService { /// Deletes tags from a specified resource group. Minimum permissions To run this command, you must have the following permissions: resource-groups:Untag /// /// Parameters: - /// - arn: The ARN of the resource group from which to remove tags. The command removed both the specified keys and any values associated with those keys. + /// - arn: The Amazon resource name (ARN) of the resource group from which to remove tags. The command removed both the specified keys and any values associated with those keys. /// - keys: The keys of the tags to be removed. /// - logger: Logger use during operation @inlinable @@ -574,7 +752,7 @@ public struct ResourceGroups: AWSService { /// Turns on or turns off optional features in Resource Groups. The preceding example shows that the request to turn on group lifecycle events is IN_PROGRESS. You can call the GetAccountSettings operation to check for completion by looking for GroupLifecycleEventsStatus to change to ACTIVE. /// /// Parameters: - /// - groupLifecycleEventsDesiredStatus: Specifies whether you want to turn group lifecycle events on or off. + /// - groupLifecycleEventsDesiredStatus: Specifies whether you want to turn group lifecycle events on or off. You can't turn on group lifecycle events if your resource groups quota is greater than 2,000. /// - logger: Logger use during operation @inlinable public func updateAccountSettings( @@ -603,18 +781,27 @@ public struct ResourceGroups: AWSService { /// Updates the description for an existing group. You cannot update the name of a resource group. 
Minimum permissions To run this command, you must have the following permissions: resource-groups:UpdateGroup /// /// Parameters: + /// - criticality: The critical rank of the application group on a scale of 1 to 10, with a rank of 1 being the most critical, and a rank of 10 being least critical. /// - description: The new description that you want to update the resource group with. Descriptions can contain letters, numbers, hyphens, underscores, periods, and spaces. - /// - group: The name or the ARN of the resource group to modify. + /// - displayName: The name of the application group, which you can change at any time. + /// - group: The name or the ARN of the resource group to update. + /// - owner: A name, email address or other identifier for the person or group who is considered as the owner of this application group within your organization. /// - logger: Logger use during operation @inlinable public func updateGroup( + criticality: Int? = nil, description: String? = nil, + displayName: String? = nil, group: String? = nil, + owner: String? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateGroupOutput { let input = UpdateGroupInput( + criticality: criticality, description: description, - group: group + displayName: displayName, + group: group, + owner: owner ) return try await self.updateGroup(input, logger: logger) } @@ -635,7 +822,7 @@ public struct ResourceGroups: AWSService { /// Updates the resource query of a group. For more information about resource queries, see Create a tag-based group in Resource Groups. Minimum permissions To run this command, you must have the following permissions: resource-groups:UpdateGroupQuery /// /// Parameters: - /// - group: The name or the ARN of the resource group to query. + /// - group: The name or the Amazon resource name (ARN) of the resource group to query. /// - resourceQuery: The resource query to determine which Amazon Web Services resources are members of this resource group. A resource group can contain either a Configuration or a ResourceQuery, but not both. /// - logger: Logger use during operation @inlinable @@ -687,7 +874,7 @@ extension ResourceGroups { /// /// - Parameters: /// - filters: Filters, formatted as ResourceFilter objects, that you want to apply to a ListGroupResources operation. Filters the results to include only those of the specified resource types. resource-type - Filter resources by their type. Specify up to five resource types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, or AWS::S3::Bucket. When you specify a resource-type filter for ListGroupResources, Resource Groups validates your filter resource types against the types that are defined in the query associated with the group. For example, if a group contains only S3 buckets because its query specifies only that resource type, but your resource-type filter includes EC2 instances, AWS Resource Groups does not filter for EC2 instances. In this case, a ListGroupResources request returns a BadRequestException error with a message similar to the following: The resource types specified as filters in the request are not valid. The error includes a list of resource types that failed the validation because they are not part of the query associated with the group. This validation doesn't occur when the group query specifies AWS::AllSupported, because a group based on such a query can contain any of the allowed resource types for the query type (tag-based or Amazon CloudFront stack-based queries). 
- /// - group: The name or the ARN of the resource group + /// - group: The name or the Amazon resource name (ARN) of the resource group. /// - maxResults: The total number of results that you want included on each page of the /// - logger: Logger used for logging @inlinable @@ -705,6 +892,46 @@ extension ResourceGroups { return self.listGroupResourcesPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listGroupingStatuses(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listGroupingStatusesPaginator( + _ input: ListGroupingStatusesInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listGroupingStatuses, + inputKey: \ListGroupingStatusesInput.nextToken, + outputKey: \ListGroupingStatusesOutput.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listGroupingStatuses(_:logger:)``. + /// + /// - Parameters: + /// - filters: The filter name and value pair that is used to return more specific results from a list of resources. + /// - group: The application group identifier, expressed as an Amazon resource name (ARN) or the application group name. + /// - maxResults: The maximum number of resources and their statuses returned in the response. + /// - logger: Logger used for logging + @inlinable + public func listGroupingStatusesPaginator( + filters: [ListGroupingStatusesFilter]? = nil, + group: String, + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListGroupingStatusesInput( + filters: filters, + group: group, + maxResults: maxResults + ) + return self.listGroupingStatusesPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listGroups(_:logger:)``. /// /// - Parameters: @@ -726,7 +953,7 @@ extension ResourceGroups { /// Return PaginatorSequence for operation ``listGroups(_:logger:)``. /// /// - Parameters: - /// - filters: Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups operation. resource-type - Filter the results to include only those resource groups that have the specified resource type in their ResourceTypeFilter. For example, AWS::EC2::Instance would return any resource group with a ResourceTypeFilter that includes AWS::EC2::Instance. configuration-type - Filter the results to include only those groups that have the specified configuration types attached. The current supported values are: AWS::AppRegistry::Application AWS::AppRegistry::ApplicationResourceGroups AWS::CloudFormation::Stack AWS::EC2::CapacityReservationPool AWS::EC2::HostManagement AWS::NetworkFirewall::RuleGroup + /// - filters: Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups operation. resource-type - Filter the results to include only those resource groups that have the specified resource type in their ResourceTypeFilter. For example, AWS::EC2::Instance would return any resource group with a ResourceTypeFilter that includes AWS::EC2::Instance. configuration-type - Filter the results to include only those groups that have the specified configuration types attached. 
The current supported values are: AWS::ResourceGroups::ApplicationGroup AWS::AppRegistry::Application AWS::AppRegistry::ApplicationResourceGroups AWS::CloudFormation::Stack AWS::EC2::CapacityReservationPool AWS::EC2::HostManagement AWS::NetworkFirewall::RuleGroup /// - maxResults: The total number of results that you want included on each page of the /// - logger: Logger used for logging @inlinable @@ -742,6 +969,43 @@ extension ResourceGroups { return self.listGroupsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listTagSyncTasks(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listTagSyncTasksPaginator( + _ input: ListTagSyncTasksInput, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listTagSyncTasks, + inputKey: \ListTagSyncTasksInput.nextToken, + outputKey: \ListTagSyncTasksOutput.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listTagSyncTasks(_:logger:)``. + /// + /// - Parameters: + /// - filters: The Amazon resource name (ARN) or name of the application group for which you want to return a list of tag-sync tasks. + /// - maxResults: The maximum number of results to be included in the response. + /// - logger: Logger used for logging + @inlinable + public func listTagSyncTasksPaginator( + filters: [ListTagSyncTasksFilter]? = nil, + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListTagSyncTasksInput( + filters: filters, + maxResults: maxResults + ) + return self.listTagSyncTasksPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``searchResources(_:logger:)``. 
/// /// - Parameters: @@ -792,6 +1056,18 @@ extension ResourceGroups.ListGroupResourcesInput: AWSPaginateToken { } } +extension ResourceGroups.ListGroupingStatusesInput: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> ResourceGroups.ListGroupingStatusesInput { + return .init( + filters: self.filters, + group: self.group, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension ResourceGroups.ListGroupsInput: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> ResourceGroups.ListGroupsInput { @@ -803,6 +1079,17 @@ extension ResourceGroups.ListGroupsInput: AWSPaginateToken { } } +extension ResourceGroups.ListTagSyncTasksInput: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> ResourceGroups.ListTagSyncTasksInput { + return .init( + filters: self.filters, + maxResults: self.maxResults, + nextToken: token + ) + } +} + extension ResourceGroups.SearchResourcesInput: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> ResourceGroups.SearchResourcesInput { diff --git a/Sources/Soto/Services/ResourceGroups/ResourceGroups_shapes.swift b/Sources/Soto/Services/ResourceGroups/ResourceGroups_shapes.swift index ba00a13888..8e902cec3a 100644 --- a/Sources/Soto/Services/ResourceGroups/ResourceGroups_shapes.swift +++ b/Sources/Soto/Services/ResourceGroups/ResourceGroups_shapes.swift @@ -35,6 +35,9 @@ extension ResourceGroups { public enum GroupFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case configurationType = "configuration-type" + case criticality = "criticality" + case displayName = "display-name" + case owner = "owner" case resourceType = "resource-type" public var description: String { return self.rawValue } } @@ -53,6 +56,26 @@ extension ResourceGroups { public var description: String { return self.rawValue } } + public enum GroupingStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case skipped = "SKIPPED" + case success = "SUCCESS" + public var description: String { return self.rawValue } + } + + public enum GroupingType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case group = "GROUP" + case ungroup = "UNGROUP" + public var description: String { return self.rawValue } + } + + public enum ListGroupingStatusesFilterName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case resourceArn = "resource-arn" + case status = "status" + public var description: String { return self.rawValue } + } + public enum QueryErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case cloudformationStackInactive = "CLOUDFORMATION_STACK_INACTIVE" case cloudformationStackNotExisting = "CLOUDFORMATION_STACK_NOT_EXISTING" @@ -77,6 +100,12 @@ extension ResourceGroups { public var description: String { return self.rawValue } } + public enum TagSyncTaskStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "ACTIVE" + case error = "ERROR" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct AccountSettings: AWSDecodableShape { @@ -101,23 +130,52 @@ extension ResourceGroups { } } + public struct CancelTagSyncTaskInput: AWSEncodableShape { + /// The Amazon resource name (ARN) of the tag-sync task. 
+ public let taskArn: String + + @inlinable + public init(taskArn: String) { + self.taskArn = taskArn + } + + public func validate(name: String) throws { + try self.validate(self.taskArn, name: "taskArn", parent: name, max: 1600) + try self.validate(self.taskArn, name: "taskArn", parent: name, min: 12) + try self.validate(self.taskArn, name: "taskArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}/tag-sync-task/[a-z0-9]{26}$") + } + + private enum CodingKeys: String, CodingKey { + case taskArn = "TaskArn" + } + } + public struct CreateGroupInput: AWSEncodableShape { /// A configuration associates the resource group with an Amazon Web Services service and specifies how the service can interact with the resources in the group. A configuration is an array of GroupConfigurationItem elements. For details about the syntax of service configurations, see Service configurations for Resource Groups. A resource group can contain either a Configuration or a ResourceQuery, but not both. public let configuration: [GroupConfigurationItem]? + /// The critical rank of the application group on a scale of 1 to 10, with a rank of 1 being the most critical, and a rank of 10 being least critical. + public let criticality: Int? /// The description of the resource group. Descriptions can consist of letters, numbers, hyphens, underscores, periods, and spaces. public let description: String? + /// The name of the application group, which you can change at any time. + public let displayName: String? /// The name of the group, which is the identifier of the group in other operations. You can't change the name of a resource group after you create it. A resource group name can consist of letters, numbers, hyphens, periods, and underscores. The name cannot start with AWS, aws, or any other possible capitalization; these are reserved. A resource group name must be unique within each Amazon Web Services Region in your Amazon Web Services account. public let name: String + /// A name, email address or other identifier for the person or group who is considered as the owner of this application group within your organization. + public let owner: String? /// The resource query that determines which Amazon Web Services resources are members of this group. For more information about resource queries, see Create a tag-based group in Resource Groups. A resource group can contain either a ResourceQuery or a Configuration, but not both. public let resourceQuery: ResourceQuery? /// The tags to add to the group. A tag is key-value pair string. public let tags: [String: String]? @inlinable - public init(configuration: [GroupConfigurationItem]? = nil, description: String? = nil, name: String, resourceQuery: ResourceQuery? = nil, tags: [String: String]? = nil) { + public init(configuration: [GroupConfigurationItem]? = nil, criticality: Int? = nil, description: String? = nil, displayName: String? = nil, name: String, owner: String? = nil, resourceQuery: ResourceQuery? = nil, tags: [String: String]? 
= nil) { self.configuration = configuration + self.criticality = criticality self.description = description + self.displayName = displayName self.name = name + self.owner = owner self.resourceQuery = resourceQuery self.tags = tags } @@ -127,11 +185,17 @@ extension ResourceGroups { try $0.validate(name: "\(name).configuration[]") } try self.validate(self.configuration, name: "configuration", parent: name, max: 2) + try self.validate(self.criticality, name: "criticality", parent: name, max: 10) + try self.validate(self.criticality, name: "criticality", parent: name, min: 1) try self.validate(self.description, name: "description", parent: name, max: 1024) try self.validate(self.description, name: "description", parent: name, pattern: "^[\\sa-zA-Z0-9_\\.-]*$") + try self.validate(self.displayName, name: "displayName", parent: name, max: 300) + try self.validate(self.displayName, name: "displayName", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") try self.validate(self.name, name: "name", parent: name, max: 300) try self.validate(self.name, name: "name", parent: name, min: 1) try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z0-9_\\.-]+$") + try self.validate(self.owner, name: "owner", parent: name, max: 300) + try self.validate(self.owner, name: "owner", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") try self.resourceQuery?.validate(name: "\(name).resourceQuery") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) @@ -144,8 +208,11 @@ extension ResourceGroups { private enum CodingKeys: String, CodingKey { case configuration = "Configuration" + case criticality = "Criticality" case description = "Description" + case displayName = "DisplayName" case name = "Name" + case owner = "Owner" case resourceQuery = "ResourceQuery" case tags = "Tags" } @@ -178,7 +245,7 @@ extension ResourceGroups { } public struct DeleteGroupInput: AWSEncodableShape { - /// The name or the ARN of the resource group to delete. + /// The name or the Amazon resource name (ARN) of the resource group to delete. public let group: String? /// Deprecated - don't use this parameter. Use Group instead. public let groupName: String? @@ -199,10 +266,10 @@ extension ResourceGroups { public func validate(name: String) throws { try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.validate(self.groupName, name: "groupName", parent: name, max: 300) try self.validate(self.groupName, name: "groupName", parent: name, min: 1) - try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]+$") + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}$") } private enum CodingKeys: String, CodingKey { @@ -230,7 +297,7 @@ extension ResourceGroups { public let errorCode: String? /// The error message text associated with the failure. public let errorMessage: String? 
- /// The ARN of the resource that failed to be added or removed. + /// The Amazon resource name (ARN) of the resource that failed to be added or removed. public let resourceArn: String? @inlinable @@ -262,7 +329,7 @@ extension ResourceGroups { } public struct GetGroupConfigurationInput: AWSEncodableShape { - /// The name or the ARN of the resource group for which you want to retrive the service configuration. + /// The name or the Amazon resource name (ARN) of the resource group for which you want to retrieve the service configuration. public let group: String? @inlinable @@ -273,7 +340,7 @@ extension ResourceGroups { public func validate(name: String) throws { try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") } private enum CodingKeys: String, CodingKey { @@ -296,7 +363,7 @@ extension ResourceGroups { } public struct GetGroupInput: AWSEncodableShape { - /// The name or the ARN of the resource group to retrieve. + /// The name or the Amazon resource name (ARN) of the resource group to retrieve. public let group: String? /// Deprecated - don't use this parameter. Use Group instead. public let groupName: String? @@ -317,10 +384,10 @@ extension ResourceGroups { public func validate(name: String) throws { try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.validate(self.groupName, name: "groupName", parent: name, max: 300) try self.validate(self.groupName, name: "groupName", parent: name, min: 1) - try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]+$") + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}$") } private enum CodingKeys: String, CodingKey { @@ -344,7 +411,7 @@ extension ResourceGroups { } public struct GetGroupQueryInput: AWSEncodableShape { - /// The name or the ARN of the resource group to query. + /// The name or the Amazon resource name (ARN) of the resource group to query. public let group: String? /// Don't use this parameter. Use Group instead. public let groupName: String?
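The application-group fields added to CreateGroupInput above (Criticality, DisplayName, Owner) could be exercised roughly as follows. This is an illustrative sketch only: AWSClient(), ResourceGroups(client:region:), and createGroup(_:) are assumed from the usual Soto conventions (the same pattern the generated RoboMaker operations later in this diff follow) and are not defined in this hunk; the group name, owner, and tag values are hypothetical.

```swift
import SotoResourceGroups

// Sketch: create an application group using the new Criticality/DisplayName/Owner
// fields. Client construction and createGroup(_:) are assumed from standard Soto
// conventions; shut the client down when finished (omitted here for brevity).
func createPaymentsApplicationGroup() async throws {
    let awsClient = AWSClient()   // default credential/HTTP configuration (Soto 7 style)
    let resourceGroups = ResourceGroups(client: awsClient, region: .useast1)

    let input = ResourceGroups.CreateGroupInput(
        criticality: 1,                               // 1 = most critical, 10 = least critical
        description: "Payment processing application",
        displayName: "Payments",                      // unlike name, this can be changed later
        name: "payments-app",                         // must not start with "aws"/"AWS"
        owner: "payments-team@example.com",
        tags: ["CostCenter": "1234"]
    )
    try input.validate(name: "CreateGroupInput")      // exercises the new criticality/displayName/owner rules above
    _ = try await resourceGroups.createGroup(input)
}
```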
@@ -365,10 +432,10 @@ extension ResourceGroups { public func validate(name: String) throws { try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.validate(self.groupName, name: "groupName", parent: name, max: 300) try self.validate(self.groupName, name: "groupName", parent: name, min: 1) - try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]+$") + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}$") } private enum CodingKeys: String, CodingKey { @@ -391,8 +458,74 @@ extension ResourceGroups { } } + public struct GetTagSyncTaskInput: AWSEncodableShape { + /// The Amazon resource name (ARN) of the tag-sync task. + public let taskArn: String + + @inlinable + public init(taskArn: String) { + self.taskArn = taskArn + } + + public func validate(name: String) throws { + try self.validate(self.taskArn, name: "taskArn", parent: name, max: 1600) + try self.validate(self.taskArn, name: "taskArn", parent: name, min: 12) + try self.validate(self.taskArn, name: "taskArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}/tag-sync-task/[a-z0-9]{26}$") + } + + private enum CodingKeys: String, CodingKey { + case taskArn = "TaskArn" + } + } + + public struct GetTagSyncTaskOutput: AWSDecodableShape { + /// The timestamp of when the tag-sync task was created. + public let createdAt: Date? + /// The specific error message in cases where the tag-sync task status is ERROR. + public let errorMessage: String? + /// The Amazon resource name (ARN) of the application group. + public let groupArn: String? + /// The name of the application group. + public let groupName: String? + /// The Amazon resource name (ARN) of the role assumed by Resource Groups to tag and untag resources on your behalf. For more information about this role, review Tag-sync required permissions. + public let roleArn: String? + /// The status of the tag-sync task. Valid values include: ACTIVE - The tag-sync task is actively managing resources in the application by adding or removing the awsApplication tag from resources when they are tagged or untagged with the specified tag key-value pair. ERROR - The tag-sync task is not actively managing resources in the application. Review the ErrorMessage for more information about resolving the error. + public let status: TagSyncTaskStatus? + /// The tag key. + public let tagKey: String? + /// The tag value. + public let tagValue: String? + /// The Amazon resource name (ARN) of the tag-sync task. + public let taskArn: String? + + @inlinable + public init(createdAt: Date? = nil, errorMessage: String? = nil, groupArn: String? = nil, groupName: String? = nil, roleArn: String? = nil, status: TagSyncTaskStatus? = nil, tagKey: String? = nil, tagValue: String? = nil, taskArn: String? 
= nil) { + self.createdAt = createdAt + self.errorMessage = errorMessage + self.groupArn = groupArn + self.groupName = groupName + self.roleArn = roleArn + self.status = status + self.tagKey = tagKey + self.tagValue = tagValue + self.taskArn = taskArn + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "CreatedAt" + case errorMessage = "ErrorMessage" + case groupArn = "GroupArn" + case groupName = "GroupName" + case roleArn = "RoleArn" + case status = "Status" + case tagKey = "TagKey" + case tagValue = "TagValue" + case taskArn = "TaskArn" + } + } + public struct GetTagsInput: AWSEncodableShape { - /// The ARN of the resource group whose tags you want to retrieve. + /// The Amazon resource name (ARN) of the resource group whose tags you want to retrieve. public let arn: String @inlinable @@ -409,14 +542,14 @@ public func validate(name: String) throws { try self.validate(self.arn, name: "arn", parent: name, max: 1600) try self.validate(self.arn, name: "arn", parent: name, min: 12) - try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") } private enum CodingKeys: CodingKey {} } public struct GetTagsOutput: AWSDecodableShape { - /// The ARN of the tagged resource group. + /// The Amazon resource name (ARN) of the tagged resource group. public let arn: String? /// The tags associated with the specified resource group. public let tags: [String: String]? @@ -434,24 +567,40 @@ extension ResourceGroups { } public struct Group: AWSDecodableShape { + /// A tag that defines the application group membership. This tag is only supported for application groups. + public let applicationTag: [String: String]? + /// The critical rank of the application group on a scale of 1 to 10, with a rank of 1 being the most critical, and a rank of 10 being least critical. + public let criticality: Int? /// The description of the resource group. public let description: String? - /// The ARN of the resource group. + /// The name of the application group, which you can change at any time. + public let displayName: String? + /// The Amazon resource name (ARN) of the resource group. public let groupArn: String /// The name of the resource group. public let name: String + /// A name, email address or other identifier for the person or group who is considered as the owner of this application group within your organization. + public let owner: String? @inlinable - public init(description: String? = nil, groupArn: String, name: String) { + public init(applicationTag: [String: String]? = nil, criticality: Int? = nil, description: String? = nil, displayName: String? = nil, groupArn: String, name: String, owner: String?
= nil) { + self.applicationTag = applicationTag + self.criticality = criticality self.description = description + self.displayName = displayName self.groupArn = groupArn self.name = name + self.owner = owner } private enum CodingKeys: String, CodingKey { + case applicationTag = "ApplicationTag" + case criticality = "Criticality" case description = "Description" + case displayName = "DisplayName" case groupArn = "GroupArn" case name = "Name" + case owner = "Owner" } } @@ -550,9 +699,9 @@ extension ResourceGroups { public func validate(name: String) throws { try self.values.forEach { - try validate($0, name: "values[]", parent: name, max: 128) + try validate($0, name: "values[]", parent: name, max: 300) try validate($0, name: "values[]", parent: name, min: 1) - try validate($0, name: "values[]", parent: name, pattern: "^AWS::(AllSupported|[a-zA-Z0-9]+::[a-zA-Z0-9]+)$") + try validate($0, name: "values[]", parent: name, pattern: "^AWS::(AllSupported|[a-zA-Z0-9]+::[a-zA-Z0-9]+)|[\\s\\p{L}0-9_\\.-]*$") } try self.validate(self.values, name: "values", parent: name, max: 5) try self.validate(self.values, name: "values", parent: name, min: 1) @@ -565,20 +714,36 @@ extension ResourceGroups { } public struct GroupIdentifier: AWSDecodableShape { - /// The ARN of the resource group. + /// The critical rank of the application group on a scale of 1 to 10, with a rank of 1 being the most critical, and a rank of 10 being least critical. + public let criticality: Int? + /// The description of the application group. + public let description: String? + /// The name of the application group, which you can change at any time. + public let displayName: String? + /// The Amazon resource name (ARN) of the resource group. public let groupArn: String? /// The name of the resource group. public let groupName: String? + /// A name, email address or other identifier for the person or group who is considered as the owner of this group within your organization. + public let owner: String? @inlinable - public init(groupArn: String? = nil, groupName: String? = nil) { + public init(criticality: Int? = nil, description: String? = nil, displayName: String? = nil, groupArn: String? = nil, groupName: String? = nil, owner: String? = nil) { + self.criticality = criticality + self.description = description + self.displayName = displayName self.groupArn = groupArn self.groupName = groupName + self.owner = owner } private enum CodingKeys: String, CodingKey { + case criticality = "Criticality" + case description = "Description" + case displayName = "DisplayName" case groupArn = "GroupArn" case groupName = "GroupName" + case owner = "Owner" } } @@ -601,9 +766,9 @@ extension ResourceGroups { } public struct GroupResourcesInput: AWSEncodableShape { - /// The name or the ARN of the resource group to add resources to. + /// The name or the Amazon resource name (ARN) of the resource group to add resources to. public let group: String - /// The list of ARNs of the resources to be added to the group. + /// The list of Amazon resource names (ARNs) of the resources to be added to the group. 
public let resourceArns: [String] @inlinable @@ -615,7 +780,7 @@ public func validate(name: String) throws { try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.resourceArns.forEach { try validate($0, name: "resourceArns[]", parent: name, pattern: "^arn:aws(-[a-z]+)*:[a-z0-9\\-]*:([a-z]{2}(-[a-z]+)+-\\d{1})?:([0-9]{12})?:.+$") } @@ -630,11 +795,11 @@ extension ResourceGroups { } public struct GroupResourcesOutput: AWSDecodableShape { - /// A list of ARNs of any resources that this operation failed to add to the group. + /// A list of Amazon resource names (ARNs) of any resources that this operation failed to add to the group. public let failed: [FailedResource]? - /// A list of ARNs of any resources that this operation is still in the process adding to the group. These pending additions continue asynchronously. You can check the status of pending additions by using the ListGroupResources operation, and checking the Resources array in the response and the Status field of each object in that array. + /// A list of Amazon resource names (ARNs) of any resources that this operation is still in the process of adding to the group. These pending additions continue asynchronously. You can check the status of pending additions by using the ListGroupResources operation, and checking the Resources array in the response and the Status field of each object in that array. public let pending: [PendingResource]? - /// A list of ARNs of the resources that this operation successfully added to the group. + /// A list of Amazon resource names (ARNs) of the resources that this operation successfully added to the group. public let succeeded: [String]? @inlinable @@ -651,10 +816,44 @@ extension ResourceGroups { } } + public struct GroupingStatusesItem: AWSDecodableShape { + /// Describes the resource grouping action with values of GROUP or UNGROUP. + public let action: GroupingType? + /// Specifies the error code that was raised. + public let errorCode: String? + /// A message that explains the ErrorCode. + public let errorMessage: String? + /// The Amazon resource name (ARN) of a resource. + public let resourceArn: String? + /// Describes the resource grouping status with values of SUCCESS, FAILED, IN_PROGRESS, or SKIPPED. + public let status: GroupingStatus? + /// A timestamp of when the status was last updated. + public let updatedAt: Date? + + @inlinable + public init(action: GroupingType? = nil, errorCode: String? = nil, errorMessage: String? = nil, resourceArn: String? = nil, status: GroupingStatus? = nil, updatedAt: Date?
= nil) { + self.action = action + self.errorCode = errorCode + self.errorMessage = errorMessage + self.resourceArn = resourceArn + self.status = status + self.updatedAt = updatedAt + } + + private enum CodingKeys: String, CodingKey { + case action = "Action" + case errorCode = "ErrorCode" + case errorMessage = "ErrorMessage" + case resourceArn = "ResourceArn" + case status = "Status" + case updatedAt = "UpdatedAt" + } + } + public struct ListGroupResourcesInput: AWSEncodableShape { /// Filters, formatted as ResourceFilter objects, that you want to apply to a ListGroupResources operation. Filters the results to include only those of the specified resource types. resource-type - Filter resources by their type. Specify up to five resource types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, or AWS::S3::Bucket. When you specify a resource-type filter for ListGroupResources, Resource Groups validates your filter resource types against the types that are defined in the query associated with the group. For example, if a group contains only S3 buckets because its query specifies only that resource type, but your resource-type filter includes EC2 instances, AWS Resource Groups does not filter for EC2 instances. In this case, a ListGroupResources request returns a BadRequestException error with a message similar to the following: The resource types specified as filters in the request are not valid. The error includes a list of resource types that failed the validation because they are not part of the query associated with the group. This validation doesn't occur when the group query specifies AWS::AllSupported, because a group based on such a query can contain any of the allowed resource types for the query type (tag-based or Amazon CloudFront stack-based queries). public let filters: [ResourceFilter]? - /// The name or the ARN of the resource group + /// The name or the Amazon resource name (ARN) of the resource group. public let group: String? /// Deprecated - don't use this parameter. Use the Group request field instead. public let groupName: String? @@ -698,10 +897,10 @@ extension ResourceGroups { } try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.validate(self.groupName, name: "groupName", parent: name, max: 300) try self.validate(self.groupName, name: "groupName", parent: name, min: 1) - try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]+$") + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}$") try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) @@ -772,8 +971,95 @@ extension ResourceGroups { } } + public struct ListGroupingStatusesFilter: AWSEncodableShape { + /// The name of the filter. 
Filter names are case-sensitive. + public let name: ListGroupingStatusesFilterName + /// One or more filter values. Allowed filter values vary by resource filter name, and are case-sensitive. + public let values: [String] + + @inlinable + public init(name: ListGroupingStatusesFilterName, values: [String]) { + self.name = name + self.values = values + } + + public func validate(name: String) throws { + try self.values.forEach { + try validate($0, name: "values[]", parent: name, pattern: "^SUCCESS|FAILED|IN_PROGRESS|SKIPPED|arn:aws(-[a-z]+)*:[a-z0-9\\-]*:([a-z]{2}(-[a-z]+)+-\\d{1})?:([0-9]{12})?:.+$") + } + try self.validate(self.values, name: "values", parent: name, max: 10) + try self.validate(self.values, name: "values", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case name = "Name" + case values = "Values" + } + } + + public struct ListGroupingStatusesInput: AWSEncodableShape { + /// The filter name and value pair that is used to return more specific results from a list of resources. + public let filters: [ListGroupingStatusesFilter]? + /// The application group identifier, expressed as an Amazon resource name (ARN) or the application group name. + public let group: String + /// The maximum number of resources and their statuses returned in the response. + public let maxResults: Int? + /// The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value provided by a previous call's NextToken response to indicate where the output should continue from. + public let nextToken: String? + + @inlinable + public init(filters: [ListGroupingStatusesFilter]? = nil, group: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.filters = filters + self.group = group + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.filters?.forEach { + try $0.validate(name: "\(name).filters[]") + } + try self.validate(self.group, name: "group", parent: name, max: 1600) + try self.validate(self.group, name: "group", parent: name, min: 1) + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[a-zA-Z0-9+/]*={0,2}$") + } + + private enum CodingKeys: String, CodingKey { + case filters = "Filters" + case group = "Group" + case maxResults = "MaxResults" + case nextToken = "NextToken" + } + } + + public struct ListGroupingStatusesOutput: AWSDecodableShape { + /// The application group identifier, expressed as an Amazon resource name (ARN) or the application group name. + public let group: String? + /// Returns details about the grouping or ungrouping status of the resources in the specified application group. + public let groupingStatuses: [GroupingStatusesItem]? + /// If present, indicates that more output is available than is included in the current response. 
Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. + public let nextToken: String? + + @inlinable + public init(group: String? = nil, groupingStatuses: [GroupingStatusesItem]? = nil, nextToken: String? = nil) { + self.group = group + self.groupingStatuses = groupingStatuses + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case group = "Group" + case groupingStatuses = "GroupingStatuses" + case nextToken = "NextToken" + } + } + public struct ListGroupsInput: AWSEncodableShape { - /// Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups operation. resource-type - Filter the results to include only those resource groups that have the specified resource type in their ResourceTypeFilter. For example, AWS::EC2::Instance would return any resource group with a ResourceTypeFilter that includes AWS::EC2::Instance. configuration-type - Filter the results to include only those groups that have the specified configuration types attached. The current supported values are: AWS::AppRegistry::Application AWS::AppRegistry::ApplicationResourceGroups AWS::CloudFormation::Stack AWS::EC2::CapacityReservationPool AWS::EC2::HostManagement AWS::NetworkFirewall::RuleGroup + /// Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups operation. resource-type - Filter the results to include only those resource groups that have the specified resource type in their ResourceTypeFilter. For example, AWS::EC2::Instance would return any resource group with a ResourceTypeFilter that includes AWS::EC2::Instance. configuration-type - Filter the results to include only those groups that have the specified configuration types attached. The current supported values are: AWS::ResourceGroups::ApplicationGroup AWS::AppRegistry::Application AWS::AppRegistry::ApplicationResourceGroups AWS::CloudFormation::Stack AWS::EC2::CapacityReservationPool AWS::EC2::HostManagement AWS::NetworkFirewall::RuleGroup public let filters: [GroupFilter]? /// The total number of results that you want included on each page of the /// response. If you do not include this parameter, it defaults to a value that is specific to the @@ -853,6 +1139,83 @@ extension ResourceGroups { } } + public struct ListTagSyncTasksFilter: AWSEncodableShape { + /// The Amazon resource name (ARN) of the application group. + public let groupArn: String? + /// The name of the application group. + public let groupName: String? + + @inlinable + public init(groupArn: String? = nil, groupName: String? 
= nil) { + self.groupArn = groupArn + self.groupName = groupName + } + + public func validate(name: String) throws { + try self.validate(self.groupArn, name: "groupArn", parent: name, max: 1600) + try self.validate(self.groupArn, name: "groupArn", parent: name, min: 12) + try self.validate(self.groupArn, name: "groupArn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") + try self.validate(self.groupName, name: "groupName", parent: name, max: 300) + try self.validate(self.groupName, name: "groupName", parent: name, min: 1) + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}$") + } + + private enum CodingKeys: String, CodingKey { + case groupArn = "GroupArn" + case groupName = "GroupName" + } + } + + public struct ListTagSyncTasksInput: AWSEncodableShape { + /// The Amazon resource name (ARN) or name of the application group for which you want to return a list of tag-sync tasks. + public let filters: [ListTagSyncTasksFilter]? + /// The maximum number of results to be included in the response. + public let maxResults: Int? + /// The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value provided by a previous call's NextToken response to indicate where the output should continue from. + public let nextToken: String? + + @inlinable + public init(filters: [ListTagSyncTasksFilter]? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.filters = filters + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func validate(name: String) throws { + try self.filters?.forEach { + try $0.validate(name: "\(name).filters[]") + } + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 50) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 8192) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[a-zA-Z0-9+/]*={0,2}$") + } + + private enum CodingKeys: String, CodingKey { + case filters = "Filters" + case maxResults = "MaxResults" + case nextToken = "NextToken" + } + } + + public struct ListTagSyncTasksOutput: AWSDecodableShape { + /// If present, indicates that more output is available than is included in the current response. Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. You should repeat this until the NextToken response element comes back as null. + public let nextToken: String? + /// A list of tag-sync tasks and information about each task. + public let tagSyncTasks: [TagSyncTaskItem]? + + @inlinable + public init(nextToken: String? = nil, tagSyncTasks: [TagSyncTaskItem]? = nil) { + self.nextToken = nextToken + self.tagSyncTasks = tagSyncTasks + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "NextToken" + case tagSyncTasks = "TagSyncTasks" + } + } + public struct PendingResource: AWSDecodableShape { /// The Amazon resource name (ARN) of the resource that's in a pending state. public let resourceArn: String? @@ -870,7 +1233,7 @@ extension ResourceGroups { public struct PutGroupConfigurationInput: AWSEncodableShape { /// The new configuration to associate with the specified group. 
A configuration associates the resource group with an Amazon Web Services service and specifies how the service can interact with the resources in the group. A configuration is an array of GroupConfigurationItem elements. For information about the syntax of a service configuration, see Service configurations for Resource Groups. A resource group can contain either a Configuration or a ResourceQuery, but not both. public let configuration: [GroupConfigurationItem]? - /// The name or ARN of the resource group with the configuration that you want to update. + /// The name or Amazon resource name (ARN) of the resource group with the configuration that you want to update. public let group: String? @inlinable @@ -886,7 +1249,7 @@ extension ResourceGroups { try self.validate(self.configuration, name: "configuration", parent: name, max: 2) try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") } private enum CodingKeys: String, CodingKey { @@ -946,7 +1309,7 @@ extension ResourceGroups { } public struct ResourceIdentifier: AWSDecodableShape { - /// The ARN of a resource. + /// The Amazon resource name (ARN) of a resource. public let resourceArn: String? /// The resource type of a resource, such as AWS::EC2::Instance. public let resourceType: String? @@ -966,7 +1329,7 @@ extension ResourceGroups { public struct ResourceQuery: AWSEncodableShape & AWSDecodableShape { /// The query that defines a group or a search. The contents depends on the value of the Type element. ResourceTypeFilters – Applies to all ResourceQuery objects of either Type. This element contains one of the following two items: The value AWS::AllSupported. This causes the ResourceQuery to match resources of any resource type that also match the query. A list (a JSON array) of resource type identifiers that limit the query to only resources of the specified types. For the complete list of resource types that you can use in the array value for ResourceTypeFilters, see Resources you can use with Resource Groups and Tag Editor in the Resource Groups User Guide. Example: "ResourceTypeFilters": ["AWS::AllSupported"] or "ResourceTypeFilters": ["AWS::EC2::Instance", "AWS::S3::Bucket"] TagFilters – applicable only if Type = TAG_FILTERS_1_0. The Query contains a JSON string that represents a collection of simple tag filters. The JSON string uses a syntax similar to the GetResources operation, but uses only the ResourceTypeFilters and TagFilters fields. If you specify more than one tag key, only resources that match all tag keys, and at least one value of each specified tag key, are returned in your query. If you specify more than one value for a tag key, a resource matches the filter if it has a tag key value that matches any of the specified values. For example, consider the following sample query for resources that have two tags, Stage and Version, with two values each: [{"Stage":["Test","Deploy"]},{"Version":["1","2"]}] The results of this resource query could include the following. 
An Amazon EC2 instance that has the following two tags: {"Stage":"Deploy"}, and {"Version":"2"} An S3 bucket that has the following two tags: {"Stage":"Test"}, and {"Version":"1"} The resource query results would not include the following items in the results, however. An Amazon EC2 instance that has only the following tag: {"Stage":"Deploy"}. The instance does not have all of the tag keys specified in the filter, so it is excluded from the results. An RDS database that has the following two tags: {"Stage":"Archived"} and {"Version":"4"} The database has all of the tag keys, but none of those keys has an associated value that matches at least one of the specified values in the filter. Example: "TagFilters": [ { "Key": "Stage", "Values": [ "Gamma", "Beta" ] } StackIdentifier – applicable only if Type = CLOUDFORMATION_STACK_1_0. The value of this parameter is the Amazon Resource Name (ARN) of the CloudFormation stack whose resources you want included in the group. public let query: String - /// The type of the query to perform. This can have one of two values: CLOUDFORMATION_STACK_1_0: Specifies that you want the group to contain the members of an CloudFormation stack. The Query contains a StackIdentifier element with an ARN for a CloudFormation stack. TAG_FILTERS_1_0: Specifies that you want the group to include resource that have tags that match the query. + /// The type of the query to perform. This can have one of two values: CLOUDFORMATION_STACK_1_0: Specifies that you want the group to contain the members of a CloudFormation stack. The Query contains a StackIdentifier element with an Amazon resource name (ARN) for a CloudFormation stack. TAG_FILTERS_1_0: Specifies that you want the group to include resources that have tags that match the query. public let type: QueryType @inlinable @@ -1065,8 +1428,82 @@ extension ResourceGroups { } } + public struct StartTagSyncTaskInput: AWSEncodableShape { + /// The Amazon resource name (ARN) or name of the application group for which you want to create a tag-sync task. + public let group: String + /// The Amazon resource name (ARN) of the role assumed by the service to tag and untag resources on your behalf. + public let roleArn: String + /// The tag key. Resources tagged with this tag key-value pair will be added to the application. If a resource with this tag is later untagged, the tag-sync task removes the resource from the application. + public let tagKey: String + /// The tag value. Resources tagged with this tag key-value pair will be added to the application. If a resource with this tag is later untagged, the tag-sync task removes the resource from the application.
+ public let tagValue: String + + @inlinable + public init(group: String, roleArn: String, tagKey: String, tagValue: String) { + self.group = group + self.roleArn = roleArn + self.tagKey = tagKey + self.tagValue = tagValue + } + + public func validate(name: String) throws { + try self.validate(self.group, name: "group", parent: name, max: 1600) + try self.validate(self.group, name: "group", parent: name, min: 1) + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") + try self.validate(self.roleArn, name: "roleArn", parent: name, max: 2048) + try self.validate(self.roleArn, name: "roleArn", parent: name, min: 20) + try self.validate(self.roleArn, name: "roleArn", parent: name, pattern: "^arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") + try self.validate(self.tagKey, name: "tagKey", parent: name, max: 128) + try self.validate(self.tagKey, name: "tagKey", parent: name, min: 1) + try self.validate(self.tagKey, name: "tagKey", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + try self.validate(self.tagValue, name: "tagValue", parent: name, max: 256) + try self.validate(self.tagValue, name: "tagValue", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") + } + + private enum CodingKeys: String, CodingKey { + case group = "Group" + case roleArn = "RoleArn" + case tagKey = "TagKey" + case tagValue = "TagValue" + } + } + + public struct StartTagSyncTaskOutput: AWSDecodableShape { + /// The Amazon resource name (ARN) of the application group for which you want to add or remove resources. + public let groupArn: String? + /// The name of the application group to onboard and sync resources. + public let groupName: String? + /// The Amazon resource name (ARN) of the role assumed by the service to tag and untag resources on your behalf. + public let roleArn: String? + /// The tag key of the tag-sync task. + public let tagKey: String? + /// The tag value of the tag-sync task. + public let tagValue: String? + /// The Amazon resource name (ARN) of the new tag-sync task. + public let taskArn: String? + + @inlinable + public init(groupArn: String? = nil, groupName: String? = nil, roleArn: String? = nil, tagKey: String? = nil, tagValue: String? = nil, taskArn: String? = nil) { + self.groupArn = groupArn + self.groupName = groupName + self.roleArn = roleArn + self.tagKey = tagKey + self.tagValue = tagValue + self.taskArn = taskArn + } + + private enum CodingKeys: String, CodingKey { + case groupArn = "GroupArn" + case groupName = "GroupName" + case roleArn = "RoleArn" + case tagKey = "TagKey" + case tagValue = "TagValue" + case taskArn = "TaskArn" + } + } + public struct TagInput: AWSEncodableShape { - /// The ARN of the resource group to which to add tags. + /// The Amazon resource name (ARN) of the resource group to which to add tags. public let arn: String /// The tags to add to the specified resource group. A tag is a string-to-string map of key-value pairs. 
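Taken together, the StartTagSyncTaskInput/Output and GetTagSyncTaskInput/Output shapes above describe the new tag-sync workflow for application groups. A minimal sketch of how they might be driven, assuming the generated ResourceGroups service exposes matching startTagSyncTask(_:) and getTagSyncTask(_:) operations in the same style as the RoboMaker methods later in this diff (the method names, role ARN, group name, and tag key/value below are illustrative, not defined here):

```swift
import SotoResourceGroups

// Sketch: start a tag-sync task for an application group, then read back its status.
// startTagSyncTask(_:)/getTagSyncTask(_:) are assumed from Soto's generated-API
// conventions; the group name, role ARN, and tag key/value are placeholders.
func syncTaggedResources(using resourceGroups: ResourceGroups) async throws {
    let start = ResourceGroups.StartTagSyncTaskInput(
        group: "payments-app",                                  // group name or ARN
        roleArn: "arn:aws:iam::111122223333:role/TagSyncRole",  // role assumed to tag/untag on your behalf
        tagKey: "Application",                                  // resources tagged Application=payments join the group
        tagValue: "payments"
    )
    try start.validate(name: "StartTagSyncTaskInput")           // exercises the group/roleArn/tag patterns above
    let started = try await resourceGroups.startTagSyncTask(start)

    guard let taskArn = started.taskArn else { return }
    let task = try await resourceGroups.getTagSyncTask(
        ResourceGroups.GetTagSyncTaskInput(taskArn: taskArn)
    )
    print("tag-sync task status:", task.status.map { "\($0)" } ?? "unknown",
          task.errorMessage ?? "")
}
```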
public let tags: [String: String] @@ -1087,7 +1524,7 @@ extension ResourceGroups { public func validate(name: String) throws { try self.validate(self.arn, name: "arn", parent: name, max: 1600) try self.validate(self.arn, name: "arn", parent: name, min: 12) - try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.tags.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -1103,7 +1540,7 @@ extension ResourceGroups { } public struct TagOutput: AWSDecodableShape { - /// The ARN of the tagged resource. + /// The Amazon resource name (ARN) of the tagged resource. public let arn: String? /// The tags that have been added to the specified resource group. public let tags: [String: String]? @@ -1120,10 +1557,56 @@ extension ResourceGroups { } } + public struct TagSyncTaskItem: AWSDecodableShape { + /// The timestamp of when the tag-sync task was created. + public let createdAt: Date? + /// The specific error message in cases where the tag-sync task status is Error. + public let errorMessage: String? + /// The Amazon resource name (ARN) of the application group. + public let groupArn: String? + /// The name of the application group. + public let groupName: String? + /// The Amazon resource name (ARN) of the role assumed by the service to tag and untag resources on your behalf. + public let roleArn: String? + /// The status of the tag-sync task. Valid values include: ACTIVE - The tag-sync task is actively managing resources in the application by adding or removing the awsApplication tag from resources when they are tagged or untagged with the specified tag key-value pair. ERROR - The tag-sync task is not actively managing resources in the application. Review the ErrorMessage for more information about resolving the error. + public let status: TagSyncTaskStatus? + /// The tag key. + public let tagKey: String? + /// The tag value. + public let tagValue: String? + /// The Amazon resource name (ARN) of the tag-sync task. + public let taskArn: String? + + @inlinable + public init(createdAt: Date? = nil, errorMessage: String? = nil, groupArn: String? = nil, groupName: String? = nil, roleArn: String? = nil, status: TagSyncTaskStatus? = nil, tagKey: String? = nil, tagValue: String? = nil, taskArn: String? = nil) { + self.createdAt = createdAt + self.errorMessage = errorMessage + self.groupArn = groupArn + self.groupName = groupName + self.roleArn = roleArn + self.status = status + self.tagKey = tagKey + self.tagValue = tagValue + self.taskArn = taskArn + } + + private enum CodingKeys: String, CodingKey { + case createdAt = "CreatedAt" + case errorMessage = "ErrorMessage" + case groupArn = "GroupArn" + case groupName = "GroupName" + case roleArn = "RoleArn" + case status = "Status" + case tagKey = "TagKey" + case tagValue = "TagValue" + case taskArn = "TaskArn" + } + } + public struct UngroupResourcesInput: AWSEncodableShape { - /// The name or the ARN of the resource group from which to remove the resources. + /// The name or the Amazon resource name (ARN) of the resource group from which to remove the resources. public let group: String - /// The ARNs of the resources to be removed from the group. 
+ /// The Amazon resource names (ARNs) of the resources to be removed from the group. public let resourceArns: [String] @inlinable @@ -1135,7 +1618,7 @@ public func validate(name: String) throws { try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.resourceArns.forEach { try validate($0, name: "resourceArns[]", parent: name, pattern: "^arn:aws(-[a-z]+)*:[a-z0-9\\-]*:([a-z]{2}(-[a-z]+)+-\\d{1})?:([0-9]{12})?:.+$") } @@ -1172,7 +1655,7 @@ extension ResourceGroups { } public struct UntagInput: AWSEncodableShape { - /// The ARN of the resource group from which to remove tags. The command removed both the specified keys and any values associated with those keys. + /// The Amazon resource name (ARN) of the resource group from which to remove tags. The command removes both the specified keys and any values associated with those keys. public let arn: String /// The keys of the tags to be removed. public let keys: [String] @@ -1193,7 +1676,7 @@ public func validate(name: String) throws { try self.validate(self.arn, name: "arn", parent: name, max: 1600) try self.validate(self.arn, name: "arn", parent: name, min: 12) - try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.arn, name: "arn", parent: name, pattern: "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.keys.forEach { try validate($0, name: "keys[]", parent: name, max: 128) try validate($0, name: "keys[]", parent: name, min: 1) @@ -1207,7 +1690,7 @@ } public struct UntagOutput: AWSDecodableShape { - /// The ARN of the resource group from which tags have been removed. + /// The Amazon resource name (ARN) of the resource group from which tags have been removed. public let arn: String? /// The keys of the tags that were removed. public let keys: [String]? @@ -1225,7 +1708,7 @@ } public struct UpdateAccountSettingsInput: AWSEncodableShape { - /// Specifies whether you want to turn group lifecycle events on or off. + /// Specifies whether you want to turn group lifecycle events on or off. You can't turn on group lifecycle events if your resource groups quota is greater than 2,000. public let groupLifecycleEventsDesiredStatus: GroupLifecycleEventsDesiredStatus? @inlinable @@ -1253,43 +1736,64 @@ extension ResourceGroups { } public struct UpdateGroupInput: AWSEncodableShape { + /// The critical rank of the application group on a scale of 1 to 10, with a rank of 1 being the most critical, and a rank of 10 being least critical. + public let criticality: Int? /// The new description that you want to update the resource group with. Descriptions can contain letters, numbers, hyphens, underscores, periods, and spaces. public let description: String?
- /// The name or the ARN of the resource group to modify. + /// The name of the application group, which you can change at any time. + public let displayName: String? + /// The name or the ARN of the resource group to update. public let group: String? /// Don't use this parameter. Use Group instead. public let groupName: String? + /// A name, email address or other identifier for the person or group who is considered as the owner of this application group within your organization. + public let owner: String? @inlinable - public init(description: String? = nil, group: String? = nil) { + public init(criticality: Int? = nil, description: String? = nil, displayName: String? = nil, group: String? = nil, owner: String? = nil) { + self.criticality = criticality self.description = description + self.displayName = displayName self.group = group self.groupName = nil + self.owner = owner } @available(*, deprecated, message: "Members groupName have been deprecated") @inlinable - public init(description: String? = nil, group: String? = nil, groupName: String? = nil) { + public init(criticality: Int? = nil, description: String? = nil, displayName: String? = nil, group: String? = nil, groupName: String? = nil, owner: String? = nil) { + self.criticality = criticality self.description = description + self.displayName = displayName self.group = group self.groupName = groupName + self.owner = owner } public func validate(name: String) throws { + try self.validate(self.criticality, name: "criticality", parent: name, max: 10) + try self.validate(self.criticality, name: "criticality", parent: name, min: 1) try self.validate(self.description, name: "description", parent: name, max: 1024) try self.validate(self.description, name: "description", parent: name, pattern: "^[\\sa-zA-Z0-9_\\.-]*$") + try self.validate(self.displayName, name: "displayName", parent: name, max: 300) + try self.validate(self.displayName, name: "displayName", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.validate(self.groupName, name: "groupName", parent: name, max: 300) try self.validate(self.groupName, name: "groupName", parent: name, min: 1) - try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]+$") + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}$") + try self.validate(self.owner, name: "owner", parent: name, max: 300) + try self.validate(self.owner, name: "owner", parent: name, pattern: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$") } private enum CodingKeys: String, CodingKey { + case criticality = "Criticality" case description = "Description" + case displayName = "DisplayName" case group = "Group" case groupName = "GroupName" + case owner = "Owner" } } @@ -1308,7 +1812,7 @@ extension ResourceGroups { } public struct UpdateGroupQueryInput: AWSEncodableShape { - /// The name or the ARN of the resource group 
to query. + /// The name or the Amazon resource name (ARN) of the resource group to query. public let group: String? /// Don't use this parameter. Use Group instead. public let groupName: String? @@ -1333,10 +1837,10 @@ extension ResourceGroups { public func validate(name: String) throws { try self.validate(self.group, name: "group", parent: name, max: 1600) try self.validate(self.group, name: "group", parent: name, min: 1) - try self.validate(self.group, name: "group", parent: name, pattern: "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$") + try self.validate(self.group, name: "group", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$") try self.validate(self.groupName, name: "groupName", parent: name, max: 300) try self.validate(self.groupName, name: "groupName", parent: name, min: 1) - try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]+$") + try self.validate(self.groupName, name: "groupName", parent: name, pattern: "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}$") try self.resourceQuery.validate(name: "\(name).resourceQuery") } diff --git a/Sources/Soto/Services/RoboMaker/RoboMaker_api.swift b/Sources/Soto/Services/RoboMaker/RoboMaker_api.swift index d89cbb4934..4416825da4 100644 --- a/Sources/Soto/Services/RoboMaker/RoboMaker_api.swift +++ b/Sources/Soto/Services/RoboMaker/RoboMaker_api.swift @@ -79,7 +79,7 @@ public struct RoboMaker: AWSService { // MARK: API Calls - /// Deletes one or more worlds in a batch operation. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes one or more worlds in a batch operation. @Sendable @inlinable public func batchDeleteWorlds(_ input: BatchDeleteWorldsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchDeleteWorldsResponse { @@ -92,7 +92,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Deletes one or more worlds in a batch operation. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes one or more worlds in a batch operation. /// /// Parameters: /// - worlds: A list of Amazon Resource Names (arns) that correspond to worlds to delete. @@ -108,7 +108,7 @@ public struct RoboMaker: AWSService { return try await self.batchDeleteWorlds(input, logger: logger) } - /// Describes one or more simulation jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes one or more simulation jobs. @Sendable @inlinable public func batchDescribeSimulationJob(_ input: BatchDescribeSimulationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchDescribeSimulationJobResponse { @@ -121,7 +121,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes one or more simulation jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes one or more simulation jobs. /// /// Parameters: /// - jobs: A list of Amazon Resource Names (ARNs) of simulation jobs to describe. @@ -137,7 +137,7 @@ public struct RoboMaker: AWSService { return try await self.batchDescribeSimulationJob(input, logger: logger) } - /// Cancels the specified deployment job. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Cancels the specified deployment job. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -151,7 +151,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Cancels the specified deployment job. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Cancels the specified deployment job. /// /// Parameters: /// - job: The deployment job ARN to cancel. @@ -168,7 +168,7 @@ public struct RoboMaker: AWSService { return try await self.cancelDeploymentJob(input, logger: logger) } - /// Cancels the specified simulation job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified simulation job. @Sendable @inlinable public func cancelSimulationJob(_ input: CancelSimulationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelSimulationJobResponse { @@ -181,7 +181,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Cancels the specified simulation job. 
+ /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified simulation job. /// /// Parameters: /// - job: The simulation job ARN to cancel. @@ -197,7 +197,7 @@ public struct RoboMaker: AWSService { return try await self.cancelSimulationJob(input, logger: logger) } - /// Cancels a simulation job batch. When you cancel a simulation job batch, you are also cancelling all of the active simulation jobs created as part of the batch. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels a simulation job batch. When you cancel a simulation job batch, you are also cancelling all of the active simulation jobs created as part of the batch. @Sendable @inlinable public func cancelSimulationJobBatch(_ input: CancelSimulationJobBatchRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelSimulationJobBatchResponse { @@ -210,7 +210,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Cancels a simulation job batch. When you cancel a simulation job batch, you are also cancelling all of the active simulation jobs created as part of the batch. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels a simulation job batch. When you cancel a simulation job batch, you are also cancelling all of the active simulation jobs created as part of the batch. /// /// Parameters: /// - batch: The id of the batch to cancel. @@ -226,7 +226,7 @@ public struct RoboMaker: AWSService { return try await self.cancelSimulationJobBatch(input, logger: logger) } - /// Cancels the specified export job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified export job. 
@Sendable @inlinable public func cancelWorldExportJob(_ input: CancelWorldExportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelWorldExportJobResponse { @@ -239,7 +239,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Cancels the specified export job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified export job. /// /// Parameters: /// - job: The Amazon Resource Name (arn) of the world export job to cancel. @@ -255,7 +255,7 @@ public struct RoboMaker: AWSService { return try await self.cancelWorldExportJob(input, logger: logger) } - /// Cancels the specified world generator job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified world generator job. @Sendable @inlinable public func cancelWorldGenerationJob(_ input: CancelWorldGenerationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CancelWorldGenerationJobResponse { @@ -268,7 +268,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Cancels the specified world generator job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Cancels the specified world generator job. /// /// Parameters: /// - job: The Amazon Resource Name (arn) of the world generator job to cancel. @@ -284,7 +284,7 @@ public struct RoboMaker: AWSService { return try await self.cancelWorldGenerationJob(input, logger: logger) } - /// Deploys a specific version of a robot application to robots in a fleet. This API is no longer supported and will throw an error if used. The robot application must have a numbered applicationVersion for consistency reasons. To create a new version, use CreateRobotApplicationVersion or see Creating a Robot Application Version. After 90 days, deployment jobs expire and will be deleted. They will no longer be accessible. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. 
For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. Deploys a specific version of a robot application to robots in a fleet. The robot application must have a numbered applicationVersion for consistency reasons. To create a new version, use CreateRobotApplicationVersion or see Creating a Robot Application Version. After 90 days, deployment jobs expire and will be deleted. They will no longer be accessible. @available(*, deprecated, message: "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -298,7 +298,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Deploys a specific version of a robot application to robots in a fleet. This API is no longer supported and will throw an error if used. The robot application must have a numbered applicationVersion for consistency reasons. To create a new version, use CreateRobotApplicationVersion or see Creating a Robot Application Version. After 90 days, deployment jobs expire and will be deleted. They will no longer be accessible. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. Deploys a specific version of a robot application to robots in a fleet. The robot application must have a numbered applicationVersion for consistency reasons. To create a new version, use CreateRobotApplicationVersion or see Creating a Robot Application Version. After 90 days, deployment jobs expire and will be deleted. They will no longer be accessible. /// /// Parameters: /// - clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. @@ -327,7 +327,7 @@ public struct RoboMaker: AWSService { return try await self.createDeploymentJob(input, logger: logger) } - /// Creates a fleet, a logical group of robots running the same robot application. This API is no longer supported and will throw an error if used. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. 
Creates a fleet, a logical group of robots running the same robot application. @available(*, deprecated, message: "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -341,7 +341,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a fleet, a logical group of robots running the same robot application. This API is no longer supported and will throw an error if used. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. Creates a fleet, a logical group of robots running the same robot application. /// /// Parameters: /// - name: The name of the fleet. @@ -361,7 +361,7 @@ public struct RoboMaker: AWSService { return try await self.createFleet(input, logger: logger) } - /// Creates a robot. This API is no longer supported and will throw an error if used. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. Creates a robot. @available(*, deprecated, message: "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -375,7 +375,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a robot. This API is no longer supported and will throw an error if used. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. Creates a robot. /// /// Parameters: /// - architecture: The target architecture of the robot. @@ -401,7 +401,7 @@ public struct RoboMaker: AWSService { return try await self.createRobot(input, logger: logger) } - /// Creates a robot application. 
+ /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a robot application. @Sendable @inlinable public func createRobotApplication(_ input: CreateRobotApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateRobotApplicationResponse { @@ -414,12 +414,12 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a robot application. /// /// Parameters: /// - environment: The object that contains that URI of the Docker image that you use for your robot application. /// - name: The name of the robot application. - /// - robotSoftwareSuite: The robot software suite (ROS distribuition) used by the robot application. + /// - robotSoftwareSuite: The robot software suite used by the robot application. /// - sources: The sources of the robot application. /// - tags: A map that contains tag keys and tag values that are attached to the robot application. /// - logger: Logger use during operation @@ -442,7 +442,7 @@ public struct RoboMaker: AWSService { return try await self.createRobotApplication(input, logger: logger) } - /// Creates a version of a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a version of a robot application. @Sendable @inlinable public func createRobotApplicationVersion(_ input: CreateRobotApplicationVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateRobotApplicationVersionResponse { @@ -455,7 +455,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a version of a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a version of a robot application. /// /// Parameters: /// - application: The application information for the robot application. 
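// ---------------------------------------------------------------------------
// Editor's note (not part of the diff): the hunks above and below only swap
// doc comments, so the call surface is unchanged. The sketch here shows, under
// stated assumptions, how one of these RoboMaker operations is invoked through
// Soto's async API. The operation name and its `job:` parameter are taken from
// the signatures visible in this diff; the client setup (AWSClient, Region
// choice) is assumed standard Soto boilerplate and may differ in your version.
// Deployment-feature APIs such as deleteFleet additionally carry
// @available(*, deprecated, ...), so calling them emits a compiler warning.
import SotoRoboMaker // Soto service modules re-export SotoCore (AWSClient, Region)

func printSimulationJobStatus(jobArn: String, client: AWSClient) async throws {
    // Service client for the RoboMaker API surface shown in this file.
    let roboMaker = RoboMaker(client: client, region: .useast1)

    // Convenience overload documented in the diff: describeSimulationJob(job:logger:).
    // Note the end-of-support notice: RoboMaker resources become inaccessible
    // after September 10, 2025.
    let response = try await roboMaker.describeSimulationJob(job: jobArn)
    print(response)
}
// ---------------------------------------------------------------------------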
@@ -480,7 +480,7 @@ public struct RoboMaker: AWSService { return try await self.createRobotApplicationVersion(input, logger: logger) } - /// Creates a simulation application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation application. @Sendable @inlinable public func createSimulationApplication(_ input: CreateSimulationApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSimulationApplicationResponse { @@ -493,13 +493,13 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a simulation application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation application. /// /// Parameters: /// - environment: The object that contains the Docker image URI used to create your simulation application. /// - name: The name of the simulation application. /// - renderingEngine: The rendering engine for the simulation application. - /// - robotSoftwareSuite: The robot software suite (ROS distribution) used by the simulation application. + /// - robotSoftwareSuite: The robot software suite used by the simulation application. /// - simulationSoftwareSuite: The simulation software suite used by the simulation application. /// - sources: The sources of the simulation application. /// - tags: A map that contains tag keys and tag values that are attached to the simulation application. @@ -527,7 +527,7 @@ public struct RoboMaker: AWSService { return try await self.createSimulationApplication(input, logger: logger) } - /// Creates a simulation application with a specific revision id. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation application with a specific revision id. @Sendable @inlinable public func createSimulationApplicationVersion(_ input: CreateSimulationApplicationVersionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSimulationApplicationVersionResponse { @@ -540,7 +540,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a simulation application with a specific revision id. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation application with a specific revision id. /// /// Parameters: /// - application: The application information for the simulation application. @@ -565,7 +565,7 @@ public struct RoboMaker: AWSService { return try await self.createSimulationApplicationVersion(input, logger: logger) } - /// Creates a simulation job. After 90 days, simulation jobs expire and will be deleted. They will no longer be accessible. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation job. After 90 days, simulation jobs expire and will be deleted. They will no longer be accessible. @Sendable @inlinable public func createSimulationJob(_ input: CreateSimulationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSimulationJobResponse { @@ -578,7 +578,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a simulation job. After 90 days, simulation jobs expire and will be deleted. They will no longer be accessible. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a simulation job. After 90 days, simulation jobs expire and will be deleted. They will no longer be accessible. /// /// Parameters: /// - clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. @@ -627,7 +627,7 @@ public struct RoboMaker: AWSService { return try await self.createSimulationJob(input, logger: logger) } - /// Creates a world export job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a world export job. @Sendable @inlinable public func createWorldExportJob(_ input: CreateWorldExportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateWorldExportJobResponse { @@ -640,7 +640,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a world export job. 
+ /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a world export job. /// /// Parameters: /// - clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. @@ -668,7 +668,7 @@ public struct RoboMaker: AWSService { return try await self.createWorldExportJob(input, logger: logger) } - /// Creates worlds using the specified template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates worlds using the specified template. @Sendable @inlinable public func createWorldGenerationJob(_ input: CreateWorldGenerationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateWorldGenerationJobResponse { @@ -681,7 +681,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates worlds using the specified template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates worlds using the specified template. /// /// Parameters: /// - clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. @@ -709,7 +709,7 @@ public struct RoboMaker: AWSService { return try await self.createWorldGenerationJob(input, logger: logger) } - /// Creates a world template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a world template. @Sendable @inlinable public func createWorldTemplate(_ input: CreateWorldTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateWorldTemplateResponse { @@ -722,7 +722,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Creates a world template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Creates a world template. /// /// Parameters: /// - clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. @@ -750,7 +750,7 @@ public struct RoboMaker: AWSService { return try await self.createWorldTemplate(input, logger: logger) } - /// Deletes a fleet. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Deletes a fleet. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -764,7 +764,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Deletes a fleet. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Deletes a fleet. /// /// Parameters: /// - fleet: The Amazon Resource Name (ARN) of the fleet. @@ -781,7 +781,7 @@ public struct RoboMaker: AWSService { return try await self.deleteFleet(input, logger: logger) } - /// Deletes a robot. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Deletes a robot. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. 
For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -795,7 +795,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Deletes a robot. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Deletes a robot. /// /// Parameters: /// - robot: The Amazon Resource Name (ARN) of the robot. @@ -812,7 +812,7 @@ public struct RoboMaker: AWSService { return try await self.deleteRobot(input, logger: logger) } - /// Deletes a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a robot application. @Sendable @inlinable public func deleteRobotApplication(_ input: DeleteRobotApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteRobotApplicationResponse { @@ -825,7 +825,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Deletes a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a robot application. /// /// Parameters: /// - application: The Amazon Resource Name (ARN) of the the robot application. @@ -844,7 +844,7 @@ public struct RoboMaker: AWSService { return try await self.deleteRobotApplication(input, logger: logger) } - /// Deletes a simulation application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a simulation application. 
@Sendable @inlinable public func deleteSimulationApplication(_ input: DeleteSimulationApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteSimulationApplicationResponse { @@ -857,7 +857,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Deletes a simulation application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a simulation application. /// /// Parameters: /// - application: The application information for the simulation application to delete. @@ -876,7 +876,7 @@ public struct RoboMaker: AWSService { return try await self.deleteSimulationApplication(input, logger: logger) } - /// Deletes a world template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a world template. @Sendable @inlinable public func deleteWorldTemplate(_ input: DeleteWorldTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteWorldTemplateResponse { @@ -889,7 +889,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Deletes a world template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Deletes a world template. /// /// Parameters: /// - template: The Amazon Resource Name (arn) of the world template you want to delete. @@ -905,7 +905,7 @@ public struct RoboMaker: AWSService { return try await self.deleteWorldTemplate(input, logger: logger) } - /// Deregisters a robot. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Deregisters a robot. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. 
For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -919,7 +919,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Deregisters a robot. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Deregisters a robot. /// /// Parameters: /// - fleet: The Amazon Resource Name (ARN) of the fleet. @@ -939,7 +939,7 @@ public struct RoboMaker: AWSService { return try await self.deregisterRobot(input, logger: logger) } - /// Describes a deployment job. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a deployment job. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -953,7 +953,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a deployment job. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a deployment job. /// /// Parameters: /// - job: The Amazon Resource Name (ARN) of the deployment job. @@ -970,7 +970,7 @@ public struct RoboMaker: AWSService { return try await self.describeDeploymentJob(input, logger: logger) } - /// Describes a fleet. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a fleet. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -984,7 +984,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a fleet. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a fleet. /// /// Parameters: /// - fleet: The Amazon Resource Name (ARN) of the fleet. @@ -1001,7 +1001,7 @@ public struct RoboMaker: AWSService { return try await self.describeFleet(input, logger: logger) } - /// Describes a robot. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a robot. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -1015,7 +1015,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a robot. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Describes a robot. 
/// /// Parameters: /// - robot: The Amazon Resource Name (ARN) of the robot to be described. @@ -1032,7 +1032,7 @@ public struct RoboMaker: AWSService { return try await self.describeRobot(input, logger: logger) } - /// Describes a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a robot application. @Sendable @inlinable public func describeRobotApplication(_ input: DescribeRobotApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeRobotApplicationResponse { @@ -1045,7 +1045,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a robot application. /// /// Parameters: /// - application: The Amazon Resource Name (ARN) of the robot application. @@ -1064,7 +1064,7 @@ public struct RoboMaker: AWSService { return try await self.describeRobotApplication(input, logger: logger) } - /// Describes a simulation application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation application. @Sendable @inlinable public func describeSimulationApplication(_ input: DescribeSimulationApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSimulationApplicationResponse { @@ -1077,7 +1077,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a simulation application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation application. /// /// Parameters: /// - application: The application information for the simulation application. @@ -1096,7 +1096,7 @@ public struct RoboMaker: AWSService { return try await self.describeSimulationApplication(input, logger: logger) } - /// Describes a simulation job. 
+ /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation job. @Sendable @inlinable public func describeSimulationJob(_ input: DescribeSimulationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSimulationJobResponse { @@ -1109,7 +1109,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a simulation job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation job. /// /// Parameters: /// - job: The Amazon Resource Name (ARN) of the simulation job to be described. @@ -1125,7 +1125,7 @@ public struct RoboMaker: AWSService { return try await self.describeSimulationJob(input, logger: logger) } - /// Describes a simulation job batch. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation job batch. @Sendable @inlinable public func describeSimulationJobBatch(_ input: DescribeSimulationJobBatchRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeSimulationJobBatchResponse { @@ -1138,7 +1138,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a simulation job batch. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a simulation job batch. /// /// Parameters: /// - batch: The id of the batch to describe. @@ -1154,7 +1154,7 @@ public struct RoboMaker: AWSService { return try await self.describeSimulationJobBatch(input, logger: logger) } - /// Describes a world. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. 
For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world. @Sendable @inlinable public func describeWorld(_ input: DescribeWorldRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeWorldResponse { @@ -1167,7 +1167,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a world. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world. /// /// Parameters: /// - world: The Amazon Resource Name (arn) of the world you want to describe. @@ -1183,7 +1183,7 @@ public struct RoboMaker: AWSService { return try await self.describeWorld(input, logger: logger) } - /// Describes a world export job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world export job. @Sendable @inlinable public func describeWorldExportJob(_ input: DescribeWorldExportJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeWorldExportJobResponse { @@ -1196,7 +1196,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a world export job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world export job. /// /// Parameters: /// - job: The Amazon Resource Name (arn) of the world export job to describe. @@ -1212,7 +1212,7 @@ public struct RoboMaker: AWSService { return try await self.describeWorldExportJob(input, logger: logger) } - /// Describes a world generation job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world generation job. 
@Sendable @inlinable public func describeWorldGenerationJob(_ input: DescribeWorldGenerationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeWorldGenerationJobResponse { @@ -1225,7 +1225,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a world generation job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world generation job. /// /// Parameters: /// - job: The Amazon Resource Name (arn) of the world generation job to describe. @@ -1241,7 +1241,7 @@ public struct RoboMaker: AWSService { return try await self.describeWorldGenerationJob(input, logger: logger) } - /// Describes a world template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world template. @Sendable @inlinable public func describeWorldTemplate(_ input: DescribeWorldTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeWorldTemplateResponse { @@ -1254,7 +1254,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Describes a world template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Describes a world template. /// /// Parameters: /// - template: The Amazon Resource Name (arn) of the world template you want to describe. @@ -1270,7 +1270,7 @@ public struct RoboMaker: AWSService { return try await self.describeWorldTemplate(input, logger: logger) } - /// Gets the world template body. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Gets the world template body. @Sendable @inlinable public func getWorldTemplateBody(_ input: GetWorldTemplateBodyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetWorldTemplateBodyResponse { @@ -1283,7 +1283,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Gets the world template body. 
+ /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Gets the world template body. /// /// Parameters: /// - generationJob: The Amazon Resource Name (arn) of the world generator job. @@ -1302,7 +1302,7 @@ public struct RoboMaker: AWSService { return try await self.getWorldTemplateBody(input, logger: logger) } - /// Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -1316,7 +1316,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs. /// /// Parameters: /// - filters: Optional filters to limit results. The filter names status and fleetName are supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters, but they must be for the same named item. For example, if you are looking for items with the status InProgress or the status Pending. @@ -1339,7 +1339,7 @@ public struct RoboMaker: AWSService { return try await self.listDeploymentJobs(input, logger: logger) } - /// Returns a list of fleets. You can optionally provide filters to retrieve specific fleets. 
This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of fleets. You can optionally provide filters to retrieve specific fleets. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -1353,7 +1353,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Returns a list of fleets. You can optionally provide filters to retrieve specific fleets. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of fleets. You can optionally provide filters to retrieve specific fleets. /// /// Parameters: /// - filters: Optional filters to limit results. The filter name name is supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters. @@ -1376,7 +1376,7 @@ public struct RoboMaker: AWSService { return try await self.listFleets(input, logger: logger) } - /// Returns a list of robot application. You can optionally provide filters to retrieve specific robot applications. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of robot application. You can optionally provide filters to retrieve specific robot applications. @Sendable @inlinable public func listRobotApplications(_ input: ListRobotApplicationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListRobotApplicationsResponse { @@ -1389,7 +1389,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Returns a list of robot application. You can optionally provide filters to retrieve specific robot applications. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of robot application. You can optionally provide filters to retrieve specific robot applications. /// /// Parameters: /// - filters: Optional filters to limit results. The filter name name is supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters. @@ -1414,7 +1414,7 @@ public struct RoboMaker: AWSService { return try await self.listRobotApplications(input, logger: logger) } - /// Returns a list of robots. You can optionally provide filters to retrieve specific robots. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of robots. You can optionally provide filters to retrieve specific robots. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -1428,7 +1428,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Returns a list of robots. You can optionally provide filters to retrieve specific robots. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Returns a list of robots. You can optionally provide filters to retrieve specific robots. /// /// Parameters: /// - filters: Optional filters to limit results. The filter names status and fleetName are supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters, but they must be for the same named item. For example, if you are looking for items with the status Registered or the status Available. @@ -1451,7 +1451,7 @@ public struct RoboMaker: AWSService { return try await self.listRobots(input, logger: logger) } - /// Returns a list of simulation applications. You can optionally provide filters to retrieve specific simulation applications. 
+ /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of simulation applications. You can optionally provide filters to retrieve specific simulation applications. @Sendable @inlinable public func listSimulationApplications(_ input: ListSimulationApplicationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSimulationApplicationsResponse { @@ -1464,7 +1464,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Returns a list of simulation applications. You can optionally provide filters to retrieve specific simulation applications. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of simulation applications. You can optionally provide filters to retrieve specific simulation applications. /// /// Parameters: /// - filters: Optional list of filters to limit results. The filter name name is supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters. @@ -1489,7 +1489,7 @@ public struct RoboMaker: AWSService { return try await self.listSimulationApplications(input, logger: logger) } - /// Returns a list simulation job batches. You can optionally provide filters to retrieve specific simulation batch jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list simulation job batches. You can optionally provide filters to retrieve specific simulation batch jobs. @Sendable @inlinable public func listSimulationJobBatches(_ input: ListSimulationJobBatchesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSimulationJobBatchesResponse { @@ -1502,7 +1502,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Returns a list simulation job batches. You can optionally provide filters to retrieve specific simulation batch jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. 
Returns a list simulation job batches. You can optionally provide filters to retrieve specific simulation batch jobs. /// /// Parameters: /// - filters: Optional filters to limit results. @@ -1524,7 +1524,7 @@ public struct RoboMaker: AWSService { return try await self.listSimulationJobBatches(input, logger: logger) } - /// Returns a list of simulation jobs. You can optionally provide filters to retrieve specific simulation jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of simulation jobs. You can optionally provide filters to retrieve specific simulation jobs. @Sendable @inlinable public func listSimulationJobs(_ input: ListSimulationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSimulationJobsResponse { @@ -1537,7 +1537,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Returns a list of simulation jobs. You can optionally provide filters to retrieve specific simulation jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Returns a list of simulation jobs. You can optionally provide filters to retrieve specific simulation jobs. /// /// Parameters: /// - filters: Optional filters to limit results. The filter names status and simulationApplicationName and robotApplicationName are supported. When filtering, you must use the complete value of the filtered item. You can use up to three filters, but they must be for the same named item. For example, if you are looking for items with the status Preparing or the status Running. @@ -1559,7 +1559,7 @@ public struct RoboMaker: AWSService { return try await self.listSimulationJobs(input, logger: logger) } - /// Lists all tags on a AWS RoboMaker resource. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists all tags on a AWS RoboMaker resource. @Sendable @inlinable public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { @@ -1572,7 +1572,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Lists all tags on a AWS RoboMaker resource. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists all tags on a AWS RoboMaker resource. /// /// Parameters: /// - resourceArn: The AWS RoboMaker Amazon Resource Name (ARN) with tags to be listed. @@ -1588,7 +1588,7 @@ public struct RoboMaker: AWSService { return try await self.listTagsForResource(input, logger: logger) } - /// Lists world export jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world export jobs. @Sendable @inlinable public func listWorldExportJobs(_ input: ListWorldExportJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorldExportJobsResponse { @@ -1601,7 +1601,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Lists world export jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world export jobs. /// /// Parameters: /// - filters: Optional filters to limit results. You can use generationJobId and templateId. @@ -1623,7 +1623,7 @@ public struct RoboMaker: AWSService { return try await self.listWorldExportJobs(input, logger: logger) } - /// Lists world generator jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world generator jobs. @Sendable @inlinable public func listWorldGenerationJobs(_ input: ListWorldGenerationJobsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorldGenerationJobsResponse { @@ -1636,7 +1636,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Lists world generator jobs. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world generator jobs. 
/// /// Parameters: /// - filters: Optional filters to limit results. You can use status and templateId. @@ -1658,7 +1658,7 @@ public struct RoboMaker: AWSService { return try await self.listWorldGenerationJobs(input, logger: logger) } - /// Lists world templates. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world templates. @Sendable @inlinable public func listWorldTemplates(_ input: ListWorldTemplatesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorldTemplatesResponse { @@ -1671,7 +1671,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Lists world templates. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists world templates. /// /// Parameters: /// - maxResults: When this parameter is used, ListWorldTemplates only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListWorldTemplates request with the returned nextToken value. This value can be between 1 and 100. If this parameter is not used, then ListWorldTemplates returns up to 100 results and a nextToken value if applicable. @@ -1690,7 +1690,7 @@ public struct RoboMaker: AWSService { return try await self.listWorldTemplates(input, logger: logger) } - /// Lists worlds. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists worlds. @Sendable @inlinable public func listWorlds(_ input: ListWorldsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWorldsResponse { @@ -1703,7 +1703,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Lists worlds. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Lists worlds. /// /// Parameters: /// - filters: Optional filters to limit results. You can use status. 
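Not part of the upstream diff: a minimal usage sketch for one of the RoboMaker list operations whose documentation now carries the end-of-support notice. It assumes an AWSClient configured elsewhere, that ListWorldTemplatesRequest has the usual generated memberwise initializer, and that the response exposes templateSummaries; the region is arbitrary.

import SotoRoboMaker

// Minimal sketch (assumptions noted above): list world templates through the
// generated async API. The operation itself is unchanged by this diff; only
// its documentation now carries the end-of-support notice.
func printWorldTemplates(using client: AWSClient) async throws {
    let robomaker = RoboMaker(client: client, region: .useast1)
    let response = try await robomaker.listWorldTemplates(
        RoboMaker.ListWorldTemplatesRequest(maxResults: 10)
    )
    for template in response.templateSummaries ?? [] {
        print(template.arn ?? "-", template.name ?? "-")
    }
}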
@@ -1725,7 +1725,7 @@ public struct RoboMaker: AWSService { return try await self.listWorlds(input, logger: logger) } - /// Registers a robot with a fleet. This API is no longer supported and will throw an error if used. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Registers a robot with a fleet. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. @available(*, deprecated, message: "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -1739,7 +1739,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Registers a robot with a fleet. This API is no longer supported and will throw an error if used. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Registers a robot with a fleet. This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page. /// /// Parameters: /// - fleet: The Amazon Resource Name (ARN) of the fleet. @@ -1759,7 +1759,7 @@ public struct RoboMaker: AWSService { return try await self.registerRobot(input, logger: logger) } - /// Restarts a running simulation job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Restarts a running simulation job. @Sendable @inlinable public func restartSimulationJob(_ input: RestartSimulationJobRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RestartSimulationJobResponse { @@ -1772,7 +1772,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Restarts a running simulation job. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. 
Restarts a running simulation job. /// /// Parameters: /// - job: The Amazon Resource Name (ARN) of the simulation job. @@ -1788,7 +1788,7 @@ public struct RoboMaker: AWSService { return try await self.restartSimulationJob(input, logger: logger) } - /// Starts a new simulation job batch. The batch is defined using one or more SimulationJobRequest objects. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Starts a new simulation job batch. The batch is defined using one or more SimulationJobRequest objects. @Sendable @inlinable public func startSimulationJobBatch(_ input: StartSimulationJobBatchRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> StartSimulationJobBatchResponse { @@ -1801,7 +1801,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Starts a new simulation job batch. The batch is defined using one or more SimulationJobRequest objects. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Starts a new simulation job batch. The batch is defined using one or more SimulationJobRequest objects. /// /// Parameters: /// - batchPolicy: The batch policy. @@ -1826,7 +1826,7 @@ public struct RoboMaker: AWSService { return try await self.startSimulationJobBatch(input, logger: logger) } - /// Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment. This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment. @available(*, deprecated, message: "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html.") @Sendable @inlinable @@ -1840,7 +1840,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment. This API will no longer be supported as of May 2, 2022. 
Use it to remove resources that were created for Deployment Service. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page. Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment. /// /// Parameters: /// - clientRequestToken: Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. @@ -1860,7 +1860,7 @@ public struct RoboMaker: AWSService { return try await self.syncDeploymentJob(input, logger: logger) } - /// Adds or edits tags for a AWS RoboMaker resource. Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty strings. For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Adds or edits tags for a AWS RoboMaker resource. Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty strings. For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide. @Sendable @inlinable public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { @@ -1873,7 +1873,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Adds or edits tags for a AWS RoboMaker resource. Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty strings. For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Adds or edits tags for a AWS RoboMaker resource. Each tag consists of a tag key and a tag value. Tag keys and tag values are both required, but tag values can be empty strings. 
For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide. /// /// Parameters: /// - resourceArn: The Amazon Resource Name (ARN) of the AWS RoboMaker resource you are tagging. @@ -1892,7 +1892,7 @@ public struct RoboMaker: AWSService { return try await self.tagResource(input, logger: logger) } - /// Removes the specified tags from the specified AWS RoboMaker resource. To remove a tag, specify the tag key. To change the tag value of an existing tag key, use TagResource . + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Removes the specified tags from the specified AWS RoboMaker resource. To remove a tag, specify the tag key. To change the tag value of an existing tag key, use TagResource . @Sendable @inlinable public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { @@ -1905,7 +1905,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Removes the specified tags from the specified AWS RoboMaker resource. To remove a tag, specify the tag key. To change the tag value of an existing tag key, use TagResource . + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Removes the specified tags from the specified AWS RoboMaker resource. To remove a tag, specify the tag key. To change the tag value of an existing tag key, use TagResource . /// /// Parameters: /// - resourceArn: The Amazon Resource Name (ARN) of the AWS RoboMaker resource you are removing tags. @@ -1924,7 +1924,7 @@ public struct RoboMaker: AWSService { return try await self.untagResource(input, logger: logger) } - /// Updates a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a robot application. @Sendable @inlinable public func updateRobotApplication(_ input: UpdateRobotApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateRobotApplicationResponse { @@ -1937,13 +1937,13 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Updates a robot application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a robot application. /// /// Parameters: /// - application: The application information for the robot application. /// - currentRevisionId: The revision id for the robot application. /// - environment: The object that contains the Docker image URI for your robot application. - /// - robotSoftwareSuite: The robot software suite (ROS distribution) used by the robot application. + /// - robotSoftwareSuite: The robot software suite used by the robot application. /// - sources: The sources of the robot application. /// - logger: Logger use during operation @inlinable @@ -1965,7 +1965,7 @@ public struct RoboMaker: AWSService { return try await self.updateRobotApplication(input, logger: logger) } - /// Updates a simulation application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a simulation application. @Sendable @inlinable public func updateSimulationApplication(_ input: UpdateSimulationApplicationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateSimulationApplicationResponse { @@ -1978,14 +1978,14 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Updates a simulation application. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a simulation application. /// /// Parameters: /// - application: The application information for the simulation application. /// - currentRevisionId: The revision id for the robot application. /// - environment: The object that contains the Docker image URI for your simulation application. /// - renderingEngine: The rendering engine for the simulation application. - /// - robotSoftwareSuite: Information about the robot software suite (ROS distribution). + /// - robotSoftwareSuite: Information about the robot software suite. /// - simulationSoftwareSuite: The simulation software suite used by the simulation application. /// - sources: The sources of the simulation application. /// - logger: Logger use during operation @@ -2012,7 +2012,7 @@ public struct RoboMaker: AWSService { return try await self.updateSimulationApplication(input, logger: logger) } - /// Updates a world template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. 
After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a world template. @Sendable @inlinable public func updateWorldTemplate(_ input: UpdateWorldTemplateRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateWorldTemplateResponse { @@ -2025,7 +2025,7 @@ public struct RoboMaker: AWSService { logger: logger ) } - /// Updates a world template. + /// End of support notice: On September 10, 2025, Amazon Web Services will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. For more information on transitioning to Batch to help run containerized simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/. Updates a world template. /// /// Parameters: /// - name: The name of the template. diff --git a/Sources/Soto/Services/RoboMaker/RoboMaker_shapes.swift b/Sources/Soto/Services/RoboMaker/RoboMaker_shapes.swift index d8a410880e..e707789981 100644 --- a/Sources/Soto/Services/RoboMaker/RoboMaker_shapes.swift +++ b/Sources/Soto/Services/RoboMaker/RoboMaker_shapes.swift @@ -692,7 +692,7 @@ extension RoboMaker { public let environment: Environment? /// The name of the robot application. public let name: String - /// The robot software suite (ROS distribuition) used by the robot application. + /// The robot software suite used by the robot application. public let robotSoftwareSuite: RobotSoftwareSuite /// The sources of the robot application. public let sources: [SourceConfig]? @@ -746,7 +746,7 @@ extension RoboMaker { public let name: String? /// The revision id of the robot application. public let revisionId: String? - /// The robot software suite (ROS distribution) used by the robot application. + /// The robot software suite used by the robot application. public let robotSoftwareSuite: RobotSoftwareSuite? /// The sources of the robot application. public let sources: [Source]? @@ -829,7 +829,7 @@ extension RoboMaker { public let name: String? /// The revision id of the robot application. public let revisionId: String? - /// The robot software suite (ROS distribution) used by the robot application. + /// The robot software suite used by the robot application. public let robotSoftwareSuite: RobotSoftwareSuite? /// The sources of the robot application. public let sources: [Source]? @@ -944,7 +944,7 @@ extension RoboMaker { public let name: String /// The rendering engine for the simulation application. public let renderingEngine: RenderingEngine? - /// The robot software suite (ROS distribution) used by the simulation application. + /// The robot software suite used by the simulation application. public let robotSoftwareSuite: RobotSoftwareSuite /// The simulation software suite used by the simulation application. public let simulationSoftwareSuite: SimulationSoftwareSuite @@ -1008,7 +1008,7 @@ extension RoboMaker { public let renderingEngine: RenderingEngine? /// The revision id of the simulation application. public let revisionId: String? - /// Information about the robot software suite (ROS distribution). + /// Information about the robot software suite. 
public let robotSoftwareSuite: RobotSoftwareSuite? /// The simulation software suite used by the simulation application. public let simulationSoftwareSuite: SimulationSoftwareSuite? @@ -1099,7 +1099,7 @@ extension RoboMaker { public let renderingEngine: RenderingEngine? /// The revision ID of the simulation application. public let revisionId: String? - /// Information about the robot software suite (ROS distribution). + /// Information about the robot software suite. public let robotSoftwareSuite: RobotSoftwareSuite? /// The simulation software suite used by the simulation application. public let simulationSoftwareSuite: SimulationSoftwareSuite? @@ -2178,7 +2178,7 @@ extension RoboMaker { public let name: String? /// The revision id of the robot application. public let revisionId: String? - /// The robot software suite (ROS distribution) used by the robot application. + /// The robot software suite used by the robot application. public let robotSoftwareSuite: RobotSoftwareSuite? /// The sources of the robot application. public let sources: [Source]? @@ -2327,7 +2327,7 @@ extension RoboMaker { public let renderingEngine: RenderingEngine? /// The revision id of the simulation application. public let revisionId: String? - /// Information about the robot software suite (ROS distribution). + /// Information about the robot software suite. public let robotSoftwareSuite: RobotSoftwareSuite? /// The simulation software suite used by the simulation application. public let simulationSoftwareSuite: SimulationSoftwareSuite? @@ -4030,7 +4030,7 @@ extension RoboMaker { public let lastUpdatedAt: Date? /// The name of the robot application. public let name: String? - /// Information about a robot software suite (ROS distribution). + /// Information about a robot software suite. public let robotSoftwareSuite: RobotSoftwareSuite? /// The version of the robot application. public let version: String? @@ -4092,9 +4092,9 @@ extension RoboMaker { } public struct RobotSoftwareSuite: AWSEncodableShape & AWSDecodableShape { - /// The name of the robot software suite (ROS distribution). + /// The name of the robot software suite. General is the only supported value. public let name: RobotSoftwareSuiteType? - /// The version of the robot software suite (ROS distribution). + /// The version of the robot software suite. Not applicable for General software suite. public let version: RobotSoftwareSuiteVersionType? @inlinable @@ -4173,7 +4173,7 @@ extension RoboMaker { public let useDefaultTools: Bool? /// A Boolean indicating whether to use default upload configurations. By default, .ros and .gazebo files are uploaded when the application terminates and all ROS topics will be recorded. If you set this value, you must specify an outputLocation. This API is no longer supported and will throw an error if used. public let useDefaultUploadConfigurations: Bool? - /// A list of world configurations. + /// A list of world configurations. This API is no longer supported and will throw an error if used. public let worldConfigs: [WorldConfig]? @inlinable @@ -4242,7 +4242,7 @@ extension RoboMaker { public let lastUpdatedAt: Date? /// The name of the simulation application. public let name: String? - /// Information about a robot software suite (ROS distribution). + /// Information about a robot software suite. public let robotSoftwareSuite: RobotSoftwareSuite? /// Information about a simulation software suite. public let simulationSoftwareSuite: SimulationSoftwareSuite? 
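Not part of the upstream diff: a sketch showing the reworded RobotSoftwareSuite used with the updateRobotApplication convenience method documented earlier in this file. The .general case name, the Environment(uri:) initializer, and the String type of the application parameter are assumptions from the service model; the ARN and image URI are placeholders.

import SotoRoboMaker

// Minimal sketch (assumptions noted above): point a robot application at a
// container image and the General software suite described by the revised docs.
func switchToGeneralSuite(_ robomaker: RoboMaker, applicationArn: String, imageUri: String) async throws {
    _ = try await robomaker.updateRobotApplication(
        application: applicationArn,
        environment: .init(uri: imageUri),          // Docker image URI for the robot application (label assumed)
        robotSoftwareSuite: .init(name: .general)   // version is not applicable for General
    )
}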
@@ -4531,9 +4531,9 @@ extension RoboMaker { } public struct SimulationSoftwareSuite: AWSEncodableShape & AWSDecodableShape { - /// The name of the simulation software suite. + /// The name of the simulation software suite. SimulationRuntime is the only supported value. public let name: SimulationSoftwareSuiteType? - /// The version of the simulation software suite. + /// The version of the simulation software suite. Not applicable for SimulationRuntime. public let version: String? @inlinable @@ -4960,7 +4960,7 @@ extension RoboMaker { public let currentRevisionId: String? /// The object that contains the Docker image URI for your robot application. public let environment: Environment? - /// The robot software suite (ROS distribution) used by the robot application. + /// The robot software suite used by the robot application. public let robotSoftwareSuite: RobotSoftwareSuite /// The sources of the robot application. public let sources: [SourceConfig]? @@ -5007,7 +5007,7 @@ extension RoboMaker { public let name: String? /// The revision id of the robot application. public let revisionId: String? - /// The robot software suite (ROS distribution) used by the robot application. + /// The robot software suite used by the robot application. public let robotSoftwareSuite: RobotSoftwareSuite? /// The sources of the robot application. public let sources: [Source]? @@ -5047,7 +5047,7 @@ extension RoboMaker { public let environment: Environment? /// The rendering engine for the simulation application. public let renderingEngine: RenderingEngine? - /// Information about the robot software suite (ROS distribution). + /// Information about the robot software suite. public let robotSoftwareSuite: RobotSoftwareSuite /// The simulation software suite used by the simulation application. public let simulationSoftwareSuite: SimulationSoftwareSuite @@ -5104,7 +5104,7 @@ extension RoboMaker { public let renderingEngine: RenderingEngine? /// The revision id of the simulation application. public let revisionId: String? - /// Information about the robot software suite (ROS distribution). + /// Information about the robot software suite. public let robotSoftwareSuite: RobotSoftwareSuite? /// The simulation software suite used by the simulation application. public let simulationSoftwareSuite: SimulationSoftwareSuite? @@ -5206,7 +5206,7 @@ extension RoboMaker { } public struct UploadConfiguration: AWSEncodableShape & AWSDecodableShape { - /// A prefix that specifies where files will be uploaded in Amazon S3. It is appended to the simulation output location to determine the final path. For example, if your simulation output location is s3://my-bucket and your upload configuration name is robot-test, your files will be uploaded to s3://my-bucket///robot-test. + /// A prefix that specifies where files will be uploaded in Amazon S3. It is appended to the simulation output location to determine the final path. For example, if your simulation output location is s3://amzn-s3-demo-bucket and your upload configuration name is robot-test, your files will be uploaded to s3://amzn-s3-demo-bucket///robot-test. public let name: String /// Specifies the path of the file(s) to upload. Standard Unix glob matching rules are accepted, with the addition of ** as a super asterisk. For example, specifying /var/log/**.log causes all .log files in the /var/log directory tree to be collected. For more examples, see Glob Library. 
public let path: String diff --git a/Sources/Soto/Services/Route53Resolver/Route53Resolver_shapes.swift b/Sources/Soto/Services/Route53Resolver/Route53Resolver_shapes.swift index 83879629fb..e229df11fe 100644 --- a/Sources/Soto/Services/Route53Resolver/Route53Resolver_shapes.swift +++ b/Sources/Soto/Services/Route53Resolver/Route53Resolver_shapes.swift @@ -832,7 +832,7 @@ extension Route53Resolver { /// any unique string, for example, a date/time stamp. public let creatorRequestId: String /// The ARN of the resource that you want Resolver to send query logs. You can send query logs to an S3 bucket, a CloudWatch Logs log group, - /// or a Kinesis Data Firehose delivery stream. Examples of valid values include the following: S3 bucket: arn:aws:s3:::examplebucket You can optionally append a file prefix to the end of the ARN. arn:aws:s3:::examplebucket/development/ CloudWatch Logs log group: arn:aws:logs:us-west-1:123456789012:log-group:/mystack-testgroup-12ABC1AB12A1:* Kinesis Data Firehose delivery stream: arn:aws:kinesis:us-east-2:0123456789:stream/my_stream_name + /// or a Kinesis Data Firehose delivery stream. Examples of valid values include the following: S3 bucket: arn:aws:s3:::amzn-s3-demo-bucket You can optionally append a file prefix to the end of the ARN. arn:aws:s3:::amzn-s3-demo-bucket/development/ CloudWatch Logs log group: arn:aws:logs:us-west-1:123456789012:log-group:/mystack-testgroup-12ABC1AB12A1:* Kinesis Data Firehose delivery stream: arn:aws:kinesis:us-east-2:0123456789:stream/my_stream_name public let destinationArn: String /// The name that you want to give the query logging configuration. public let name: String @@ -3972,13 +3972,18 @@ extension Route53Resolver { /// /// For an inbound endpoint you can apply the protocols as follows: Do53 and DoH in combination. Do53 and DoH-FIPS in combination. Do53 alone. DoH alone. DoH-FIPS alone. None, which is treated as Do53. For an outbound endpoint you can apply the protocols as follows: Do53 and DoH in combination. Do53 alone. DoH alone. None, which is treated as Do53. public let `protocol`: `Protocol`? + /// The Server Name Indication of the DoH server that you want to forward queries to. + /// This is only used if the Protocol of the TargetAddress is DoH. + /// + public let serverNameIndication: String? @inlinable - public init(ip: String? = nil, ipv6: String? = nil, port: Int? = nil, protocol: `Protocol`? = nil) { + public init(ip: String? = nil, ipv6: String? = nil, port: Int? = nil, protocol: `Protocol`? = nil, serverNameIndication: String? = nil) { self.ip = ip self.ipv6 = ipv6 self.port = port self.`protocol` = `protocol` + self.serverNameIndication = serverNameIndication } public func validate(name: String) throws { @@ -3988,6 +3993,7 @@ extension Route53Resolver { try self.validate(self.ipv6, name: "ipv6", parent: name, min: 7) try self.validate(self.port, name: "port", parent: name, max: 65535) try self.validate(self.port, name: "port", parent: name, min: 0) + try self.validate(self.serverNameIndication, name: "serverNameIndication", parent: name, max: 255) } private enum CodingKeys: String, CodingKey { @@ -3995,6 +4001,7 @@ extension Route53Resolver { case ipv6 = "Ipv6" case port = "Port" case `protocol` = "Protocol" + case serverNameIndication = "ServerNameIndication" } } @@ -4205,7 +4212,8 @@ extension Route53Resolver { /// defined as TYPENUMBER, where the /// NUMBER can be 1-65334, for /// example, TYPE28. For more information, see - /// List of DNS record types. + /// List of DNS record types. 
If you set up a firewall BLOCK rule with action NXDOMAIN on query type equals AAAA, + /// this action will not be applied to synthetic IPv6 addresses generated when DNS64 is enabled. public let qtype: String? @inlinable diff --git a/Sources/Soto/Services/S3/S3_api.swift b/Sources/Soto/Services/S3/S3_api.swift index 95ea81b5a1..a3fe1851e2 100644 --- a/Sources/Soto/Services/S3/S3_api.swift +++ b/Sources/Soto/Services/S3/S3_api.swift @@ -264,8 +264,8 @@ public struct S3: AWSService { return try await self.abortMultipartUpload(input, logger: logger) } - /// Completes a multipart upload by assembling previously uploaded parts. You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded. The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Note that if CompleteMultipartUpload fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices. You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response. For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide. 
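Stepping back to the Route53Resolver change above, and not part of the upstream diff: a sketch of a DNS-over-HTTPS target address that sets the new ServerNameIndication member and runs the generated validation, which enforces the 255-character limit added in that hunk. The .doh case name is assumed from the service model value "DoH"; the IP address and host name are placeholders.

import SotoRoute53Resolver

// Minimal sketch (assumptions noted above): a DoH forwarding target with an
// explicit Server Name Indication.
let target = Route53Resolver.TargetAddress(
    ip: "10.0.0.2",
    port: 443,
    protocol: .doh,                              // case name assumed for the "DoH" protocol value
    serverNameIndication: "dns.example.com"
)
try target.validate(name: "targetAddress")       // includes the 255-character SNI check shown above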
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If you provide an additional checksum value in your MultipartUpload requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt action for the CompleteMultipartUpload request to succeed. Special errors Error Code: EntityTooSmall Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part. HTTP Status Code: 400 Bad Request Error Code: InvalidPart Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag. HTTP Status Code: 400 Bad Request Error Code: InvalidPartOrder Description: The list of parts was not in ascending order. The parts list must be specified in order by part number. HTTP Status Code: 400 Bad Request Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CompleteMultipartUpload: CreateMultipartUpload UploadPart AbortMultipartUpload ListParts ListMultipartUploads + /// Completes a multipart upload by assembling previously uploaded parts. You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded. The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. 
If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Note that if CompleteMultipartUpload fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices. You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response. For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide. If you provide an additional checksum value in your MultipartUpload requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt action for the CompleteMultipartUpload request to succeed. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Special errors Error Code: EntityTooSmall Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part. HTTP Status Code: 400 Bad Request Error Code: InvalidPart Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag. HTTP Status Code: 400 Bad Request Error Code: InvalidPartOrder Description: The list of parts was not in ascending order. The parts list must be specified in order by part number. HTTP Status Code: 400 Bad Request Error Code: NoSuchUpload Description: The specified multipart upload does not exist. 
The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CompleteMultipartUpload: CreateMultipartUpload UploadPart AbortMultipartUpload ListParts ListMultipartUploads @Sendable @inlinable public func completeMultipartUpload(_ input: CompleteMultipartUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CompleteMultipartUploadOutput { @@ -278,13 +278,13 @@ public struct S3: AWSService { logger: logger ) } - /// Completes a multipart upload by assembling previously uploaded parts. You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded. The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Note that if CompleteMultipartUpload fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices. You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response. For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. 
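To illustrate the parts-list requirement described above (each entry needs the PartNumber and the ETag returned when that part was uploaded, in ascending part-number order), here is a rough Soto sketch. The bucket, key, and the `uploadedETags` array are placeholders supplied by the caller, and the initializer labels follow Soto's generated request shapes, which may vary slightly between releases.

import SotoS3

/// Sketch: build the parts list and complete the multipart upload.
/// `uploadedETags` holds the ETag returned by each UploadPart call, in part order.
func assembleObject(
    s3: S3,
    bucket: String,
    key: String,
    uploadId: String,
    uploadedETags: [String]
) async throws -> S3.CompleteMultipartUploadOutput {
    // Part numbers start at 1 and must be ascending.
    let parts = uploadedETags.enumerated().map { index, eTag in
        S3.CompletedPart(eTag: eTag, partNumber: index + 1)
    }
    let request = S3.CompleteMultipartUploadRequest(
        bucket: bucket,
        key: key,
        multipartUpload: S3.CompletedMultipartUpload(parts: parts),
        uploadId: uploadId
    )
    return try await s3.completeMultipartUpload(request)
}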
Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If you provide an additional checksum value in your MultipartUpload requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt action for the CompleteMultipartUpload request to succeed. Special errors Error Code: EntityTooSmall Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part. HTTP Status Code: 400 Bad Request Error Code: InvalidPart Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag. HTTP Status Code: 400 Bad Request Error Code: InvalidPartOrder Description: The list of parts was not in ascending order. The parts list must be specified in order by part number. HTTP Status Code: 400 Bad Request Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CompleteMultipartUpload: CreateMultipartUpload UploadPart AbortMultipartUpload ListParts ListMultipartUploads + /// Completes a multipart upload by assembling previously uploaded parts. You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded. The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. 
A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Note that if CompleteMultipartUpload fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices. You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response. For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide. If you provide an additional checksum value in your MultipartUpload requests and the object is encrypted with Key Management Service, you must have permission to use the kms:Decrypt action for the CompleteMultipartUpload request to succeed. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Special errors Error Code: EntityTooSmall Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part. HTTP Status Code: 400 Bad Request Error Code: InvalidPart Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified ETag might not have matched the uploaded part's ETag. 
HTTP Status Code: 400 Bad Request Error Code: InvalidPartOrder Description: The list of parts was not in ascending order. The parts list must be specified in order by part number. HTTP Status Code: 400 Bad Request Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CompleteMultipartUpload: CreateMultipartUpload UploadPart AbortMultipartUpload ListParts ListMultipartUploads /// /// Parameters: /// - bucket: Name of the bucket to which the multipart upload was initiated. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. 
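The checksum parameters documented above are base64-encoded strings. A hedged sketch follows, assuming the convenience overload documented in this hunk and assuming the caller has already computed the appropriate base64-encoded CRC-32 value for the completed object (for multipart uploads this is normally a checksum of the part-level checksums, per "Checking object integrity").

import SotoS3

/// Sketch: supply a precomputed, base64-encoded CRC-32 so S3 can verify the
/// assembled object. `crc32Base64`, `parts`, and `uploadId` come from the caller.
func completeWithChecksum(
    s3: S3, bucket: String, key: String, uploadId: String,
    parts: [S3.CompletedPart], crc32Base64: String
) async throws -> S3.CompleteMultipartUploadOutput {
    try await s3.completeMultipartUpload(
        bucket: bucket,
        checksumCRC32: crc32Base64,
        key: key,
        multipartUpload: S3.CompletedMultipartUpload(parts: parts),
        uploadId: uploadId
    )
}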
+ /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). @@ -334,7 +334,7 @@ public struct S3: AWSService { return try await self.completeMultipartUpload(input, logger: logger) } - /// Creates a copy of an object that is already stored in Amazon S3. You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API. You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets. Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide. Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. Authentication and authorization All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. 
Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have read access to the source object and write access to the destination bucket. General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation. If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied. If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. If the copy is successful, you receive a response with information about the copied object. A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed. If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). 
If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Charge The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CopyObject: PutObject GetObject + /// Creates a copy of an object that is already stored in Amazon S3. You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API. You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets. Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide. Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. Authentication and authorization All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have read access to the source object and write access to the destination bucket. General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation. If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied. 
If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket. If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. If the copy is successful, you receive a response with information about the copied object. A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed. If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Charge The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing. 
HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CopyObject: PutObject GetObject @Sendable @inlinable public func copyObject(_ input: CopyObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CopyObjectOutput { @@ -347,12 +347,12 @@ public struct S3: AWSService { logger: logger ) } - /// Creates a copy of an object that is already stored in Amazon S3. You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API. You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets. Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide. Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. Authentication and authorization All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have read access to the source object and write access to the destination bucket. General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation. If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied. If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket. 
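For the single-request copy path described above (objects up to 5 GB), a Soto call might look like the following sketch. The bucket and key names are placeholders, and copySource uses the URL-encoded "source-bucket/source-key" form the API expects.

import SotoS3

/// Sketch: copy an object (up to 5 GB) in one atomic CopyObject request.
func copySmallObject(s3: S3) async throws -> S3.CopyObjectOutput {
    let request = S3.CopyObjectRequest(
        bucket: "amzn-s3-demo-destination-bucket",                   // placeholder destination
        copySource: "amzn-s3-demo-source-bucket/reports%2F2024.csv", // URL-encoded "bucket/key"
        key: "reports/2024.csv"
    )
    return try await s3.copyObject(request)
}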
Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. If the copy is successful, you receive a response with information about the copied object. A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed. If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Charge The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CopyObject: PutObject GetObject + /// Creates a copy of an object that is already stored in Amazon S3. You can store individual objects of up to 5 TB in Amazon S3. 
You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API. You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets. Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide. Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. Authentication and authorization All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have read access to the source object and write access to the destination bucket. General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation. If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied. If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. 
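As the description above notes, a single CopyObject call tops out at 5 GB; larger objects have to be copied as a multipart upload whose parts come from UploadPartCopy. A rough sketch of that flow follows, with placeholder names, a fixed part size, and simplified byte-range bookkeeping; the caller is assumed to already know the source object size (for example from HeadObject).

import SotoS3

/// Sketch: copy an object larger than 5 GB by driving UploadPartCopy ourselves.
/// Names are placeholders; error handling and part-size tuning are omitted.
func copyLargeObject(
    s3: S3, sourceBucket: String, sourceKey: String,
    destBucket: String, destKey: String, objectSize: Int
) async throws {
    let create = try await s3.createMultipartUpload(
        .init(bucket: destBucket, key: destKey)
    )
    guard let uploadId = create.uploadId else { return }

    let partSize = 5 * 1024 * 1024 * 1024   // copy in parts of up to 5 GiB each
    var parts: [S3.CompletedPart] = []
    var offset = 0
    var partNumber = 1
    while offset < objectSize {
        let end = min(offset + partSize, objectSize) - 1
        let copied = try await s3.uploadPartCopy(
            .init(
                bucket: destBucket,
                copySource: "\(sourceBucket)/\(sourceKey)",
                copySourceRange: "bytes=\(offset)-\(end)",
                key: destKey,
                partNumber: partNumber,
                uploadId: uploadId
            )
        )
        parts.append(S3.CompletedPart(eTag: copied.copyPartResult?.eTag, partNumber: partNumber))
        offset = end + 1
        partNumber += 1
    }

    _ = try await s3.completeMultipartUpload(
        .init(
            bucket: destBucket,
            key: destKey,
            multipartUpload: S3.CompletedMultipartUpload(parts: parts),
            uploadId: uploadId
        )
    )
}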
If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket. If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Response and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. If the copy is successful, you receive a response with information about the copied object. A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed. If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Charge The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CopyObject: PutObject GetObject /// /// Parameters: /// - acl: The canned access control list (ACL) to apply to the object. When you copy an object, the ACL metadata is not preserved and is set to private by default. Only the owner has full access control. To override the default ACL setting, specify a new ACL when you generate a copy request. For more information, see Using ACLs. If the destination bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. 
Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide. If your destination bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. This functionality is not supported for directory buckets. This functionality is not supported for Amazon S3 on Outposts. /// - bucket: The name of the destination bucket. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. This functionality is not supported when the destination bucket is a directory bucket. + /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key. 
For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets /// - cacheControl: Specifies the caching behavior along the request/reply chain. /// - checksumAlgorithm: Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see Checking object integrity in the Amazon S3 User Guide. When you copy an object, if the source object has a checksum, that checksum value will be copied to the new object by default. If the CopyObject request does not include this x-amz-checksum-algorithm header, the checksum algorithm will be copied from the source object to the destination object (if it's present on the source object). You can optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported values will respond with the HTTP status code 400 Bad Request. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. /// - contentDisposition: Specifies presentational information for the object. Indicates whether an object should be displayed in a web browser or downloaded as a file. It allows specifying the desired filename for the downloaded file. @@ -381,12 +381,12 @@ public struct S3: AWSService { /// - objectLockMode: The Object Lock mode that you want to apply to the object copy. This functionality is not supported for directory buckets. /// - objectLockRetainUntilDate: The date and time when you want the Object Lock of the object copy to expire. This functionality is not supported for directory buckets. /// - requestPayer: - /// - serverSideEncryption: The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response. Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. 
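To tie the serverSideEncryption, bucketKeyEnabled, and ssekmsKeyId parameters together, here is a hedged sketch of a copy that re-encrypts the destination object under a customer managed KMS key and enables an S3 Bucket Key to reduce KMS request traffic. Bucket names and the key ARN are placeholders, and the enum case name assumes Soto's generated ServerSideEncryption type.

import SotoS3

/// Sketch: copy an object and encrypt the copy with SSE-KMS, enabling an
/// S3 Bucket Key. Placeholder names throughout.
func copyWithSSEKMS(s3: S3) async throws -> S3.CopyObjectOutput {
    let request = S3.CopyObjectRequest(
        bucket: "amzn-s3-demo-destination-bucket",
        bucketKeyEnabled: true,
        copySource: "amzn-s3-demo-source-bucket/data.json",
        key: "data.json",
        serverSideEncryption: .awsKms,
        ssekmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"
    )
    return try await s3.copyObject(request)
}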
For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide. For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// - serverSideEncryption: The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response. Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a different default encryption configuration, Amazon S3 uses the corresponding encryption key to encrypt the target object copy. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide. General purpose buckets For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. Directory buckets For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. 
Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration. /// - sseCustomerAlgorithm: Specifies the algorithm to use when encrypting the object (for example, AES256). When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. This functionality is not supported when the destination bucket is a directory bucket. /// - sseCustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded. Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This functionality is not supported when the destination bucket is a directory bucket. /// - sseCustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported when the destination bucket is a directory bucket. - /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value must be explicitly added to specify encryption context for CopyObject requests. This functionality is not supported when the destination bucket is a directory bucket. - /// - ssekmsKeyId: Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. This functionality is not supported when the destination bucket is a directory bucket. + /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for the destination object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. General purpose buckets - This value must be explicitly added to specify encryption context for CopyObject requests if you want an additional encryption context for your destination object. The additional encryption context of the source object won't be copied to the destination object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. 
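The ssekmsEncryptionContext value described above is a base64-encoded JSON document of key-value pairs. A small sketch of preparing one for a general purpose bucket copy follows; the context keys shown are arbitrary examples, not required names.

import Foundation
import SotoS3

/// Sketch: encode an additional KMS encryption context for the destination object.
/// The keys and values here are arbitrary examples.
func encryptionContextHeader() throws -> String {
    let context = ["department": "analytics", "project": "reports"]
    let json = try JSONSerialization.data(withJSONObject: context)
    return json.base64EncodedString()
}

// The returned string would be passed as `ssekmsEncryptionContext:` on the copy
// request alongside the SSE-KMS settings shown earlier.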
+ /// - ssekmsKeyId: Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// - storageClass: If the x-amz-storage-class header is not used, the copied object will be stored in the STANDARD Storage Class by default. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. /// - tagging: The tag-set for the object copy in the destination bucket. This value must be used in conjunction with the x-amz-tagging-directive if you choose REPLACE for the x-amz-tagging-directive. If you choose COPY for the x-amz-tagging-directive, you don't need to set the x-amz-tagging header, because the tag-set will be copied from the source object directly. The tag-set must be encoded as URL Query parameters. The default value is the empty value. Directory buckets - For directory buckets in a CopyObject operation, only the empty tag-set is supported. Any requests that attempt to write non-empty tags into directory buckets will receive a 501 Not Implemented status code. /// - taggingDirective: Specifies whether the object tag-set is copied from the source object or replaced with the tag-set that's provided in the request. The default value is COPY. Directory buckets - For directory buckets in a CopyObject operation, only the empty tag-set is supported. Any requests that attempt to write non-empty tags into directory buckets will receive a 501 Not Implemented status code. @@ -542,7 +542,8 @@ public struct S3: AWSService { } /// This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide. After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. 
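The encryption parameters documented above map directly onto the generated CopyObject request shape. As a minimal sketch only (the bucket names, object key, and KMS key ARN are placeholders, not values from this change), overriding the destination object's encryption with SSE-KMS might look like this:

import SotoS3

// Hypothetical names: substitute your own buckets, key, and customer managed KMS key ARN.
func copyObjectWithSSEKMS() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .useast1)

    let request = S3.CopyObjectRequest(
        bucket: "example-destination-bucket",                  // general purpose destination bucket
        copySource: "example-source-bucket/reports/2024.csv",  // "source-bucket/source-key"
        key: "reports/2024-copy.csv",
        serverSideEncryption: .awsKms,                         // override the destination encryption with SSE-KMS
        ssekmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-1234-5678"
    )
    let output = try await s3.copyObject(request)
    print("copy ETag:", output.copyObjectResult?.eTag ?? "-")

    try await client.shutdown()
}

For a directory bucket destination, the same request would have to name the customer managed key that is already configured as the bucket's default SSE-KMS key, as described above.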
Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload. If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration. Directory buckets - S3 Lifecycle is not supported by directory buckets. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Request signing For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To perform a multipart upload with encryption using an Key Management Service (KMS) KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. 
When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request. Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data. To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role. All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. 
x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide. Directory buckets -For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CreateMultipartUpload: UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request. Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data. To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. 
For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role. All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. 
So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket. + /// For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CreateMultipartUpload: UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads @Sendable @inlinable public func createMultipartUpload(_ input: CreateMultipartUploadRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMultipartUploadOutput { @@ -556,12 +557,13 @@ public struct S3: AWSService { ) } /// This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide. After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload. If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration. Directory buckets - S3 Lifecycle is not supported by directory buckets. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Request signing For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To perform a multipart upload with encryption using an Key Management Service (KMS) KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. 
These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request. Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data. To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. 
These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role. All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide. Directory buckets -For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CreateMultipartUpload: UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). 
When the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request. Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data. To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key policy and your IAM user or role. All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C) in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. 
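As a hedged sketch of the recommendation above, the bucket's default encryption can be set to SSE-KMS with a customer managed key through PutBucketEncryption, so that new objects pick up the desired settings without per-request overrides. The bucket name and KMS key ARN below are placeholders:

import SotoS3

// Hypothetical directory bucket name and customer managed KMS key ARN.
func configureDefaultSSEKMS() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)

    let encryption = S3.ServerSideEncryptionConfiguration(rules: [
        S3.ServerSideEncryptionRule(
            applyServerSideEncryptionByDefault: S3.ServerSideEncryptionByDefault(
                kmsMasterKeyID: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-1234-5678",
                sseAlgorithm: .awsKms
            )
        )
    ])
    try await s3.putBucketEncryption(S3.PutBucketEncryptionRequest(
        bucket: "example-bucket--usw2-az1--x-s3",
        serverSideEncryptionConfiguration: encryption
    ))
    try await client.shutdown()
}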
For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket. + /// For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to CreateMultipartUpload: UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads /// /// Parameters: /// - acl: The canned ACL to apply to the object. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL in the Amazon S3 User Guide. By default, all objects are private. Only the owner has full access control. When uploading an object, you can grant access permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the new object. For more information, see Using ACLs. One way to grant the permissions using the request headers is to specify a canned ACL with the x-amz-acl request header. This functionality is not supported for directory buckets. This functionality is not supported for Amazon S3 on Outposts. /// - bucket: The name of the bucket where the multipart upload is initiated and where the object is uploaded. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. 
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key. This functionality is not supported for directory buckets. + /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). General purpose buckets - Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets /// - cacheControl: Specifies caching behavior along the request/reply chain. /// - checksumAlgorithm: Indicates the algorithm that you want Amazon S3 to use to create the checksum for the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - contentDisposition: Specifies presentational information for the object. @@ -580,12 +582,12 @@ public struct S3: AWSService { /// - objectLockMode: Specifies the Object Lock mode that you want to apply to the uploaded object. This functionality is not supported for directory buckets. /// - objectLockRetainUntilDate: Specifies the date and time when you want the Object Lock to expire. This functionality is not supported for directory buckets. /// - requestPayer: - /// - serverSideEncryption: The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// - serverSideEncryption: The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).
Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket. /// - sseCustomerAlgorithm: Specifies the algorithm to use when encrypting the object (for example, AES256). This functionality is not supported for directory buckets. /// - sseCustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This functionality is not supported for directory buckets. /// - sseCustomerKeyMD5: Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported for directory buckets. - /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This functionality is not supported for directory buckets. - /// - ssekmsKeyId: Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption. This functionality is not supported for directory buckets. + /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. 
The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. + /// - ssekmsKeyId: Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// - storageClass: By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide. For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. /// - tagging: The tag-set for the object. The tag-set must be encoded as URL Query parameters. This functionality is not supported for directory buckets. /// - websiteRedirectLocation: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. This functionality is not supported for directory buckets. @@ -659,7 +661,9 @@ public struct S3: AWSService { return try await self.createMultipartUpload(input, logger: logger) } - /// Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint APIs on directory buckets. For more information about Zonal endpoint APIs that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory bucket, use the CreateSession API operation. Specifically, you grant s3express:CreateSession permission to a bucket in a bucket policy or an IAM identity-based policy. 
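To make the header-matching rules above concrete, here is a minimal multipart upload sketch against a general purpose bucket that overrides the destination encryption with SSE-KMS through the serverSideEncryption and ssekmsKeyId parameters. All names and the KMS key ARN are placeholders, and a real upload would use parts of at least 5 MiB (except the last one):

import NIOCore
import SotoS3

// Hypothetical bucket, key, and KMS key ARN; the single tiny part is for illustration only.
func multipartUploadWithSSEKMS() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .useast1)

    // 1. Initiate the upload and request SSE-KMS for the new object.
    let created = try await s3.createMultipartUpload(S3.CreateMultipartUploadRequest(
        bucket: "example-general-purpose-bucket",
        key: "backups/archive.bin",
        serverSideEncryption: .awsKms,
        ssekmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-1234-5678"
    ))
    guard let uploadId = created.uploadId else { return }

    // 2. Upload a part. Any encryption-related headers must match the ones used above.
    let part = try await s3.uploadPart(S3.UploadPartRequest(
        body: .init(buffer: ByteBuffer(string: "example payload")),
        bucket: "example-general-purpose-bucket",
        key: "backups/archive.bin",
        partNumber: 1,
        uploadId: uploadId
    ))

    // 3. Complete the upload with the ETag returned for each part.
    _ = try await s3.completeMultipartUpload(S3.CompleteMultipartUploadRequest(
        bucket: "example-general-purpose-bucket",
        key: "backups/archive.bin",
        multipartUpload: S3.CompletedMultipartUpload(parts: [
            S3.CompletedPart(eTag: part.eTag, partNumber: 1)
        ]),
        uploadId: uploadId
    ))
    try await client.shutdown()
}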
Then, you use IAM credentials to make the CreateSession API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint APIs. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval. If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide. You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject. HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket. Permissions To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession permission to the bucket. In a policy, you can have the s3express:SessionMode condition key to control who can create a ReadWrite or ReadOnly session. For more information about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode . For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint APIs, the bucket policy should also grant both accounts the s3express:CreateSession permission. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. + /// Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. 
For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory bucket, use the CreateSession API operation. Specifically, you grant s3express:CreateSession permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval. If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide. You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. CopyObject API operation - Unlike other Zonal endpoint API operations, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject. HeadBucket API operation - Unlike other Zonal endpoint API operations, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket. Permissions To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession permission to the bucket. In a policy, you can have the s3express:SessionMode condition key to control who can create a ReadWrite or ReadOnly session. For more information about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode . For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. 
To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession permission. If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key. Encryption For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, + /// you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session. Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), it's not supported to override the values of the encryption settings from the CreateSession request. + /// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. 
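Most applications let the Amazon Web Services SDKs create and refresh these sessions automatically, but the operation can also be called directly. A minimal sketch, assuming the client can reach the directory bucket's Zonal endpoint and using a placeholder bucket name:

import SotoS3

// Hypothetical directory bucket name.
func createDirectoryBucketSession() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)

    let response = try await s3.createSession(S3.CreateSessionRequest(
        bucket: "example-bucket--usw2-az1--x-s3",
        sessionMode: .readWrite                    // maps to x-amz-create-session-mode
    ))
    // Temporary credentials are scoped to the bucket and expire after a few minutes.
    let credentials = response.credentials
    print("temporary access key:", credentials.accessKeyId)

    try await client.shutdown()
}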
@Sendable @inlinable public func createSession(_ input: CreateSessionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSessionOutput { @@ -672,21 +676,35 @@ public struct S3: AWSService { logger: logger ) } - /// Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint APIs on directory buckets. For more information about Zonal endpoint APIs that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory bucket, use the CreateSession API operation. Specifically, you grant s3express:CreateSession permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint APIs. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval. If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide. You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject. HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket. Permissions To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession permission to the bucket. 
In a policy, you can have the s3express:SessionMode condition key to control who can create a ReadWrite or ReadOnly session. For more information about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode . For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint APIs, the bucket policy should also grant both accounts the s3express:CreateSession permission. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. + /// Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory bucket, use the CreateSession API operation. Specifically, you grant s3express:CreateSession permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval. If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide. You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. CopyObject API operation - Unlike other Zonal endpoint API operations, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject. 
HeadBucket API operation - Unlike other Zonal endpoint API operations, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket. Permissions To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession permission to the bucket. In a policy, you can have the s3express:SessionMode condition key to control who can create a ReadWrite or ReadOnly session. For more information about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode . For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession permission. If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key. Encryption For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, + /// you authenticate and authorize requests through CreateSession for low latency. To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session. Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported. After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. 
When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), it's not supported to override the values of the encryption settings from the CreateSession request. + /// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. /// /// Parameters: /// - bucket: The name of the bucket that you create a session for. - /// - sessionMode: Specifies the mode of the session that will be created, either ReadWrite or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session is capable of executing all the Zonal endpoint APIs on a directory bucket. A ReadOnly session is constrained to execute the following Zonal endpoint APIs: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, and ListMultipartUploads. + /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using KMS keys (SSE-KMS). S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// - serverSideEncryption: The server-side encryption algorithm to use when you store objects in the directory bucket. For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). By default, Amazon S3 encrypts data with SSE-S3. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. + /// - sessionMode: Specifies the mode of the session that will be created, either ReadWrite or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session is capable of executing all the Zonal endpoint API operations on a directory bucket. A ReadOnly session is constrained to execute the following Zonal endpoint API operations: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, and ListMultipartUploads. + /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. 
+ /// - ssekmsKeyId: If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN, not the Key ID. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// - logger: Logger use during operation @inlinable public func createSession( bucket: String, + bucketKeyEnabled: Bool? = nil, + serverSideEncryption: ServerSideEncryption? = nil, sessionMode: SessionMode? = nil, + ssekmsEncryptionContext: String? = nil, + ssekmsKeyId: String? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> CreateSessionOutput { let input = CreateSessionRequest( bucket: bucket, + bucketKeyEnabled: bucketKeyEnabled, + serverSideEncryption: serverSideEncryption, + sessionMode: sessionMode, + ssekmsEncryptionContext: ssekmsEncryptionContext, + ssekmsKeyId: ssekmsKeyId ) return try await self.createSession(input, logger: logger) } @@ -792,7 +810,7 @@ public struct S3: AWSService { return try await self.deleteBucketCors(input, logger: logger) } - /// This operation is not supported by directory buckets. This implementation of the DELETE action resets the default encryption for the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related to DeleteBucketEncryption: PutBucketEncryption GetBucketEncryption + /// This implementation of the DELETE action resets the default encryption for the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior for directory buckets. Permissions General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources. Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported.
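A brief aside on the convenience overload added above (not part of the diff): the new encryption-related parameters all default to nil, so a caller normally relies on the bucket's default encryption configuration, as the documentation recommends. A minimal sketch requesting a read-only session, reusing the client and s3 service object from the earlier example; the bucket name is a placeholder:

// A ReadOnly session may only call GetObject, HeadObject, ListObjectsV2,
// GetObjectAttributes, ListParts, and ListMultipartUploads.
// bucketKeyEnabled, serverSideEncryption, ssekmsEncryptionContext, and
// ssekmsKeyId are left nil so the bucket's default encryption applies.
let readOnlySession = try await s3.createSession(
    bucket: "my-bucket--usw2-az1--x-s3",
    sessionMode: .readOnly
)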
This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to DeleteBucketEncryption: PutBucketEncryption GetBucketEncryption @Sendable @inlinable public func deleteBucketEncryption(_ input: DeleteBucketEncryptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -805,11 +823,11 @@ public struct S3: AWSService { logger: logger ) } - /// This operation is not supported by directory buckets. This implementation of the DELETE action resets the default encryption for the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related to DeleteBucketEncryption: PutBucketEncryption GetBucketEncryption + /// This implementation of the DELETE action resets the default encryption for the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior for directory buckets. Permissions General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources. Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to DeleteBucketEncryption: PutBucketEncryption GetBucketEncryption /// /// Parameters: - /// - bucket: The name of the bucket containing the server-side encryption configuration to delete. - /// - expectedBucketOwner: The account ID of the expected bucket owner. 
If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). + /// - bucket: The name of the bucket containing the server-side encryption configuration to delete. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide + /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code /// - logger: Logger use during operation @inlinable public func deleteBucketEncryption( @@ -1120,8 +1138,7 @@ public struct S3: AWSService { return try await self.deleteBucketWebsite(input, logger: logger) } - /// Removes an object from a bucket. The behavior depends on the bucket's versioning state: If bucket versioning is not enabled, the operation permanently deletes the object. If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket. If bucket versioning is suspended, the operation removes the object that has a null versionId, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId, and all versions of the object have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId, you must include the object’s versionId in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. 
Requests that include x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request. Directory buckets - MFA delete is not supported by directory buckets. You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. Permissions General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers. s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following action is related to DeleteObject: PutObject + /// Removes an object from a bucket. The behavior depends on the bucket's versioning state. For more information, see Best practices to consider before deleting an object. To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If the object you want to delete is in a bucket where the bucket versioning configuration is MFA delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA delete and to see example requests, see Using MFA delete and Sample request in the Amazon S3 User Guide. S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. MFA delete is not supported by directory buckets. 
Permissions General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers. s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission. You can also use PutBucketLifecycle to delete objects in Amazon S3. s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration permissions. Directory buckets permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following action is related to DeleteObject: PutObject @Sendable @inlinable public func deleteObject(_ input: DeleteObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteObjectOutput { @@ -1134,8 +1151,7 @@ public struct S3: AWSService { logger: logger ) } - /// Removes an object from a bucket. The behavior depends on the bucket's versioning state: If bucket versioning is not enabled, the operation permanently deletes the object. If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket. If bucket versioning is suspended, the operation removes the object that has a null versionId, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId, and all versions of the object have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId, you must include the object’s versionId in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. 
For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request. Directory buckets - MFA delete is not supported by directory buckets. You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. Permissions General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers. s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following action is related to DeleteObject: PutObject + /// Removes an object from a bucket. The behavior depends on the bucket's versioning state. For more information, see Best practices to consider before deleting an object. To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If the object you want to delete is in a bucket where the bucket versioning configuration is MFA delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA delete and to see example requests, see Using MFA delete and Sample request in the Amazon S3 User Guide. S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. MFA delete is not supported by directory buckets. 
Permissions General purpose bucket permissions - The following permissions are required in your policies when your DeleteObjects request includes specific headers. s3:DeleteObject - To delete an object from a bucket, you must always have the s3:DeleteObject permission. You can also use PutBucketLifecycle to delete objects in Amazon S3. s3:DeleteObjectVersion - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration permissions. Directory buckets permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following action is related to DeleteObject: PutObject /// /// Parameters: /// - bucket: The bucket name of the bucket containing the object. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. @@ -1227,7 +1243,7 @@ public struct S3: AWSService { /// Parameters: /// - bucket: The bucket name containing the objects to delete. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. 
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. /// - bypassGovernanceRetention: Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention permission. This functionality is not supported for directory buckets. - /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. 
/// - delete: Container for the request. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - mfa: The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA delete enabled. When performing the DeleteObjects operation on an MFA delete enabled bucket, which attempts to delete the specified versioned objects, you must include an MFA token. If you don't provide an MFA token, the entire request will fail, even if there are non-versioned objects that you are trying to delete. If you provide an invalid token, whether there are versioned object keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide. This functionality is not supported for directory buckets. @@ -1430,7 +1446,7 @@ public struct S3: AWSService { return try await self.getBucketCors(input, logger: logger) } - /// This operation is not supported by directory buckets. Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to GetBucketEncryption: PutBucketEncryption DeleteBucketEncryption + /// Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior for directory buckets. Permissions General purpose bucket permissions - The s3:GetEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources. Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. 
For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to GetBucketEncryption: PutBucketEncryption DeleteBucketEncryption @Sendable @inlinable public func getBucketEncryption(_ input: GetBucketEncryptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetBucketEncryptionOutput { @@ -1443,11 +1459,11 @@ public struct S3: AWSService { logger: logger ) } - /// This operation is not supported by directory buckets. Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to GetBucketEncryption: PutBucketEncryption DeleteBucketEncryption + /// Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). General purpose buckets - For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. For information about the default encryption configuration in directory buckets, see Setting default server-side encryption behavior for directory buckets. Permissions General purpose bucket permissions - The s3:GetEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources. Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to GetBucketEncryption: PutBucketEncryption DeleteBucketEncryption /// /// Parameters: - /// - bucket: The name of the bucket from which the server-side encryption configuration is retrieved. - /// - expectedBucketOwner: The account ID of the expected bucket owner. 
If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). + /// - bucket: The name of the bucket from which the server-side encryption configuration is retrieved. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide + /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code /// - logger: Logger use during operation @inlinable public func getBucketEncryption( @@ -1959,8 +1975,8 @@ public struct S3: AWSService { } /// Retrieves an object from Amazon S3. In the GetObject request, specify the full key name for the object. General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the object key name as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket--use1-az5--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject, you must have the READ access to the object (or version). If you grant READ access to the anonymous user, the GetObject operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide. If you include a versionId in your request header, you must have the s3:GetObjectVersion permission to access a specific version of an object. The s3:GetObject permission is not required in this scenario. If you request the current version of an object without a specific versionId in the request header, only the s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in this scenario. 
If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Access Denied error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Storage classes If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. - /// Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request. Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. Overriding response header values through the request There are times when you want to override certain response header values of a GetObject response. For example, you might override the Content-Disposition response header value through your GetObject request. You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GetObject response are Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, and Expires. To override values for a set of response headers in the GetObject response, you can use the following query parameters in the request. 
response-cache-control response-content-disposition response-content-encoding response-content-language response-content-type response-expires When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to GetObject: ListBuckets GetObjectAcl + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If the object is encrypted using SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Storage classes If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. + /// Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request. Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. Overriding response header values through the request There are times when you want to override certain response header values of a GetObject response. For example, you might override the Content-Disposition response header value through your GetObject request. You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GetObject response are Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, and Expires. To override values for a set of response headers in the GetObject response, you can use the following query parameters in the request. 
response-cache-control response-content-disposition response-content-encoding response-content-language response-content-type response-expires When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to GetObject: ListBuckets GetObjectAcl @Sendable @inlinable public func getObject(_ input: GetObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetObjectOutput { @@ -1974,12 +1990,12 @@ public struct S3: AWSService { ) } /// Retrieves an object from Amazon S3. In the GetObject request, specify the full key name for the object. General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the object key name as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket--use1-az5--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject, you must have the READ access to the object (or version). If you grant READ access to the anonymous user, the GetObject operation returns the object without using an authorization header. For more information, see Specifying permissions in a policy in the Amazon S3 User Guide. If you include a versionId in your request header, you must have the s3:GetObjectVersion permission to access a specific version of an object. The s3:GetObject permission is not required in this scenario. If you request the current version of an object without a specific versionId in the request header, only the s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in this scenario. If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Access Denied error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. 
Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Storage classes If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. - /// Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request. Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. Overriding response header values through the request There are times when you want to override certain response header values of a GetObject response. For example, you might override the Content-Disposition response header value through your GetObject request. You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GetObject response are Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, and Expires. To override values for a set of response headers in the GetObject response, you can use the following query parameters in the request. response-cache-control response-content-disposition response-content-encoding response-content-language response-content-type response-expires When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to GetObject: ListBuckets GetObjectAcl + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. 
For more information about authorization, see CreateSession . If the object is encrypted using SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Storage classes If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectState error. For information about restoring archived objects, see Restoring Archived Objects in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. + /// Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request. Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. Overriding response header values through the request There are times when you want to override certain response header values of a GetObject response. For example, you might override the Content-Disposition response header value through your GetObject request. You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GetObject response are Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, and Expires. To override values for a set of response headers in the GetObject response, you can use the following query parameters in the request. response-cache-control response-content-disposition response-content-encoding response-content-language response-content-type response-expires When you use these parameters, you must sign the request by using either an Authorization header or a presigned URL. These parameters cannot be used with an unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to GetObject: ListBuckets GetObjectAcl /// /// Parameters: /// - bucket: The bucket name containing the object. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. 
Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Object Lambda access points - When you use this action with an Object Lambda access point, you must direct requests to the Object Lambda access point hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - checksumMode: To retrieve the checksum, this mode must be enabled. In addition, if you enable checksum mode and the object is uploaded with a checksum and encrypted with a Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. + /// - checksumMode: To retrieve the checksum, this mode must be enabled. General purpose buckets - In addition, if you enable checksum mode and the object is uploaded with a checksum and encrypted with a Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - ifMatch: Return the object only if its entity tag (ETag) is the same as the one specified in this header; otherwise, return a 412 Precondition Failed error. If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; then, S3 returns 200 OK and the data requested. For more information about conditional requests, see RFC 7232. /// - ifModifiedSince: Return the object only if it has been modified since the specified time; otherwise, return a 304 Not Modified error. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified status code. For more information about conditional requests, see RFC 7232.
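// Illustrative sketch, not part of the generated service code: calling getObject with the
// response-header overrides and checksum mode described above. The client, region, bucket,
// and key values are placeholders, and member names such as `responseContentType` and the
// `.enabled` checksum-mode case are assumed to follow Soto's generated naming for the
// query parameters and enums listed in the documentation.
import SotoS3

func fetchReport(client: AWSClient) async throws {
    let s3 = S3(client: client, region: .uswest2)
    // Override Cache-Control and Content-Type on the response and ask S3 to return the
    // stored checksum; both require a signed request, as the documentation notes.
    let request = S3.GetObjectRequest(
        bucket: "example-bucket",
        checksumMode: .enabled,
        key: "reports/2024/summary.csv",
        responseCacheControl: "no-cache",
        responseContentType: "text/csv"
    )
    let object = try await s3.getObject(request)
    print("ETag:", object.eTag ?? "-", "Content-Type:", object.contentType ?? "-")
}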
@@ -2092,8 +2108,8 @@ public struct S3: AWSService { return try await self.getObjectAcl(input, logger: logger) } - /// Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation with depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found ("no such key") error. If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden ("access denied") error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. 
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Versioning Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. Conditional request headers Consider the following when using request headers: If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since condition evaluates to false. For more information about conditional requests, see RFC 7232. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since condition evaluates to true. For more information about conditional requests, see RFC 7232. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following actions are related to GetObjectAttributes: GetObject GetObjectAcl GetObjectLegalHold GetObjectLockConfiguration GetObjectRetention GetObjectTagging HeadObject ListParts + /// Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. 
If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found ("no such key") error. If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden ("access denied") error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. Versioning Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. 
You can only specify null to the versionId query parameter in the request. Conditional request headers Consider the following when using request headers: If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since condition evaluates to false. For more information about conditional requests, see RFC 7232. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since condition evaluates to true. For more information about conditional requests, see RFC 7232. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following actions are related to GetObjectAttributes: GetObject GetObjectAcl GetObjectLegalHold GetObjectLockConfiguration GetObjectRetention GetObjectTagging HeadObject ListParts @Sendable @inlinable public func getObjectAttributes(_ input: GetObjectAttributesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetObjectAttributesOutput { @@ -2106,8 +2122,8 @@ public struct S3: AWSService { logger: logger ) } - /// Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation with depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found ("no such key") error. If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden ("access denied") error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. 
After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Versioning Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. Conditional request headers Consider the following when using request headers: If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since condition evaluates to false. For more information about conditional requests, see RFC 7232. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since condition evaluates to true. For more information about conditional requests, see RFC 7232. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following actions are related to GetObjectAttributes: GetObject GetObjectAcl GetObjectLegalHold GetObjectLockConfiguration GetObjectRetention GetObjectTagging HeadObject ListParts + /// Retrieves all the metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. GetObjectAttributes combines the functionality of HeadObject and ListParts. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. 
These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To use GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found ("no such key") error. If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden ("access denied") error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a GET request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. 
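// Illustrative sketch, not part of the generated service code: using getObjectAttributes
// to read an object's size and storage class without downloading it, as described above.
// Assumes the same `import SotoS3` and configured `S3` client as the getObject sketch
// earlier; bucket and key are placeholders, and the `.objectSize`/`.storageClass` cases
// are assumed to match Soto's generated ObjectAttributes enum.
func describeObject(s3: S3) async throws {
    let attributes = try await s3.getObjectAttributes(.init(
        bucket: "example-bucket",
        key: "reports/2024/summary.csv",
        objectAttributes: [.objectSize, .storageClass]
    ))
    // S3 returns only the attributes that were requested, so both fields are optional.
    print("size:", attributes.objectSize ?? 0, "class:", attributes.storageClass ?? .standard)
}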
Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. Versioning Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. Conditional request headers Consider the following when using request headers: If both of the If-Match and If-Unmodified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since condition evaluates to false. For more information about conditional requests, see RFC 7232. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows, then Amazon S3 returns the HTTP status code 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since condition evaluates to true. For more information about conditional requests, see RFC 7232. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following actions are related to GetObjectAttributes: GetObject GetObjectAcl GetObjectLegalHold GetObjectLockConfiguration GetObjectRetention GetObjectTagging HeadObject ListParts /// /// Parameters: /// - bucket: The name of the bucket that contains the object. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. @@ -2410,8 +2426,8 @@ public struct S3: AWSService { return try await self.headBucket(input, logger: logger) } - /// The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes. Request headers are limited to 8 KB in size. For more information, see Common Request Headers. Permissions General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. 
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Versioning If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response. If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header. Directory buckets - Delete marker is not supported by directory buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. The following actions are related to HeadObject: GetObject GetObjectAttributes + /// The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes. Request headers are limited to 8 KB in size. For more information, see Common Request Headers. Permissions General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide. If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error. 
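// Illustrative sketch, not part of the generated service code: a HEAD-based existence and
// metadata check. As noted above, a missing object surfaces as 404 Not Found when the
// caller also has s3:ListBucket on the bucket, and as 403 Forbidden otherwise, so the
// catch below treats both as "not readable". Assumes the same `import SotoS3` and
// configured `S3` client as the getObject sketch earlier; bucket and key are placeholders.
func objectSummary(s3: S3) async {
    do {
        let head = try await s3.headObject(.init(
            bucket: "example-bucket",
            key: "reports/2024/summary.csv"
        ))
        print("exists:", head.contentLength ?? 0, "bytes, ETag", head.eTag ?? "-")
    } catch {
        // 404 or 403 depending on whether the caller also holds s3:ListBucket.
        print("object is not readable:", error)
    }
}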
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If you enable x-amz-checksum-mode in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object. Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. Versioning If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response. If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header. Directory buckets - Delete marker is not supported by directory buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. 
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. The following actions are related to HeadObject: GetObject GetObjectAttributes @Sendable @inlinable public func headObject(_ input: HeadObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> HeadObjectOutput { @@ -2424,12 +2440,12 @@ public struct S3: AWSService { logger: logger ) } - /// The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes. Request headers are limited to 8 KB in size. For more information, see Common Request Headers. Permissions General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. 
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Versioning If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response. If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header. Directory buckets - Delete marker is not supported by directory buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. The following actions are related to HeadObject: GetObject GetObjectAttributes + /// The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're interested only in an object's metadata. A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not possible to retrieve the exact exception of these error codes. Request headers are limited to 8 KB in size. For more information, see Common Request Headers. Permissions General purpose bucket permissions - To use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation. For more information, see Actions, resources, and condition keys for Amazon S3 in the Amazon S3 User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide. If the object you request doesn't exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden error. 
Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If you enable x-amz-checksum-mode in the request and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object. Encryption Encryption request headers, like x-amz-server-side-encryption, should not be sent for HEAD requests if your object uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. If you include this header in a HEAD request for an object that uses these types of keys, you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object. If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are: x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. Versioning If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response. If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header. Directory buckets - Delete marker is not supported by directory buckets. Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. 
For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. The following actions are related to HeadObject: GetObject GetObjectAttributes /// /// Parameters: /// - bucket: The name of the bucket that contains the object. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - checksumMode: To retrieve the checksum, this parameter must be enabled. In addition, if you enable checksum mode and the object is uploaded with a checksum and encrypted with a Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. + /// - checksumMode: To retrieve the checksum, this parameter must be enabled. General purpose buckets - If you enable checksum mode and the object is uploaded with a checksum and encrypted with a Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. Directory buckets - If you enable ChecksumMode and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - ifMatch: Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error.
If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon S3 returns 200 OK and the data requested. For more information about conditional requests, see RFC 7232. /// - ifModifiedSince: Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; Then Amazon S3 returns the 304 Not Modified response code. For more information about conditional requests, see RFC 7232. @@ -2654,18 +2670,24 @@ public struct S3: AWSService { /// This operation is not supported by directory buckets. Returns a list of all buckets owned by the authenticated sender of the request. To use this operation, you must have the s3:ListAllMyBuckets permission. For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets. /// /// Parameters: + /// - bucketRegion: Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints. Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2. /// - continuationToken: ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. You can use this ContinuationToken for pagination of the list results. Length Constraints: Minimum length of 0. Maximum length of 1024. Required: No. /// - maxBuckets: Maximum number of buckets to be returned in response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response. + /// - prefix: Limits the response to bucket names that begin with the specified bucket name prefix. /// - logger: Logger use during operation @inlinable public func listBuckets( + bucketRegion: String? = nil, continuationToken: String? = nil, maxBuckets: Int? = nil, + prefix: String? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> ListBucketsOutput { let input = ListBucketsRequest( + bucketRegion: bucketRegion, continuationToken: continuationToken, - maxBuckets: maxBuckets + maxBuckets: maxBuckets, + prefix: prefix ) return try await self.listBuckets(input, logger: logger) } @@ -3163,7 +3185,11 @@ public struct S3: AWSService { return try await self.putBucketCors(input, logger: logger) } - /// This operation is not supported by directory buckets. This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). 
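Before the PutBucketEncryption documentation continues below, a hedged sketch of the new ListBuckets filters introduced in the hunk above (`bucketRegion` and `prefix`); the Region, bucket-name prefix, and client setup are placeholders and assumptions:

```swift
import SotoS3

// Sketch only: lists buckets in us-west-2 whose names start with "logs-".
// Per the parameter documentation above, the request itself must be sent to an
// endpoint in the same Region as the bucketRegion filter.
func listFilteredBuckets() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)
    let output = try await s3.listBuckets(
        bucketRegion: "us-west-2",
        maxBuckets: 50,
        prefix: "logs-"
    )
    for bucket in output.buckets ?? [] {
        print(bucket.name ?? "<unnamed>")
    }
    try await client.shutdown()
}
```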
You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests. If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4). To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption + /// This operation configures default encryption and Amazon S3 Bucket Keys for an existing bucket. Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. + /// For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). General purpose buckets You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests. Directory buckets - You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. 
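As an illustration of the SSE-KMS default-encryption behavior described above, here is a hedged sketch of a `putBucketEncryption` call that sets a customer managed KMS key as the bucket default; the bucket name and key ARN are placeholders, and the `ServerSideEncryptionConfiguration`/`Rule`/`ByDefault` initializers follow Soto's generated member names as I understand them:

```swift
import SotoS3

// Sketch only: set the bucket default encryption to SSE-KMS with a customer
// managed key. Use the key ID or full key ARN (not an alias), as noted above.
func setDefaultEncryptionToSSEKMS() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)
    let configuration = S3.ServerSideEncryptionConfiguration(rules: [
        .init(
            applyServerSideEncryptionByDefault: .init(
                kmsMasterKeyID: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID", // placeholder ARN
                sseAlgorithm: .awsKms
            ),
            bucketKeyEnabled: true
        )
    ])
    try await s3.putBucketEncryption(
        bucket: "my-example-bucket",        // placeholder bucket name
        serverSideEncryptionConfiguration: configuration
    )
    try await client.shutdown()
}
```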
+ /// The Amazon Web Services managed key (aws/s3) isn't supported. + /// S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported. For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests. If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4). Permissions General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption @Sendable @inlinable public func putBucketEncryption(_ input: PutBucketEncryptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws { @@ -3176,13 +3202,17 @@ public struct S3: AWSService { logger: logger ) } - /// This operation is not supported by directory buckets. This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). 
If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests. If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4). To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. The following operations are related to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption + /// This operation configures default encryption and Amazon S3 Bucket Keys for an existing bucket. Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. + /// For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). General purpose buckets You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests. Directory buckets - You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. + /// The Amazon Web Services managed key (aws/s3) isn't supported. + /// S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. 
S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported. For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests. If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, this action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4). Permissions General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide. To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. The following operations are related to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption /// /// Parameters: - /// - bucket: Specifies default encryption for a bucket using server-side encryption with different key options. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. - /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. 
Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. - /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). + /// - bucket: Specifies default encryption for a bucket using server-side encryption with different key options. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide + /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. + /// - contentMD5: The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. This functionality is not supported for directory buckets. + /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code /// - serverSideEncryptionConfiguration: /// - logger: Logger use during operation @inlinable @@ -3277,10 +3307,10 @@ public struct S3: AWSService { return try await self.putBucketInventoryConfiguration(input, logger: logger) } - /// This operation is not supported by directory buckets. Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle. 
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle. Rules You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Each rule consists of the following: A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these. A status indicating whether the rule is in effect. One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions. For more information, see Object Lifecycle Management and Lifecycle Configuration Elements. Permissions By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions: s3:DeleteObject s3:DeleteObjectVersion s3:PutLifecycleConfiguration For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to PutBucketLifecycleConfiguration: Examples of Lifecycle Configuration GetBucketLifecycleConfiguration DeleteBucketLifecycle + /// This operation is not supported by directory buckets. Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle. Rules You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. 
For the related API description, see PutBucketLifecycle. A lifecycle rule consists of the following: A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these. A status indicating whether the rule is in effect. One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions. For more information, see Object Lifecycle Management and Lifecycle Configuration Elements. Permissions By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions: s3:DeleteObject s3:DeleteObjectVersion s3:PutLifecycleConfiguration For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to PutBucketLifecycleConfiguration: Examples of Lifecycle Configuration GetBucketLifecycleConfiguration DeleteBucketLifecycle @Sendable @inlinable - public func putBucketLifecycleConfiguration(_ input: PutBucketLifecycleConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws { + public func putBucketLifecycleConfiguration(_ input: PutBucketLifecycleConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutBucketLifecycleConfigurationOutput { try await self.client.execute( operation: "PutBucketLifecycleConfiguration", path: "/{Bucket}?lifecycle", @@ -3290,13 +3320,14 @@ public struct S3: AWSService { logger: logger ) } - /// This operation is not supported by directory buckets. Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle. Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle. Rules You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. 
Each rule consists of the following: A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these. A status indicating whether the rule is in effect. One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions. For more information, see Object Lifecycle Management and Lifecycle Configuration Elements. Permissions By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions: s3:DeleteObject s3:DeleteObjectVersion s3:PutLifecycleConfiguration For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to PutBucketLifecycleConfiguration: Examples of Lifecycle Configuration GetBucketLifecycleConfiguration DeleteBucketLifecycle + /// This operation is not supported by directory buckets. Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle. Rules You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable. Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle. A lifecycle rule consists of the following: A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these. A status indicating whether the rule is in effect. One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). 
Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions. For more information, see Object Lifecycle Management and Lifecycle Configuration Elements. Permissions By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission. You can also explicitly deny permissions. An explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions: s3:DeleteObject s3:DeleteObjectVersion s3:PutLifecycleConfiguration For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources. The following operations are related to PutBucketLifecycleConfiguration: Examples of Lifecycle Configuration GetBucketLifecycleConfiguration DeleteBucketLifecycle /// /// Parameters: /// - bucket: The name of the bucket for which to set the configuration. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - lifecycleConfiguration: Container for lifecycle rules. You can add as many as 1,000 rules. + /// - transitionDefaultMinimumObjectSize: Indicates which default minimum object size behavior is applied to the lifecycle configuration. all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default. varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB. To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior. /// - logger: Logger use during operation @inlinable public func putBucketLifecycleConfiguration( @@ -3304,13 +3335,15 @@ public struct S3: AWSService { checksumAlgorithm: ChecksumAlgorithm? = nil, expectedBucketOwner: String? = nil, lifecycleConfiguration: BucketLifecycleConfiguration? = nil, + transitionDefaultMinimumObjectSize: TransitionDefaultMinimumObjectSize? 
= nil, logger: Logger = AWSClient.loggingDisabled - ) async throws { + ) async throws -> PutBucketLifecycleConfigurationOutput { let input = PutBucketLifecycleConfigurationRequest( bucket: bucket, checksumAlgorithm: checksumAlgorithm, expectedBucketOwner: expectedBucketOwner, - lifecycleConfiguration: lifecycleConfiguration + lifecycleConfiguration: lifecycleConfiguration, + transitionDefaultMinimumObjectSize: transitionDefaultMinimumObjectSize ) return try await self.putBucketLifecycleConfiguration(input, logger: logger) } @@ -3489,7 +3522,7 @@ public struct S3: AWSService { /// /// Parameters: /// - bucket: The name of the bucket. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide - /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. + /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. /// - confirmRemoveSelfBucketAccess: Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future. This functionality is not supported for directory buckets. 
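Before the PutBucketPolicy parameter documentation continues, a hedged sketch of the updated `putBucketLifecycleConfiguration` convenience method shown just above, which now accepts `transitionDefaultMinimumObjectSize` and returns a `PutBucketLifecycleConfigurationOutput`; the lifecycle rule member names and enum case below are assumptions based on Soto's generated S3 model, and the bucket name is a placeholder:

```swift
import SotoS3

// Sketch only: one rule that transitions logs/ objects to Glacier after 30 days
// and expires them after a year, using the new default-minimum-object-size option.
func applyLifecycleConfiguration() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)
    let rule = S3.LifecycleRule(
        expiration: .init(days: 365),
        filter: .init(prefix: "logs/"),          // assumed filter initializer
        id: "expire-old-logs",
        status: .enabled,
        transitions: [.init(days: 30, storageClass: .glacier)]
    )
    let output = try await s3.putBucketLifecycleConfiguration(
        bucket: "my-example-bucket",             // placeholder bucket name
        lifecycleConfiguration: .init(rules: [rule]),
        transitionDefaultMinimumObjectSize: .variesByStorageClass // assumed case name
    )
    print(output.transitionDefaultMinimumObjectSize as Any)
    try await client.shutdown()
}
```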
/// - contentMD5: The MD5 hash of the request body. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. This functionality is not supported for directory buckets. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code @@ -3728,7 +3761,7 @@ public struct S3: AWSService { } /// Adds an object to a bucket. Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values. If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior: S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide. This functionality is not supported for directory buckets. S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning. This functionality is not supported for directory buckets. Permissions General purpose bucket permissions - The following permissions are required in your policies when your PutObject request includes specific headers. s3:PutObject - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object to it. s3:PutObjectAcl - To successfully change the objects ACL of your PutObject request, you must have the s3:PutObjectAcl. s3:PutObjectTagging - To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. 
Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Data integrity with Content-MD5 General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory bucket - This functionality is not supported for directory buckets. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. For more information about related Amazon S3 APIs, see the following: CopyObject DeleteObject + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Data integrity with Content-MD5 General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory bucket - This functionality is not supported for directory buckets. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. For more information about related Amazon S3 APIs, see the following: CopyObject DeleteObject @Sendable @inlinable public func putObject(_ input: PutObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> PutObjectOutput { @@ -3742,24 +3775,24 @@ public struct S3: AWSService { ) } /// Adds an object to a bucket. Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values. If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All objects written to the bucket by any account will be owned by the bucket owner. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. 
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior: S3 Object Lock - To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock in the Amazon S3 User Guide. This functionality is not supported for directory buckets. S3 Versioning - When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID of that object being stored in Amazon S3. You can retrieve, replace, or delete any version of the object. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning. This functionality is not supported for directory buckets. Permissions General purpose bucket permissions - The following permissions are required in your policies when your PutObject request includes specific headers. s3:PutObject - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object to it. s3:PutObjectAcl - To successfully change the objects ACL of your PutObject request, you must have the s3:PutObjectAcl. s3:PutObjectTagging - To successfully set the tag-set with your PutObject request, you must have the s3:PutObjectTagging. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Data integrity with Content-MD5 General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory bucket - This functionality is not supported for directory buckets. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. For more information about related Amazon S3 APIs, see the following: CopyObject DeleteObject + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . 
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Data integrity with Content-MD5 General purpose bucket - To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory bucket - This functionality is not supported for directory buckets. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. For more information about related Amazon S3 APIs, see the following: CopyObject DeleteObject /// /// Parameters: /// - acl: The canned ACL to apply to the object. For more information, see Canned ACL in the Amazon S3 User Guide. When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide. This functionality is not supported for directory buckets. This functionality is not supported for Amazon S3 on Outposts. /// - body: Object data. /// - bucket: The bucket name to which the PUT action was initiated. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. 
For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key. This functionality is not supported for directory buckets. + /// - bucketKeyEnabled: Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). General purpose buckets - Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets /// - cacheControl: Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9. - /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. - /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. 
For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. + /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - contentDisposition: Specifies presentational information for the object. For more information, see https://www.rfc-editor.org/rfc/rfc6266#section-4. /// - contentEncoding: Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding. /// - contentLanguage: The language the content is in. /// - contentLength: Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length. - /// - contentMD5: The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. 
This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide. This functionality is not supported for directory buckets. + /// - contentMD5: The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. This functionality is not supported for directory buckets. /// - contentType: A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - expires: The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3. @@ -3774,12 +3807,12 @@ public struct S3: AWSService { /// - objectLockMode: The Object Lock mode that you want to apply to this object. This functionality is not supported for directory buckets. /// - objectLockRetainUntilDate: The date and time when you want this object's Object Lock to expire. Must be formatted as a timestamp parameter. This functionality is not supported for directory buckets. /// - requestPayer: - /// - serverSideEncryption: The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported. + /// - serverSideEncryption: The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). 
General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket. /// - sseCustomerAlgorithm: Specifies the algorithm to use when encrypting the object (for example, AES256). This functionality is not supported for directory buckets. /// - sseCustomerKey: Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header. This functionality is not supported for directory buckets. /// - sseCustomerKeyMD5: Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. 
This functionality is not supported for directory buckets. - /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object. This value must be explicitly added during CopyObject operations. This functionality is not supported for directory buckets. - /// - ssekmsKeyId: If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID. This functionality is not supported for directory buckets. + /// - ssekmsEncryptionContext: Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. + /// - ssekmsKeyId: Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. 
Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. /// - storageClass: By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide. For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. /// - tagging: The tag-set for the object. The tag-set must be encoded as URL Query parameters. (For example, "Key1=Value1") This functionality is not supported for directory buckets. /// - websiteRedirectLocation: If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see Object Key and Metadata in the Amazon S3 User Guide. In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket: x-amz-website-redirect-location: /anotherPage.html In the following example, the request header sets the object redirect to another website: x-amz-website-redirect-location: http://www.example.com/ For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects in the Amazon S3 User Guide. This functionality is not supported for directory buckets. @@ -4178,7 +4211,7 @@ public struct S3: AWSService { return try await self.putPublicAccessBlock(input, logger: logger) } - /// This operation is not supported by directory buckets. The SELECT job type for the RestoreObject operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more Restores an archived copy of an object back into Amazon S3 This functionality is not supported for Amazon S3 on Outposts. This action performs the following types of requests: restore an archive - Restore an archived object For more information about the S3 structure in the request body, see the following: PutObject Managing Access with ACLs in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Permissions To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. 
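// --- Illustrative usage sketch (editorial addition, not part of the generated diff) ---
// A minimal example of how a Soto user might exercise the PutObject parameters
// documented above (SSE-KMS, S3 Bucket Key, and an SDK-computed checksum), assuming
// an `S3` service object created from an `AWSClient`. The bucket name, object key,
// and KMS key ARN are hypothetical placeholders, the payload is omitted for brevity,
// and enum case spellings follow Soto's generated enums (treat them as assumptions).
import SotoS3

func putEncryptedObject(s3: S3) async throws {
    let request = S3.PutObjectRequest(
        bucket: "amzn-s3-demo-bucket",           // hypothetical bucket
        bucketKeyEnabled: true,                  // use an S3 Bucket Key with SSE-KMS
        checksumAlgorithm: .sha256,              // let the SDK compute a SHA-256 checksum
        key: "greetings/hello.txt",              // hypothetical key; body omitted (zero-byte object)
        serverSideEncryption: .awsKms,           // SSE-KMS, per the serverSideEncryption docs above
        ssekmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"  // hypothetical key ARN
    )
    _ = try await s3.putObject(request)
}
// --- end sketch ---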
To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version. When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body: Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide. After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. 
You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object. If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide. Responses A successful action returns either the 200 OK or 202 Accepted status code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response. If the object is previously restored, Amazon S3 returns 200 OK in the response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The following operations are related to RestoreObject: PutBucketLifecycleConfiguration GetBucketNotificationConfiguration + /// This operation is not supported by directory buckets. Restores an archived copy of an object back into Amazon S3 This functionality is not supported for Amazon S3 on Outposts. This action performs the following types of requests: restore an archive - Restore an archived object For more information about the S3 structure in the request body, see the following: PutObject Managing Access with ACLs in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Permissions To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version. 
When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body: Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide. After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object. If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. 
For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide. Responses A successful action returns either the 200 OK or 202 Accepted status code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response. If the object is previously restored, Amazon S3 returns 200 OK in the response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The following operations are related to RestoreObject: PutBucketLifecycleConfiguration GetBucketNotificationConfiguration @Sendable @inlinable public func restoreObject(_ input: RestoreObjectRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RestoreObjectOutput { @@ -4191,7 +4224,7 @@ public struct S3: AWSService { logger: logger ) } - /// This operation is not supported by directory buckets. The SELECT job type for the RestoreObject operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more Restores an archived copy of an object back into Amazon S3 This functionality is not supported for Amazon S3 on Outposts. This action performs the following types of requests: restore an archive - Restore an archived object For more information about the S3 structure in the request body, see the following: PutObject Managing Access with ACLs in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Permissions To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version. 
When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body: Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide. After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object. If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. 
For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide. Responses A successful action returns either the 200 OK or 202 Accepted status code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response. If the object is previously restored, Amazon S3 returns 200 OK in the response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The following operations are related to RestoreObject: PutBucketLifecycleConfiguration GetBucketNotificationConfiguration + /// This operation is not supported by directory buckets. Restores an archived copy of an object back into Amazon S3 This functionality is not supported for Amazon S3 on Outposts. This action performs the following types of requests: restore an archive - Restore an archived object For more information about the S3 structure in the request body, see the following: PutObject Managing Access with ACLs in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Permissions To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version. When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body: Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. 
Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering. Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide. To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide. After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request-there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object. If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide. Responses A successful action returns either the 200 OK or 202 Accepted status code. If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response. If the object is previously restored, Amazon S3 returns 200 OK in the response. Special errors: Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. 
HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A The following operations are related to RestoreObject: PutBucketLifecycleConfiguration GetBucketNotificationConfiguration /// /// Parameters: /// - bucket: The bucket name containing the object to restore. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. @@ -4225,7 +4258,7 @@ public struct S3: AWSService { return try await self.restoreObject(input, logger: logger) } - /// This operation is not supported by directory buckets. The SelectObjectContent operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the operation as usual. Learn more This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. This functionality is not supported for Amazon S3 on Outposts. For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide. Permissions You must have the s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to query objects that have the following format properties: CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects. 
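// --- Illustrative usage sketch (editorial addition, not part of the generated diff) ---
// A minimal example of the RestoreObject call documented above: initiate a
// Standard-tier restore of an archived object for 10 days. The bucket and key are
// hypothetical placeholders; a 202 Accepted is returned for a new restore and
// 200 OK if the object has already been restored.
import SotoS3

func restoreArchivedObject(s3: S3) async throws {
    let request = S3.RestoreObjectRequest(
        bucket: "amzn-s3-demo-bucket",                   // hypothetical bucket
        key: "archive/report-2023.csv",                  // hypothetical key of an archived object
        restoreRequest: .init(
            days: 10,                                    // lifetime of the temporary copy
            glacierJobParameters: .init(tier: .standard) // Expedited, Standard, or Bulk
        )
    )
    _ = try await s3.restoreObject(request)
}
// --- end sketch ---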
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption. For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide. Working with the Response Body Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response. GetObject Support The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject. Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return. The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide. Special Errors For a list of special errors for this operation, see List of SELECT Object Content Error Codes The following operations are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration PutBucketLifecycleConfiguration + /// This operation is not supported by directory buckets. This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. This functionality is not supported for Amazon S3 on Outposts. For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide. Permissions You must have the s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to query objects that have the following format properties: CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. 
Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects. Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption. For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide. Working with the Response Body Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response. GetObject Support The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject. Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return. The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide. Special Errors For a list of special errors for this operation, see List of SELECT Object Content Error Codes The following operations are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration PutBucketLifecycleConfiguration @Sendable @inlinable public func selectObjectContent(_ input: SelectObjectContentRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SelectObjectContentOutput { @@ -4238,7 +4271,7 @@ public struct S3: AWSService { logger: logger ) } - /// This operation is not supported by directory buckets. The SelectObjectContent operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the operation as usual. Learn more This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. This functionality is not supported for Amazon S3 on Outposts. For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide. Permissions You must have the s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. 
For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to query objects that have the following format properties: CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects. Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption. For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide. Working with the Response Body Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response. GetObject Support The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject. Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return. The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide. Special Errors For a list of special errors for this operation, see List of SELECT Object Content Error Codes The following operations are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration PutBucketLifecycleConfiguration + /// This operation is not supported by directory buckets. This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response. This functionality is not supported for Amazon S3 on Outposts. For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide. 
Permissions You must have the s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to query objects that have the following format properties: CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects. Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption. For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide. Working with the Response Body Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response. GetObject Support The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject. Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return. The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide. Special Errors For a list of special errors for this operation, see List of SELECT Object Content Error Codes The following operations are related to SelectObjectContent: GetObject GetBucketLifecycleConfiguration PutBucketLifecycleConfiguration /// /// Parameters: /// - bucket: The S3 bucket. @@ -4288,7 +4321,7 @@ public struct S3: AWSService { } /// Uploads a part in a multipart upload. In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation. You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. 
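// --- Illustrative usage sketch (editorial addition, not part of the generated diff) ---
// A minimal example of the SelectObjectContent call documented above: run a SQL
// expression against a CSV object and ask for JSON records back. The bucket, key,
// and query are hypothetical placeholders, and consumption of the streamed response
// events (records, progress, stats, end) is omitted.
import SotoS3

func selectRowsFromCSV(s3: S3) async throws {
    let request = S3.SelectObjectContentRequest(
        bucket: "amzn-s3-demo-bucket",                                  // hypothetical bucket
        expression: "SELECT s.name FROM S3Object s WHERE s.city = 'Seattle'",
        expressionType: .sql,
        inputSerialization: .init(csv: .init(fileHeaderInfo: .use)),    // CSV input with a header row
        key: "data/customers.csv",                                      // hypothetical key
        outputSerialization: .init(json: .init(recordDelimiter: "\n"))  // newline-delimited JSON records
    )
    _ = try await s3.selectObjectContent(request)
}
// --- end sketch ---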
In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request. Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide. After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide . Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Data integrity General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. 
If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4). Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity. Encryption General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload. If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 Directory bucket - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide. Special errors Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to UploadPart: CreateMultipartUpload CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Data integrity General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. 
If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4). Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity. Encryption General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload. If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information, see Using Server-Side Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). Special errors Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to UploadPart: CreateMultipartUpload CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads @Sendable @inlinable public func uploadPart(_ input: UploadPartRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UploadPartOutput { @@ -4302,14 +4335,14 @@ public struct S3: AWSService { ) } /// Uploads a part in a multipart upload. In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation. You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. 
In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request. Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide. After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage. For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide . Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions - To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. - /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . Data integrity General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. 
If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4). Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity. Encryption General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload. If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 Directory bucket - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide. Special errors Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to UploadPart: CreateMultipartUpload CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads + /// Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession . If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. Data integrity General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. 
If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4). Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity. Encryption General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload. If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 For more information, see Using Server-Side Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). Special errors Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to UploadPart: CreateMultipartUpload CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads /// /// Parameters: /// - body: Object data. /// - bucket: The name of the bucket to which the multipart upload was initiated. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. 
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. /// - checksumAlgorithm: Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must be the same for all parts and it match the checksum value supplied in the CreateMultipartUpload request. - /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. - /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. 
/// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. /// - contentLength: Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. @@ -4366,7 +4399,8 @@ public struct S3: AWSService { return try await self.uploadPart(input, logger: logger) } - /// Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request. For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide. Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request. You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request. For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have READ access to the source object and WRITE access to the destination bucket. General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation. If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied. If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket. To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. 
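As a concrete illustration of the UploadPart flow documented above (initiate, upload numbered parts, then complete or abort so the parts stop accruing storage charges), the following hedged Soto sketch uploads a single part and completes the upload. Bucket and key names are placeholders, the request-struct initializer labels are assumed from Soto's generated API, and checksum and SSE-C headers are omitted for brevity.

```swift
import NIOCore
import SotoS3

// Assumptions: `s3` is an existing S3 service client; error handling is minimal.
struct MissingUploadID: Error {}

func uploadSinglePartObject(s3: S3) async throws {
    let bucket = "example-bucket"    // placeholder
    let key = "backups/archive.bin"  // placeholder

    // 1. Initiate the multipart upload; S3 returns the upload ID used by every subsequent part request.
    let created = try await s3.createMultipartUpload(.init(bucket: bucket, key: key))
    guard let uploadId = created.uploadId else { throw MissingUploadID() }

    // 2. Upload part number 1 (part numbers run from 1 to 10,000; re-using a number overwrites that part).
    let partData = ByteBuffer(string: String(repeating: "x", count: 5 * 1024 * 1024))
    let part = try await s3.uploadPart(.init(
        body: .init(buffer: partData),
        bucket: bucket,
        key: key,
        partNumber: 1,
        uploadId: uploadId
    ))

    // 3. Complete (or abort) the upload so the stored parts stop being charged.
    _ = try await s3.completeMultipartUpload(.init(
        bucket: bucket,
        key: key,
        multipartUpload: .init(parts: [.init(eTag: part.eTag, partNumber: 1)]),
        uploadId: uploadId
    ))
}
```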
Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Encryption General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart. Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Special errors Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found Error Code: InvalidRequest Description: The specified copy source is not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to UploadPartCopy: CreateMultipartUpload UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads + /// Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request. For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide. Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request. You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request. For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. 
For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have READ access to the source object and WRITE access to the destination bucket. General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation. If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied. If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket. To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination. 
If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Encryption General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. Special errors Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found Error Code: InvalidRequest Description: The specified copy source is not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to UploadPartCopy: CreateMultipartUpload UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads @Sendable @inlinable public func uploadPartCopy(_ input: UploadPartCopyRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UploadPartCopyOutput { @@ -4379,7 +4413,8 @@ public struct S3: AWSService { logger: logger ) } - /// Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request. For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide. Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request. You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request. For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide. 
Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have READ access to the source object and WRITE access to the destination bucket. General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation. If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied. If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket. To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. 
Encryption General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart. Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Special errors Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. HTTP Status Code: 404 Not Found Error Code: InvalidRequest Description: The specified copy source is not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to UploadPartCopy: CreateMultipartUpload UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads + /// Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request. For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide. Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request. You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request. For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication. Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf. Permissions You must have READ access to the source object and WRITE access to the destination bucket. General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation. If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied. 
If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket. To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide. Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation. If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination. If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key. For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Encryption General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. Special errors Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed. 
HTTP Status Code: 404 Not Found Error Code: InvalidRequest Description: The specified copy source is not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request HTTP Host header syntax Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to UploadPartCopy: CreateMultipartUpload UploadPart CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads /// /// Parameters: /// - bucket: The bucket name. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. @@ -4470,8 +4505,8 @@ public struct S3: AWSService { /// - body: The object data. /// - bucketKeyEnabled: Indicates whether the object stored in Amazon S3 uses an S3 bucket key for server-side encryption with Amazon Web Services KMS (SSE-KMS). /// - cacheControl: Specifies caching behavior along the request/reply chain. - /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. - /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. 
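The UploadPartCopy notes above boil down to: start a multipart upload on the destination, then copy byte ranges of an existing object into numbered parts. A hedged Soto sketch follows, assuming an upload ID from a prior CreateMultipartUpload and placeholder bucket and key names; the x-amz-copy-source and x-amz-copy-source-range headers are expressed through the request struct.

```swift
import SotoS3

// Assumptions: `s3` is an S3 service client and `uploadId` came from createMultipartUpload
// on the destination bucket/key (see the UploadPart sketch earlier in this section).
func copyFirstPart(s3: S3, uploadId: String) async throws -> S3.CompletedPart {
    let copied = try await s3.uploadPartCopy(.init(
        bucket: "destination-bucket",                  // placeholder destination bucket
        copySource: "source-bucket/source-object.bin", // maps to the x-amz-copy-source header
        copySourceRange: "bytes=0-5242879",            // maps to x-amz-copy-source-range (first 5 MiB)
        key: "copied/object.bin",                      // placeholder destination key
        partNumber: 1,
        uploadId: uploadId
    ))
    // The copy result carries the part's ETag, which CompleteMultipartUpload needs later.
    return .init(eTag: copied.copyPartResult?.eTag, partNumber: 1)
}
```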
Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// - checksumCRC32: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// - checksumCRC32C: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. /// - checksumSHA1: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. /// - checksumSHA256: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 256-bit SHA-256 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. /// - contentDisposition: Specifies presentational information for the object. @@ -4631,15 +4666,21 @@ extension S3 { /// Return PaginatorSequence for operation ``listBuckets(_:logger:)``. /// /// - Parameters: + /// - bucketRegion: Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. 
For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints. Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2. /// - maxBuckets: Maximum number of buckets to be returned in response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response. + /// - prefix: Limits the response to bucket names that begin with the specified bucket name prefix. /// - logger: Logger used for logging @inlinable public func listBucketsPaginator( + bucketRegion: String? = nil, maxBuckets: Int? = nil, + prefix: String? = nil, logger: Logger = AWSClient.loggingDisabled ) -> AWSClient.PaginatorSequence { let input = ListBucketsRequest( - maxBuckets: maxBuckets + bucketRegion: bucketRegion, + maxBuckets: maxBuckets, + prefix: prefix ) return self.listBucketsPaginator(input, logger: logger) } @@ -4805,8 +4846,10 @@ extension S3.ListBucketsRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> S3.ListBucketsRequest { return .init( + bucketRegion: self.bucketRegion, continuationToken: token, - maxBuckets: self.maxBuckets + maxBuckets: self.maxBuckets, + prefix: self.prefix ) } } @@ -4966,7 +5009,7 @@ extension S3 { /// /// - Parameters: /// - bucket: The name of the bucket that contains the object. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - checksumMode: To retrieve the checksum, this parameter must be enabled. 
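The paginator change above adds bucketRegion and prefix filters to listBucketsPaginator and threads them through usingPaginationToken so they persist across pages. A small usage sketch, with illustrative filter values:

```swift
import SotoS3

// Assumption: `s3` is an S3 service client configured for the us-west-2 endpoint,
// matching the bucketRegion filter (requests to a different Regional endpoint are not supported).
func printLogBuckets(s3: S3) async throws {
    let pages = s3.listBucketsPaginator(
        bucketRegion: "us-west-2",  // only buckets located in this Region
        maxBuckets: 100,            // page size
        prefix: "logs-"             // only bucket names starting with this prefix
    )
    for try await page in pages {
        for bucket in page.buckets ?? [] {
            print(bucket.name ?? "<unnamed>")
        }
    }
}
```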
In addition, if you enable checksum mode and the object is uploaded with a checksum and encrypted with an Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. + /// - checksumMode: To retrieve the checksum, this parameter must be enabled. General purpose buckets - If you enable checksum mode and the object is uploaded with a checksum and encrypted with an Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. Directory buckets - If you enable ChecksumMode and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - ifMatch: Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error. If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon S3 returns 200 OK and the data requested. For more information about conditional requests, see RFC 7232. /// - ifModifiedSince: Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; Then Amazon S3 returns the 304 Not Modified response code. For more information about conditional requests, see RFC 7232. @@ -5062,7 +5105,7 @@ extension S3 { /// /// - Parameters: /// - bucket: The name of the bucket that contains the object. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. - /// - checksumMode: To retrieve the checksum, this parameter must be enabled. In addition, if you enable checksum mode and the object is uploaded with a checksum and encrypted with an Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. + /// - checksumMode: To retrieve the checksum, this parameter must be enabled. General purpose buckets - If you enable checksum mode and the object is uploaded with a checksum and encrypted with a Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. Directory buckets - If you enable ChecksumMode and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object. /// - expectedBucketOwner: The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). /// - ifMatch: Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error. If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon S3 returns 200 OK and the data requested. For more information about conditional requests, see RFC 7232. /// - ifModifiedSince: Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error. If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and; If-Modified-Since condition evaluates to true; Then Amazon S3 returns the 304 Not Modified response code. For more information about conditional requests, see RFC 7232. diff --git a/Sources/Soto/Services/S3/S3_shapes.swift b/Sources/Soto/Services/S3/S3_shapes.swift index b070ce473b..3f9fb22be8 100644 --- a/Sources/Soto/Services/S3/S3_shapes.swift +++ b/Sources/Soto/Services/S3/S3_shapes.swift @@ -491,6 +491,12 @@ extension S3 { public var description: String { return self.rawValue } } + public enum TransitionDefaultMinimumObjectSize: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case allStorageClasses128K = "all_storage_classes_128K" + case variesByStorageClass = "varies_by_storage_class" + public var description: String { return self.rawValue } + } + public enum TransitionStorageClass: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case deepArchive = "DEEP_ARCHIVE" case glacier = "GLACIER" @@ -574,81 +580,6 @@ extension S3 { } } - public enum LifecycleRuleFilter: AWSEncodableShape & AWSDecodableShape, Sendable { - case and(LifecycleRuleAndOperator) - /// Minimum object size to which the rule applies.
- case objectSizeGreaterThan(Int64) - /// Maximum object size to which the rule applies. - case objectSizeLessThan(Int64) - /// Prefix identifying one or more objects to which the rule applies. Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints. - case prefix(String) - /// This tag must exist in the object's tag set in order for the rule to apply. - case tag(Tag) - - public init(from decoder: Decoder) throws { - let container = try decoder.container(keyedBy: CodingKeys.self) - guard container.allKeys.count == 1, let key = container.allKeys.first else { - let context = DecodingError.Context( - codingPath: container.codingPath, - debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" - ) - throw DecodingError.dataCorrupted(context) - } - switch key { - case .and: - let value = try container.decode(LifecycleRuleAndOperator.self, forKey: .and) - self = .and(value) - case .objectSizeGreaterThan: - let value = try container.decode(Int64.self, forKey: .objectSizeGreaterThan) - self = .objectSizeGreaterThan(value) - case .objectSizeLessThan: - let value = try container.decode(Int64.self, forKey: .objectSizeLessThan) - self = .objectSizeLessThan(value) - case .prefix: - let value = try container.decode(String.self, forKey: .prefix) - self = .prefix(value) - case .tag: - let value = try container.decode(Tag.self, forKey: .tag) - self = .tag(value) - } - } - - public func encode(to encoder: Encoder) throws { - var container = encoder.container(keyedBy: CodingKeys.self) - switch self { - case .and(let value): - try container.encode(value, forKey: .and) - case .objectSizeGreaterThan(let value): - try container.encode(value, forKey: .objectSizeGreaterThan) - case .objectSizeLessThan(let value): - try container.encode(value, forKey: .objectSizeLessThan) - case .prefix(let value): - try container.encode(value, forKey: .prefix) - case .tag(let value): - try container.encode(value, forKey: .tag) - } - } - - public func validate(name: String) throws { - switch self { - case .and(let value): - try value.validate(name: "\(name).and") - case .tag(let value): - try value.validate(name: "\(name).tag") - default: - break - } - } - - private enum CodingKeys: String, CodingKey { - case and = "And" - case objectSizeGreaterThan = "ObjectSizeGreaterThan" - case objectSizeLessThan = "ObjectSizeLessThan" - case prefix = "Prefix" - case tag = "Tag" - } - } - public enum MetricsFilter: AWSEncodableShape & AWSDecodableShape, Sendable { /// The access point ARN used when evaluating a metrics filter. case accessPointArn(String) @@ -717,66 +648,6 @@ extension S3 { } } - public enum ReplicationRuleFilter: AWSEncodableShape & AWSDecodableShape, Sendable { - /// A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter. For example: If you specify both a Prefix and a Tag filter, wrap these filters in an And tag. If you specify a filter based on multiple tags, wrap the Tag elements in an And tag. - case and(ReplicationRuleAndOperator) - /// An object key name prefix that identifies the subset of objects to which the rule applies. Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints. 
- case prefix(String) - /// A container for specifying a tag key and value. The rule applies only to objects that have the tag in their tag set. - case tag(Tag) - - public init(from decoder: Decoder) throws { - let container = try decoder.container(keyedBy: CodingKeys.self) - guard container.allKeys.count == 1, let key = container.allKeys.first else { - let context = DecodingError.Context( - codingPath: container.codingPath, - debugDescription: "Expected exactly one key, but got \(container.allKeys.count)" - ) - throw DecodingError.dataCorrupted(context) - } - switch key { - case .and: - let value = try container.decode(ReplicationRuleAndOperator.self, forKey: .and) - self = .and(value) - case .prefix: - let value = try container.decode(String.self, forKey: .prefix) - self = .prefix(value) - case .tag: - let value = try container.decode(Tag.self, forKey: .tag) - self = .tag(value) - } - } - - public func encode(to encoder: Encoder) throws { - var container = encoder.container(keyedBy: CodingKeys.self) - switch self { - case .and(let value): - try container.encode(value, forKey: .and) - case .prefix(let value): - try container.encode(value, forKey: .prefix) - case .tag(let value): - try container.encode(value, forKey: .tag) - } - } - - public func validate(name: String) throws { - switch self { - case .and(let value): - try value.validate(name: "\(name).and") - case .tag(let value): - try value.validate(name: "\(name).tag") - default: - break - } - } - - private enum CodingKeys: String, CodingKey { - case and = "And" - case prefix = "Prefix" - case tag = "Tag" - } - } - public enum SelectObjectContentEventStream: AWSDecodableShape, Sendable { /// The Continuation Event. case cont(ContinuationEvent) @@ -1035,18 +906,22 @@ extension S3 { } public struct Bucket: AWSDecodableShape { + /// BucketRegion indicates the Amazon Web Services region where the bucket is located. If the request contains at least one valid parameter, it is included in the response. + public let bucketRegion: String? /// Date the bucket was created. This date can change when making changes to your bucket, such as editing its bucket policy. public let creationDate: Date? /// The name of the bucket. public let name: String? @inlinable - public init(creationDate: Date? = nil, name: String? = nil) { + public init(bucketRegion: String? = nil, creationDate: Date? = nil, name: String? = nil) { + self.bucketRegion = bucketRegion self.creationDate = creationDate self.name = name } private enum CodingKeys: String, CodingKey { + case bucketRegion = "BucketRegion" case creationDate = "CreationDate" case name = "Name" } @@ -1220,9 +1095,9 @@ extension S3 { } public struct Checksum: AWSDecodableShape { - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. 
Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -1262,11 +1137,11 @@ extension S3 { public struct CompleteMultipartUploadOutput: AWSDecodableShape { /// The name of the bucket that contains the newly created object. Does not return the access point ARN or access point alias if used. Access points are not supported by directory buckets. public let bucket: String? - /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets. + /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? 
- /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -1281,9 +1156,9 @@ extension S3 { /// The URI that identifies the newly created object. public let location: String? public let requestCharged: RequestCharged? - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms). public let serverSideEncryption: ServerSideEncryption? - /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. This functionality is not supported for directory buckets. + /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? /// Version ID of the newly created object, in case the bucket has versioning turned on. This functionality is not supported for directory buckets. public let versionId: String? @@ -1338,12 +1213,12 @@ extension S3 { } public struct CompleteMultipartUploadRequest: AWSEncodableShape { - public static let _xmlRootNodeName: String? = "MultipartUpload" + public static let _xmlRootNodeName: String? = "CompleteMultipartUpload" /// Name of the bucket to which the multipart upload was initiated. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. 
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. public let bucket: String - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -1426,9 +1301,9 @@ extension S3 { } public struct CompletedPart: AWSEncodableShape { - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. 
When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -1482,7 +1357,7 @@ extension S3 { } public struct CopyObjectOutput: AWSDecodableShape { - /// Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets. + /// Indicates whether the copied object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? /// Container for all response elements. public let copyObjectResult: CopyObjectResult @@ -1491,15 +1366,15 @@ extension S3 { /// If the object expiration is configured, the response includes this header. This functionality is not supported for directory buckets. public let expiration: String? public let requestCharged: RequestCharged? - /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). public let serverSideEncryption: ServerSideEncryption? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? 
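The CompleteMultipartUploadRequest hunk above corrects the XML root element to CompleteMultipartUpload and documents the per-part CRC-32/CRC-32C checksum headers. A hedged sketch of finishing an upload with the shapes from this file; the part list, upload ID, and helper name are placeholders.

import SotoS3

// Completes a multipart upload; the request body is serialized as a
// <CompleteMultipartUpload> document listing each part's number, ETag and
// optional checksum.
func finishUpload(s3: S3, bucket: String, key: String, uploadId: String, parts: [S3.CompletedPart]) async throws {
    let output = try await s3.completeMultipartUpload(
        bucket: bucket,
        key: key,
        multipartUpload: S3.CompletedMultipartUpload(parts: parts),
        uploadId: uploadId
    )
    print(output.eTag ?? "", output.checksumCRC32 ?? "")
}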
/// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This functionality is not supported for directory buckets. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. public let ssekmsEncryptionContext: String? - /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. This functionality is not supported for directory buckets. + /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? /// Version ID of the newly created copy. This functionality is not supported for directory buckets. public let versionId: String? @@ -1543,7 +1418,8 @@ extension S3 { public let acl: ObjectCannedACL? /// The name of the destination bucket. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. public let bucket: String - /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. 
Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. This functionality is not supported when the destination bucket is a directory bucket. + /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. public let bucketKeyEnabled: Bool? /// Specifies the caching behavior along the request/reply chain. public let cacheControl: String? @@ -1604,7 +1480,7 @@ extension S3 { @OptionalCustomCoding public var objectLockRetainUntilDate: Date? public let requestPayer: RequestPayer? - /// The server-side encryption algorithm used when storing this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response. Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide. For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response. 
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a different default encryption configuration, Amazon S3 uses the corresponding encryption key to encrypt the target object copy. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption in the Amazon S3 User Guide. General purpose buckets For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy. When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. Directory buckets For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration. public let serverSideEncryption: ServerSideEncryption? /// Specifies the algorithm to use when encrypting the object (for example, AES256). 
When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can specify appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a KMS key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. This functionality is not supported when the destination bucket is a directory bucket. public let sseCustomerAlgorithm: String? @@ -1612,9 +1488,10 @@ extension S3 { public let sseCustomerKey: String? /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported when the destination bucket is a directory bucket. public let sseCustomerKeyMD5: String? - /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value must be explicitly added to specify encryption context for CopyObject requests. This functionality is not supported when the destination bucket is a directory bucket. + /// Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for the destination object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. General purpose buckets - This value must be explicitly added to specify encryption context for CopyObject requests if you want an additional encryption context for your destination object. The additional encryption context of the source object won't be copied to the destination object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. public let ssekmsEncryptionContext: String? - /// Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. This functionality is not supported when the destination bucket is a directory bucket. + /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. 
If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. + /// The Amazon Web Services managed key (aws/s3) isn't supported. public let ssekmsKeyId: String? /// If the x-amz-storage-class header is not used, the copied object will be stored in the STANDARD Storage Class by default. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. /// Unsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request. Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 by using the x-amz-storage-class header. For more information, see Storage Classes in the Amazon S3 User Guide. Before using an object as a source object for the copy operation, you must restore a copy of it if it meets any of the following conditions: The storage class of the source object is GLACIER or DEEP_ARCHIVE. The storage class of the source object is INTELLIGENT_TIERING and it's S3 Intelligent-Tiering access tier is Archive Access or Deep Archive Access. For more information, see RestoreObject and Copying Objects in the Amazon S3 User Guide. @@ -1728,9 +1605,9 @@ extension S3 { } public struct CopyObjectResult: AWSDecodableShape { - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -1762,9 +1639,9 @@ extension S3 { } public struct CopyPartResult: AWSDecodableShape { - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. 
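For the CopyObjectRequest encryption headers documented above (directory buckets now accept aws:kms with an explicitly specified key, an optional encryption context, and no S3 Bucket Keys on cross-bucket copies), a hedged sketch. The bucket names and key ARN are placeholders, and .awsKms is assumed to be Soto's case name for the aws:kms value.

import SotoS3

// Copies an object into a directory bucket, encrypting the copy with the same
// customer managed KMS key that is configured as the bucket's default SSE-KMS key.
func copyWithSSEKMS(s3: S3, sourceBucket: String, destinationBucket: String, key: String, kmsKeyArn: String) async throws {
    _ = try await s3.copyObject(
        bucket: destinationBucket,
        copySource: "\(sourceBucket)/\(key)",
        key: key,
        serverSideEncryption: .awsKms,  // assumed case name for "aws:kms"
        ssekmsKeyId: kmsKeyArn          // key ID or key ARN only; aliases are rejected
    )
}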
Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -1896,22 +1773,22 @@ extension S3 { public let abortRuleId: String? /// The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used. Access points are not supported by directory buckets. public let bucket: String? - /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets. + /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? /// The algorithm that was used to create a checksum of the object. public let checksumAlgorithm: ChecksumAlgorithm? /// Object key for which the multipart upload was initiated. public let key: String? public let requestCharged: RequestCharged? - /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. 
+ /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). public let serverSideEncryption: ServerSideEncryption? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This functionality is not supported for directory buckets. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. public let ssekmsEncryptionContext: String? - /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. This functionality is not supported for directory buckets. + /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? /// ID for the initiated multipart upload. public let uploadId: String? @@ -1963,7 +1840,8 @@ extension S3 { public let acl: ObjectCannedACL? /// The name of the bucket where the multipart upload is initiated and where the object is uploaded. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. 
For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. public let bucket: String - /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key. This functionality is not supported for directory buckets. + /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). General purpose buckets - Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. public let bucketKeyEnabled: Bool? /// Specifies caching behavior along the request/reply chain. public let cacheControl: String? @@ -2002,7 +1880,8 @@ extension S3 { @OptionalCustomCoding public var objectLockRetainUntilDate: Date? public let requestPayer: RequestPayer? - /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. 
You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket. + /// public let serverSideEncryption: ServerSideEncryption? /// Specifies the algorithm to use when encrypting the object (for example, AES256). This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? @@ -2010,9 +1889,10 @@ extension S3 { public let sseCustomerKey: String? /// Specifies the 128-bit MD5 digest of the customer-provided encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This functionality is not supported for directory buckets. + /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. public let ssekmsEncryptionContext: String? - /// Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption. This functionality is not supported for directory buckets. + /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. 
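A similar hedged sketch for the CreateMultipartUpload encryption headers described above; in a directory bucket these values should normally match the bucket's default (CreateSession) encryption configuration rather than overriding it per request. Names are placeholders and .awsKms is an assumed case name.

import SotoS3

// Starts a multipart upload whose parts will be encrypted with SSE-KMS.
func startEncryptedUpload(s3: S3, bucket: String, key: String, kmsKeyArn: String) async throws -> String? {
    let upload = try await s3.createMultipartUpload(
        bucket: bucket,
        key: key,
        serverSideEncryption: .awsKms,  // assumed case name for "aws:kms"
        ssekmsKeyId: kmsKeyArn
    )
    return upload.uploadId
}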
If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. + /// The Amazon Web Services managed key (aws/s3) isn't supported. public let ssekmsKeyId: String? /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide. For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. public let storageClass: StorageClass? @@ -2098,12 +1978,34 @@ extension S3 { } public struct CreateSessionOutput: AWSDecodableShape { + /// Indicates whether to use an S3 Bucket Key for server-side encryption with KMS keys (SSE-KMS). + public let bucketKeyEnabled: Bool? /// The established temporary security credentials for the created session. public let credentials: SessionCredentials + /// The server-side encryption algorithm used when you store objects in the directory bucket. + public let serverSideEncryption: ServerSideEncryption? + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. + public let ssekmsEncryptionContext: String? + /// If you specify x-amz-server-side-encryption with aws:kms, this header indicates the ID of the KMS symmetric encryption customer managed key that was used for object encryption. + public let ssekmsKeyId: String? @inlinable - public init(credentials: SessionCredentials) { + public init(bucketKeyEnabled: Bool? = nil, credentials: SessionCredentials, serverSideEncryption: ServerSideEncryption? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil) { + self.bucketKeyEnabled = bucketKeyEnabled self.credentials = credentials + self.serverSideEncryption = serverSideEncryption + self.ssekmsEncryptionContext = ssekmsEncryptionContext + self.ssekmsKeyId = ssekmsKeyId + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! 
ResponseDecodingContainer + let container = try decoder.container(keyedBy: CodingKeys.self) + self.bucketKeyEnabled = try response.decodeHeaderIfPresent(Bool.self, key: "x-amz-server-side-encryption-bucket-key-enabled") + self.credentials = try container.decode(SessionCredentials.self, forKey: .credentials) + self.serverSideEncryption = try response.decodeHeaderIfPresent(ServerSideEncryption.self, key: "x-amz-server-side-encryption") + self.ssekmsEncryptionContext = try response.decodeHeaderIfPresent(String.self, key: "x-amz-server-side-encryption-context") + self.ssekmsKeyId = try response.decodeHeaderIfPresent(String.self, key: "x-amz-server-side-encryption-aws-kms-key-id") } private enum CodingKeys: String, CodingKey { @@ -2114,20 +2016,38 @@ extension S3 { public struct CreateSessionRequest: AWSEncodableShape { /// The name of the bucket that you create a session for. public let bucket: String - /// Specifies the mode of the session that will be created, either ReadWrite or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session is capable of executing all the Zonal endpoint APIs on a directory bucket. A ReadOnly session is constrained to execute the following Zonal endpoint APIs: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, and ListMultipartUploads. + /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using KMS keys (SSE-KMS). S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. + public let bucketKeyEnabled: Bool? + /// The server-side encryption algorithm to use when you store objects in the directory bucket. For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). By default, Amazon S3 encrypts data with SSE-S3. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. + public let serverSideEncryption: ServerSideEncryption? + /// Specifies the mode of the session that will be created, either ReadWrite or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session is capable of executing all the Zonal endpoint API operations on a directory bucket. A ReadOnly session is constrained to execute the following Zonal endpoint API operations: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, and ListMultipartUploads. public let sessionMode: SessionMode? + /// Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. 
General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. + public let ssekmsEncryptionContext: String? + /// If you specify x-amz-server-side-encryption with aws:kms, you must specify the x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. + /// The Amazon Web Services managed key (aws/s3) isn't supported. + public let ssekmsKeyId: String? @inlinable - public init(bucket: String, sessionMode: SessionMode? = nil) { + public init(bucket: String, bucketKeyEnabled: Bool? = nil, serverSideEncryption: ServerSideEncryption? = nil, sessionMode: SessionMode? = nil, ssekmsEncryptionContext: String? = nil, ssekmsKeyId: String? = nil) { self.bucket = bucket + self.bucketKeyEnabled = bucketKeyEnabled + self.serverSideEncryption = serverSideEncryption self.sessionMode = sessionMode + self.ssekmsEncryptionContext = ssekmsEncryptionContext + self.ssekmsKeyId = ssekmsKeyId } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.bucket, key: "Bucket") + request.encodeHeader(self.bucketKeyEnabled, key: "x-amz-server-side-encryption-bucket-key-enabled") + request.encodeHeader(self.serverSideEncryption, key: "x-amz-server-side-encryption") request.encodeHeader(self.sessionMode, key: "x-amz-create-session-mode") + request.encodeHeader(self.ssekmsEncryptionContext, key: "x-amz-server-side-encryption-context") + request.encodeHeader(self.ssekmsKeyId, key: "x-amz-server-side-encryption-aws-kms-key-id") } private enum CodingKeys: CodingKey {} @@ -2228,9 +2148,10 @@ extension S3 { } public struct DeleteBucketEncryptionRequest: AWSEncodableShape { - /// The name of the bucket containing the server-side encryption configuration to delete. + /// The name of the bucket containing the server-side encryption configuration to delete. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide public let bucket: String - /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).
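Example (a minimal sketch, not part of the generated diff): the CreateSessionRequest hunk above adds the SSE-KMS parameters bucketKeyEnabled, serverSideEncryption, ssekmsEncryptionContext, and ssekmsKeyId. Assuming an already-configured S3 service object, placeholder bucket and KMS key identifiers, and Soto's usual generated enum case names (.awsKms for "aws:kms"), a session whose objects are protected with SSE-KMS might be requested like this:

import SotoS3

// Sketch: "s3" is an already-configured S3 service object.
// The bucket name and KMS key ARN below are placeholders.
func createEncryptedSession(s3: S3) async throws -> S3.CreateSessionOutput {
    let request = S3.CreateSessionRequest(
        bucket: "amzn-s3-demo-bucket--usw2-az1--x-s3",
        bucketKeyEnabled: true,            // always enabled for directory buckets
        serverSideEncryption: .awsKms,     // SSE-S3 (.aes256) is the default if omitted
        ssekmsKeyId: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID"
    )
    // The response carries the temporary session credentials used for Zonal endpoint calls.
    return try await s3.createSession(request)
}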
+ /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code + /// 501 Not Implemented. public let expectedBucketOwner: String? @inlinable @@ -2678,7 +2599,7 @@ extension S3 { public let bucket: String /// Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention permission. This functionality is not supported for directory buckets. public let bypassGovernanceRetention: Bool? - /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. public let checksumAlgorithm: ChecksumAlgorithm? /// Container for the request. public let delete: Delete @@ -3115,9 +3036,10 @@ extension S3 { } public struct GetBucketEncryptionRequest: AWSEncodableShape { - /// The name of the bucket from which the server-side encryption configuration is retrieved. + /// The name of the bucket from which the server-side encryption configuration is retrieved. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. 
Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide public let bucket: String - /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code + /// 501 Not Implemented. public let expectedBucketOwner: String? @inlinable @@ -3221,10 +3143,20 @@ extension S3 { public struct GetBucketLifecycleConfigurationOutput: AWSDecodableShape { /// Container for a lifecycle rule. public let rules: [LifecycleRule]? + /// Indicates which default minimum object size behavior is applied to the lifecycle configuration. all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default. varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB. To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior. + public let transitionDefaultMinimumObjectSize: TransitionDefaultMinimumObjectSize? @inlinable - public init(rules: [LifecycleRule]? = nil) { + public init(rules: [LifecycleRule]? = nil, transitionDefaultMinimumObjectSize: TransitionDefaultMinimumObjectSize? = nil) { self.rules = rules + self.transitionDefaultMinimumObjectSize = transitionDefaultMinimumObjectSize + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer + let container = try decoder.container(keyedBy: CodingKeys.self) + self.rules = try container.decodeIfPresent([LifecycleRule].self, forKey: .rules) + self.transitionDefaultMinimumObjectSize = try response.decodeHeaderIfPresent(TransitionDefaultMinimumObjectSize.self, key: "x-amz-transition-default-minimum-object-size") } private enum CodingKeys: String, CodingKey { @@ -4036,13 +3968,13 @@ extension S3 { public let acceptRanges: String? /// Object data. public let body: AWSHTTPBody - /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets. + /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? /// Specifies caching behavior along the request/reply chain. public let cacheControl: String? - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. 
For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -4090,13 +4022,13 @@ extension S3 { public let requestCharged: RequestCharged? /// Provides information about object restoration action and expiration time of the restored object copy. This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects. public let restore: String? - /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when you store this object in Amazon S3. public let serverSideEncryption: ServerSideEncryption? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. This functionality is not supported for directory buckets. + /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? /// Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects. Directory buckets - Only the S3 Express One Zone storage class is supported by directory buckets to store objects. public let storageClass: StorageClass? @@ -4195,7 +4127,7 @@ extension S3 { public static let _options: AWSShapeOptions = [.checksumHeader] /// The bucket name containing the object. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. 
Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Object Lambda access points - When you use this action with an Object Lambda access point, you must direct requests to the Object Lambda access point hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. public let bucket: String - /// To retrieve the checksum, this mode must be enabled. In addition, if you enable checksum mode and the object is uploaded with a checksum and encrypted with an Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. + /// To retrieve the checksum, this mode must be enabled. General purpose buckets - In addition, if you enable checksum mode and the object is uploaded with a checksum and encrypted with an Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. public let checksumMode: ChecksumMode? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -4628,13 +4560,13 @@ extension S3 { public let acceptRanges: String? /// The archive state of the head object. This functionality is not supported for directory buckets. public let archiveStatus: ArchiveStatus? - /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets. + /// Indicates whether the object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? /// Specifies caching behavior along the request/reply chain. public let cacheControl: String? - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. 
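Example (a sketch, not part of the diff): per the checksum-mode documentation above, retrieving a stored checksum only requires enabling ChecksumMode on the request; for SSE-KMS objects the caller also needs kms:Decrypt, as the doc comment notes. The bucket and key names below are placeholders:

import SotoS3

// Sketch: fetch an object and print its stored CRC-32 checksum, if any.
func fetchWithChecksum(s3: S3) async throws {
    let request = S3.GetObjectRequest(
        bucket: "amzn-s3-demo-bucket",
        checksumMode: .enabled,
        key: "example-object"
    )
    let output = try await s3.getObject(request)
    print(output.checksumCRC32 ?? "no CRC-32 checksum stored")
}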
+ /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -4680,13 +4612,13 @@ extension S3 { public let requestCharged: RequestCharged? /// If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see RestoreObject or an archive copy is already restored. If an archive copy is already restored, the header value indicates when Amazon S3 is scheduled to delete the object copy. For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header returns the value ongoing-request="true". For more information about archiving objects, see Transitioning Objects: General Considerations. This functionality is not supported for directory buckets. Only the S3 Express One Zone storage class is supported by directory buckets to store objects. public let restore: String? - /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). public let serverSideEncryption: ServerSideEncryption? 
/// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. This functionality is not supported for directory buckets. + /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? /// Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects. For more information, see Storage Classes. Directory buckets - Only the S3 Express One Zone storage class is supported by directory buckets to store objects. public let storageClass: StorageClass? @@ -4777,7 +4709,7 @@ extension S3 { public struct HeadObjectRequest: AWSEncodableShape { /// The name of the bucket that contains the object. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. public let bucket: String - /// To retrieve the checksum, this parameter must be enabled. In addition, if you enable checksum mode and the object is uploaded with a checksum and encrypted with an Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. + /// To retrieve the checksum, this parameter must be enabled. 
General purpose buckets - If you enable checksum mode and the object is uploaded with a checksum and encrypted with an Key Management Service (KMS) key, you must have permission to use the kms:Decrypt action to retrieve the checksum. Directory buckets - If you enable ChecksumMode and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object. public let checksumMode: ChecksumMode? /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). public let expectedBucketOwner: String? @@ -5316,6 +5248,40 @@ extension S3 { } } + public struct LifecycleRuleFilter: AWSEncodableShape & AWSDecodableShape { + public let and: LifecycleRuleAndOperator? + /// Minimum object size to which the rule applies. + public let objectSizeGreaterThan: Int64? + /// Maximum object size to which the rule applies. + public let objectSizeLessThan: Int64? + /// Prefix identifying one or more objects to which the rule applies. Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints. + public let prefix: String? + /// This tag must exist in the object's tag set in order for the rule to apply. + public let tag: Tag? + + @inlinable + public init(and: LifecycleRuleAndOperator? = nil, objectSizeGreaterThan: Int64? = nil, objectSizeLessThan: Int64? = nil, prefix: String? = nil, tag: Tag? = nil) { + self.and = and + self.objectSizeGreaterThan = objectSizeGreaterThan + self.objectSizeLessThan = objectSizeLessThan + self.prefix = prefix + self.tag = tag + } + + public func validate(name: String) throws { + try self.and?.validate(name: "\(name).and") + try self.tag?.validate(name: "\(name).tag") + } + + private enum CodingKeys: String, CodingKey { + case and = "And" + case objectSizeGreaterThan = "ObjectSizeGreaterThan" + case objectSizeLessThan = "ObjectSizeLessThan" + case prefix = "Prefix" + case tag = "Tag" + } + } + public struct ListBucketAnalyticsConfigurationsOutput: AWSDecodableShape { /// The list of analytics configurations for a bucket. public let analyticsConfigurationList: [AnalyticsConfiguration]? @@ -5530,38 +5496,50 @@ extension S3 { public let continuationToken: String? /// The owner of the buckets listed. public let owner: Owner? + /// If Prefix was sent with the request, it is included in the response. All bucket names in the response begin with the specified bucket name prefix. + public let prefix: String? @inlinable - public init(buckets: [Bucket]? = nil, continuationToken: String? = nil, owner: Owner? = nil) { + public init(buckets: [Bucket]? = nil, continuationToken: String? = nil, owner: Owner? = nil, prefix: String? = nil) { self.buckets = buckets self.continuationToken = continuationToken self.owner = owner + self.prefix = prefix } private enum CodingKeys: String, CodingKey { case buckets = "Buckets" case continuationToken = "ContinuationToken" case owner = "Owner" + case prefix = "Prefix" } } public struct ListBucketsRequest: AWSEncodableShape { + /// Limits the response to buckets that are located in the specified Amazon Web Services Region. 
The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints. Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2. + public let bucketRegion: String? /// ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key. You can use this ContinuationToken for pagination of the list results. Length Constraints: Minimum length of 0. Maximum length of 1024. Required: No. public let continuationToken: String? /// Maximum number of buckets to be returned in response. When the number is more than the count of buckets that are owned by an Amazon Web Services account, return all the buckets in response. public let maxBuckets: Int? + /// Limits the response to bucket names that begin with the specified bucket name prefix. + public let prefix: String? @inlinable - public init(continuationToken: String? = nil, maxBuckets: Int? = nil) { + public init(bucketRegion: String? = nil, continuationToken: String? = nil, maxBuckets: Int? = nil, prefix: String? = nil) { + self.bucketRegion = bucketRegion self.continuationToken = continuationToken self.maxBuckets = maxBuckets + self.prefix = prefix } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.bucketRegion, key: "bucket-region") request.encodeQuery(self.continuationToken, key: "continuation-token") request.encodeQuery(self.maxBuckets, key: "max-buckets") + request.encodeQuery(self.prefix, key: "prefix") } public func validate(name: String) throws { @@ -6659,9 +6637,9 @@ extension S3 { } public struct ObjectPart: AWSDecodableShape { - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. 
Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -6828,9 +6806,9 @@ extension S3 { } public struct Part: AWSDecodableShape { - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -6943,7 +6921,7 @@ extension S3 { public let blockPublicPolicy: Bool? /// Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket. Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set. public let ignorePublicAcls: Bool? 
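Example (a sketch, not part of the diff): the ListBucketsRequest additions shown earlier (the bucket-region and prefix query parameters) allow server-side filtering of the bucket listing. The Region code and name prefix below are placeholders:

import SotoS3

// Sketch: list buckets in us-west-2 whose names start with a given prefix.
// Note the request must be sent to an endpoint in the same Region as bucket-region.
func listMatchingBuckets(s3: S3) async throws {
    let request = S3.ListBucketsRequest(
        bucketRegion: "us-west-2",
        maxBuckets: 100,
        prefix: "amzn-s3-demo-"
    )
    let output = try await s3.listBuckets(request)
    for bucket in output.buckets ?? [] {
        print(bucket.name ?? "<unnamed>")
    }
}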
- /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has a public policy. Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. + /// Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has a public policy. Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked. public let restrictPublicBuckets: Bool? @inlinable @@ -7128,13 +7106,14 @@ extension S3 { public struct PutBucketEncryptionRequest: AWSEncodableShape { public static let _options: AWSShapeOptions = [.checksumHeader, .checksumRequired, .md5ChecksumHeader] public static let _xmlRootNodeName: String? = "ServerSideEncryptionConfiguration" - /// Specifies default encryption for a bucket using server-side encryption with different key options. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a customer-provided key (SSE-C). For information about the bucket default encryption feature, see Amazon S3 Bucket Default Encryption in the Amazon S3 User Guide. + /// Specifies default encryption for a bucket using server-side encryption with different key options. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide public let bucket: String - /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. public let checksumAlgorithm: ChecksumAlgorithm? - /// The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. + /// The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically. This functionality is not supported for directory buckets. public let contentMD5: String? - /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). + /// The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied). For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code + /// 501 Not Implemented. public let expectedBucketOwner: String? public let serverSideEncryptionConfiguration: ServerSideEncryptionConfiguration @@ -7222,6 +7201,23 @@ extension S3 { private enum CodingKeys: CodingKey {} } + public struct PutBucketLifecycleConfigurationOutput: AWSDecodableShape { + /// Indicates which default minimum object size behavior is applied to the lifecycle configuration. all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default. varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB. To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior. + public let transitionDefaultMinimumObjectSize: TransitionDefaultMinimumObjectSize? + + @inlinable + public init(transitionDefaultMinimumObjectSize: TransitionDefaultMinimumObjectSize? = nil) { + self.transitionDefaultMinimumObjectSize = transitionDefaultMinimumObjectSize + } + + public init(from decoder: Decoder) throws { + let response = decoder.userInfo[.awsResponse]! as! ResponseDecodingContainer + self.transitionDefaultMinimumObjectSize = try response.decodeHeaderIfPresent(TransitionDefaultMinimumObjectSize.self, key: "x-amz-transition-default-minimum-object-size") + } + + private enum CodingKeys: CodingKey {} + } + public struct PutBucketLifecycleConfigurationRequest: AWSEncodableShape { public static let _options: AWSShapeOptions = [.checksumHeader, .checksumRequired] public static let _xmlRootNodeName: String? = "LifecycleConfiguration" @@ -7233,13 +7229,16 @@ extension S3 { public let expectedBucketOwner: String? /// Container for lifecycle rules. You can add as many as 1,000 rules. public let lifecycleConfiguration: BucketLifecycleConfiguration? 
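Example (a sketch, not part of the diff): the lifecycle hunks above and below add the x-amz-transition-default-minimum-object-size header to both the request and the response. Assuming the operation now returns the new PutBucketLifecycleConfigurationOutput, that Soto generates the case name .allStorageClasses128K for the model value all_storage_classes_128K, and that a BucketLifecycleConfiguration has already been built, it might be used like this:

import SotoS3

// Sketch: apply a lifecycle configuration and opt into the 128 KB default
// minimum object size for transitions. The bucket name is a placeholder.
func applyLifecycle(s3: S3, configuration: S3.BucketLifecycleConfiguration) async throws {
    let request = S3.PutBucketLifecycleConfigurationRequest(
        bucket: "amzn-s3-demo-bucket",
        lifecycleConfiguration: configuration,
        transitionDefaultMinimumObjectSize: .allStorageClasses128K
    )
    let response = try await s3.putBucketLifecycleConfiguration(request)
    if let applied = response.transitionDefaultMinimumObjectSize {
        print("applied default minimum object size: \(applied)")
    }
}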
+ /// Indicates which default minimum object size behavior is applied to the lifecycle configuration. all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default. varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB. To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior. + public let transitionDefaultMinimumObjectSize: TransitionDefaultMinimumObjectSize? @inlinable - public init(bucket: String, checksumAlgorithm: ChecksumAlgorithm? = nil, expectedBucketOwner: String? = nil, lifecycleConfiguration: BucketLifecycleConfiguration? = nil) { + public init(bucket: String, checksumAlgorithm: ChecksumAlgorithm? = nil, expectedBucketOwner: String? = nil, lifecycleConfiguration: BucketLifecycleConfiguration? = nil, transitionDefaultMinimumObjectSize: TransitionDefaultMinimumObjectSize? = nil) { self.bucket = bucket self.checksumAlgorithm = checksumAlgorithm self.expectedBucketOwner = expectedBucketOwner self.lifecycleConfiguration = lifecycleConfiguration + self.transitionDefaultMinimumObjectSize = transitionDefaultMinimumObjectSize } public func encode(to encoder: Encoder) throws { @@ -7249,6 +7248,7 @@ extension S3 { request.encodeHeader(self.checksumAlgorithm, key: "x-amz-sdk-checksum-algorithm") request.encodeHeader(self.expectedBucketOwner, key: "x-amz-expected-bucket-owner") try container.encode(self.lifecycleConfiguration) + request.encodeHeader(self.transitionDefaultMinimumObjectSize, key: "x-amz-transition-default-minimum-object-size") } public func validate(name: String) throws { @@ -7396,7 +7396,7 @@ extension S3 { public static let _xmlRootNodeName: String? = "Policy" /// The name of the bucket. Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide public let bucket: String - /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . 
For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. public let checksumAlgorithm: ChecksumAlgorithm? /// Set this parameter to true to confirm that you want to remove your permissions to change this bucket policy in the future. This functionality is not supported for directory buckets. public let confirmRemoveSelfBucketAccess: Bool? @@ -7849,11 +7849,11 @@ extension S3 { } public struct PutObjectOutput: AWSDecodableShape { - /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets. + /// Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. 
This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -7864,15 +7864,15 @@ extension S3 { /// If the expiration is configured for the object (see PutBucketLifecycleConfiguration) in the Amazon S3 User Guide, the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL-encoded. This functionality is not supported for directory buckets. public let expiration: String? public let requestCharged: RequestCharged? - /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when you store this object in Amazon S3. public let serverSideEncryption: ServerSideEncryption? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object. This functionality is not supported for directory buckets. + /// If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. public let ssekmsEncryptionContext: String? - /// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. 
This functionality is not supported for directory buckets. + /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? /// Version ID of the object. If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets in the Amazon S3 User Guide. For information about returning the versioning state of a bucket, see GetBucketVersioning. This functionality is not supported for directory buckets. public let versionId: String? @@ -7925,15 +7925,16 @@ extension S3 { public let body: AWSHTTPBody? /// The bucket name to which the PUT action was initiated. Directory buckets - When you use this operation with a directory bucket, you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide. Access points - When you use this action with an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide. Access points and Object Lambda access points are not supported by directory buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When you use this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts access point ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see What is S3 on Outposts? in the Amazon S3 User Guide. public let bucket: String - /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key. This functionality is not supported for directory buckets. + /// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). General purpose buckets - Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. 
Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. public let bucketKeyEnabled: Bool? /// Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9. public let cacheControl: String? - /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. + /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For the x-amz-checksum-algorithm header, replace algorithm with the supported algorithm from the following list: CRC32 CRC32C SHA1 SHA256 For more information, see Checking object integrity in the Amazon S3 User Guide. If the individual checksum value you provide through x-amz-checksum-algorithm doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm . The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance. public let checksumAlgorithm: ChecksumAlgorithm? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. 
+ /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -7947,7 +7948,7 @@ extension S3 { public let contentLanguage: String? /// Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length. public let contentLength: Int64? - /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide. This functionality is not supported for directory buckets. + /// The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication. The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information, see Uploading objects to an Object Lock enabled bucket in the Amazon S3 User Guide. This functionality is not supported for directory buckets. public let contentMD5: String? /// A standard MIME type describing the format of the contents. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type. public let contentType: String? @@ -7978,7 +7979,8 @@ extension S3 { @OptionalCustomCoding public var objectLockRetainUntilDate: Date? public let requestPayer: RequestPayer? - /// The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). 
General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported. + /// The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse). General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption in the Amazon S3 User Guide. Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your CreateSession requests or PUT object requests. Then, new objects are automatically encrypted with the desired encryption settings. For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. 
So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), the encryption request headers must match the default encryption configuration of the directory bucket. + /// public let serverSideEncryption: ServerSideEncryption? /// Specifies the algorithm to use when encrypting the object (for example, AES256). This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? @@ -7986,9 +7988,10 @@ extension S3 { public let sseCustomerKey: String? /// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of this header is a base64-encoded UTF-8 string holding JSON with the encryption context key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject or CopyObject operations on this object. This value must be explicitly added during CopyObject operations. This functionality is not supported for directory buckets. + /// Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. This value is stored as object metadata and automatically gets passed on to Amazon Web Services KMS for future GetObject operations on this object. General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide. Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported. public let ssekmsEncryptionContext: String? - /// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID. This functionality is not supported for directory buckets. + /// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same account that's issuing the command, you must use the full Key ARN not the Key ID. General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS key to use. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3) to protect the data. 
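// A minimal usage sketch (not part of the generated service code): putting an object with
// SSE-KMS using the request members documented here (serverSideEncryption, ssekmsKeyId).
// The bucket name, key, and KMS key ARN are placeholder values, and the S3/AWSHTTPBody
// initializers follow the usual Soto patterns but may vary slightly between Soto versions.
import NIOCore
import SotoCore
import SotoS3

func putObjectWithSSEKMS(client: AWSClient) async throws {
    let s3 = S3(client: client, region: .useast1)
    let request = S3.PutObjectRequest(
        body: AWSHTTPBody(buffer: ByteBuffer(string: "hello world")),
        bucket: "example-bucket",          // placeholder bucket name
        key: "reports/example.txt",        // placeholder object key
        serverSideEncryption: .awsKms,     // or .aes256 for SSE-S3
        ssekmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" // placeholder key ARN
    )
    _ = try await s3.putObject(request)
}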
Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. If you want to specify the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. + /// The Amazon Web Services managed key (aws/s3) isn't supported. public let ssekmsKeyId: String? /// By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide. For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. public let storageClass: StorageClass? @@ -8396,7 +8399,7 @@ extension S3 { public let deleteMarkerReplication: DeleteMarkerReplication? /// A container for information about the replication destination and its configurations including enabling the S3 Replication Time Control (S3 RTC). public let destination: Destination - /// Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 User Guide. + /// Optional configuration to replicate existing source bucket objects. This parameter is no longer supported. To replicate existing objects, see Replicating existing objects with S3 Batch Replication in the Amazon S3 User Guide. public let existingObjectReplication: ExistingObjectReplication? public let filter: ReplicationRuleFilter? /// A unique identifier for the rule. The maximum value is 255 characters. @@ -8478,6 +8481,33 @@ extension S3 { } } + public struct ReplicationRuleFilter: AWSEncodableShape & AWSDecodableShape { + /// A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter. For example: If you specify both a Prefix and a Tag filter, wrap these filters in an And tag. If you specify a filter based on multiple tags, wrap the Tag elements in an And tag. + public let and: ReplicationRuleAndOperator? + /// An object key name prefix that identifies the subset of objects to which the rule applies. Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints. + public let prefix: String? + /// A container for specifying a tag key and value. The rule applies only to objects that have the tag in their tag set. + public let tag: Tag? + + @inlinable + public init(and: ReplicationRuleAndOperator? = nil, prefix: String? = nil, tag: Tag? 
= nil) { + self.and = and + self.prefix = prefix + self.tag = tag + } + + public func validate(name: String) throws { + try self.and?.validate(name: "\(name).and") + try self.tag?.validate(name: "\(name).tag") + } + + private enum CodingKeys: String, CodingKey { + case and = "And" + case prefix = "Prefix" + case tag = "Tag" + } + } + public struct ReplicationTime: AWSEncodableShape & AWSDecodableShape { /// Specifies whether the replication time is enabled. public let status: ReplicationTimeStatus @@ -8614,11 +8644,11 @@ extension S3 { public let glacierJobParameters: GlacierJobParameters? /// Describes the location where the restore job's output is stored. public let outputLocation: OutputLocation? - /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more Describes the parameters for Select job types. + /// Describes the parameters for Select job types. public let selectParameters: SelectParameters? /// Retrieval tier at which the restore will be processed. public let tier: Tier? - /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more Type of restore request. + /// Type of restore request. public let type: RestoreRequestType? @inlinable @@ -8874,7 +8904,7 @@ extension S3 { } public struct SelectParameters: AWSEncodableShape { - /// Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more The expression that is used to query the object. + /// The expression that is used to query the object. public let expression: String /// The type of the provided expression (for example, SQL). public let expressionType: ExpressionType @@ -8900,9 +8930,9 @@ extension S3 { } public struct ServerSideEncryptionByDefault: AWSEncodableShape & AWSDecodableShape { - /// Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms or aws:kms:dsse. You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key. Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Key Alias: alias/alias-name If you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log. If you are using encryption with cross-account or Amazon Web Services service operations you must use a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide. + /// Amazon Web Services Key Management Service (KMS) customer managed key ID to use for the default encryption. General purpose buckets - This parameter is allowed if and only if SSEAlgorithm is set to aws:kms or aws:kms:dsse. Directory buckets - This parameter is allowed if and only if SSEAlgorithm is set to aws:kms. You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key. 
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab Key Alias: alias/alias-name If you are using encryption with cross-account or Amazon Web Services service operations, you must use a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations. General purpose buckets - If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the requester’s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, if you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log. Directory buckets - When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide. public let kmsMasterKeyID: String? - /// Server-side encryption algorithm to use for the default encryption. + /// Server-side encryption algorithm to use for the default encryption. For directory buckets, there are only two supported values for server-side encryption: AES256 and aws:kms. public let sseAlgorithm: ServerSideEncryption @inlinable @@ -8934,7 +8964,8 @@ extension S3 { public struct ServerSideEncryptionRule: AWSEncodableShape & AWSDecodableShape { /// Specifies the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. public let applyServerSideEncryptionByDefault: ServerSideEncryptionByDefault? - /// Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. + /// Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 to use an S3 Bucket Key. General purpose buckets - By default, S3 Bucket Key is not enabled. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from general purpose buckets + /// to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object. public let bucketKeyEnabled: Bool? @inlinable @@ -9229,20 +9260,20 @@ extension S3 { } public struct UploadPartCopyOutput: AWSDecodableShape { - /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). 
This functionality is not supported for directory buckets. + /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? /// Container for all response elements. public let copyPartResult: CopyPartResult /// The version of the source object that was copied, if you have enabled versioning on the source bucket. This functionality is not supported when the source object is in a directory bucket. public let copySourceVersionId: String? public let requestCharged: RequestCharged? - /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). public let serverSideEncryption: ServerSideEncryption? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. This functionality is not supported for directory buckets. + /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? @inlinable @@ -9370,11 +9401,11 @@ extension S3 { } public struct UploadPartOutput: AWSDecodableShape { - /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets. + /// Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS). public let bucketKeyEnabled: Bool? - /// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// The base64-encoded, 32-bit CRC32C checksum of the object. 
This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. + /// The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. When you use the API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -9383,13 +9414,13 @@ extension S3 { /// Entity tag for the uploaded object. public let eTag: String? public let requestCharged: RequestCharged? - /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported. + /// The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms). public let serverSideEncryption: ServerSideEncryption? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to confirm the encryption algorithm that's used. This functionality is not supported for directory buckets. public let sseCustomerAlgorithm: String? /// If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide the round-trip message integrity verification of the customer-provided encryption key. This functionality is not supported for directory buckets. public let sseCustomerKeyMD5: String? - /// If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. This functionality is not supported for directory buckets. + /// If present, indicates the ID of the KMS key that was used for object encryption. public let ssekmsKeyId: String? @inlinable @@ -9434,9 +9465,9 @@ extension S3 { public let bucket: String /// Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must be the same for all parts and it must match the checksum value supplied in the CreateMultipartUpload request. public let checksumAlgorithm: ChecksumAlgorithm? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumCRC32C: String? /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide. public let checksumSHA1: String? @@ -9574,9 +9605,9 @@ extension S3 { public let bucketKeyEnabled: Bool? /// Specifies caching behavior along the request/reply chain. public let cacheControl: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. public let checksumCRC32: String? - /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.
This specifies the base64-encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. + /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. public let checksumCRC32C: String? /// This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide. Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail. public let checksumSHA1: String? diff --git a/Sources/Soto/Services/SESv2/SESv2_api.swift b/Sources/Soto/Services/SESv2/SESv2_api.swift index 448bfbae49..00ad948c8d 100644 --- a/Sources/Soto/Services/SESv2/SESv2_api.swift +++ b/Sources/Soto/Services/SESv2/SESv2_api.swift @@ -2176,18 +2176,21 @@ public struct SESv2: AWSService { /// /// Parameters: /// - configurationSetName: The name of the configuration set to associate with a dedicated IP pool. + /// - maxDeliverySeconds: The maximum amount of time, in seconds, that Amazon SES API v2 will attempt delivery of email. If specified, the value must be greater than or equal to 300 seconds (5 minutes) and less than or equal to 50400 seconds (840 minutes). /// - sendingPoolName: The name of the dedicated IP pool to associate with the configuration set. /// - tlsPolicy: Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established. /// - logger: Logger use during operation @inlinable public func putConfigurationSetDeliveryOptions( configurationSetName: String, + maxDeliverySeconds: Int64? = nil, sendingPoolName: String? = nil, tlsPolicy: TlsPolicy?
= nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> PutConfigurationSetDeliveryOptionsResponse { let input = PutConfigurationSetDeliveryOptionsRequest( configurationSetName: configurationSetName, + maxDeliverySeconds: maxDeliverySeconds, sendingPoolName: sendingPoolName, tlsPolicy: tlsPolicy ) @@ -2308,16 +2311,19 @@ public struct SESv2: AWSService { /// Parameters: /// - configurationSetName: The name of the configuration set. /// - customRedirectDomain: The domain to use to track open and click events. + /// - httpsPolicy: The https policy to use for tracking open and click events. /// - logger: Logger use during operation @inlinable public func putConfigurationSetTrackingOptions( configurationSetName: String, customRedirectDomain: String? = nil, + httpsPolicy: HttpsPolicy? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> PutConfigurationSetTrackingOptionsResponse { let input = PutConfigurationSetTrackingOptionsRequest( configurationSetName: configurationSetName, - customRedirectDomain: customRedirectDomain + customRedirectDomain: customRedirectDomain, + httpsPolicy: httpsPolicy ) return try await self.putConfigurationSetTrackingOptions(input, logger: logger) } diff --git a/Sources/Soto/Services/SESv2/SESv2_shapes.swift b/Sources/Soto/Services/SESv2/SESv2_shapes.swift index ceb30f98d3..975a1d5600 100644 --- a/Sources/Soto/Services/SESv2/SESv2_shapes.swift +++ b/Sources/Soto/Services/SESv2/SESv2_shapes.swift @@ -158,6 +158,13 @@ extension SESv2 { public var description: String { return self.rawValue } } + public enum HttpsPolicy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case optional = "OPTIONAL" + case require = "REQUIRE" + case requireOpenOnly = "REQUIRE_OPEN_ONLY" + public var description: String { return self.rawValue } + } + public enum IdentityType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case domain = "DOMAIN" case emailAddress = "EMAIL_ADDRESS" @@ -801,6 +808,10 @@ extension SESv2 { self.vdmOptions = vdmOptions } + public func validate(name: String) throws { + try self.deliveryOptions?.validate(name: "\(name).deliveryOptions") + } + private enum CodingKeys: String, CodingKey { case configurationSetName = "ConfigurationSetName" case deliveryOptions = "DeliveryOptions" @@ -1606,18 +1617,27 @@ extension SESv2 { } public struct DeliveryOptions: AWSEncodableShape & AWSDecodableShape { + /// The maximum amount of time, in seconds, that Amazon SES API v2 will attempt delivery of email. If specified, the value must be greater than or equal to 300 seconds (5 minutes) and less than or equal to 50400 seconds (840 minutes). + public let maxDeliverySeconds: Int64? /// The name of the dedicated IP pool to associate with the configuration set. public let sendingPoolName: String? /// Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established. public let tlsPolicy: TlsPolicy? @inlinable - public init(sendingPoolName: String? = nil, tlsPolicy: TlsPolicy? = nil) { + public init(maxDeliverySeconds: Int64? = nil, sendingPoolName: String? = nil, tlsPolicy: TlsPolicy?
= nil) { + self.maxDeliverySeconds = maxDeliverySeconds self.sendingPoolName = sendingPoolName self.tlsPolicy = tlsPolicy } + public func validate(name: String) throws { + try self.validate(self.maxDeliverySeconds, name: "maxDeliverySeconds", parent: name, max: 50400) + try self.validate(self.maxDeliverySeconds, name: "maxDeliverySeconds", parent: name, min: 300) + } + private enum CodingKeys: String, CodingKey { + case maxDeliverySeconds = "MaxDeliverySeconds" case sendingPoolName = "SendingPoolName" case tlsPolicy = "TlsPolicy" } @@ -4438,14 +4458,17 @@ extension SESv2 { public struct PutConfigurationSetDeliveryOptionsRequest: AWSEncodableShape { /// The name of the configuration set to associate with a dedicated IP pool. public let configurationSetName: String + /// The maximum amount of time, in seconds, that Amazon SES API v2 will attempt delivery of email. If specified, the value must be greater than or equal to 300 seconds (5 minutes) and less than or equal to 50400 seconds (840 minutes). + public let maxDeliverySeconds: Int64? /// The name of the dedicated IP pool to associate with the configuration set. public let sendingPoolName: String? /// Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established. public let tlsPolicy: TlsPolicy? @inlinable - public init(configurationSetName: String, sendingPoolName: String? = nil, tlsPolicy: TlsPolicy? = nil) { + public init(configurationSetName: String, maxDeliverySeconds: Int64? = nil, sendingPoolName: String? = nil, tlsPolicy: TlsPolicy? = nil) { self.configurationSetName = configurationSetName + self.maxDeliverySeconds = maxDeliverySeconds self.sendingPoolName = sendingPoolName self.tlsPolicy = tlsPolicy } @@ -4454,11 +4477,18 @@ extension SESv2 { let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.configurationSetName, key: "ConfigurationSetName") + try container.encodeIfPresent(self.maxDeliverySeconds, forKey: .maxDeliverySeconds) try container.encodeIfPresent(self.sendingPoolName, forKey: .sendingPoolName) try container.encodeIfPresent(self.tlsPolicy, forKey: .tlsPolicy) } + public func validate(name: String) throws { + try self.validate(self.maxDeliverySeconds, name: "maxDeliverySeconds", parent: name, max: 50400) + try self.validate(self.maxDeliverySeconds, name: "maxDeliverySeconds", parent: name, min: 300) + } + private enum CodingKeys: String, CodingKey { + case maxDeliverySeconds = "MaxDeliverySeconds" case sendingPoolName = "SendingPoolName" case tlsPolicy = "TlsPolicy" } @@ -4557,11 +4587,13 @@ extension SESv2 { public let configurationSetName: String /// The domain to use to track open and click events. public let customRedirectDomain: String? + public let httpsPolicy: HttpsPolicy? @inlinable - public init(configurationSetName: String, customRedirectDomain: String? = nil) { + public init(configurationSetName: String, customRedirectDomain: String? = nil, httpsPolicy: HttpsPolicy?
= nil) { self.configurationSetName = configurationSetName self.customRedirectDomain = customRedirectDomain + self.httpsPolicy = httpsPolicy } public func encode(to encoder: Encoder) throws { @@ -4569,10 +4601,12 @@ extension SESv2 { var container = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.configurationSetName, key: "ConfigurationSetName") try container.encodeIfPresent(self.customRedirectDomain, forKey: .customRedirectDomain) + try container.encodeIfPresent(self.httpsPolicy, forKey: .httpsPolicy) } private enum CodingKeys: String, CodingKey { case customRedirectDomain = "CustomRedirectDomain" + case httpsPolicy = "HttpsPolicy" } } @@ -5583,14 +5617,18 @@ extension SESv2 { public struct TrackingOptions: AWSEncodableShape & AWSDecodableShape { /// The domain to use for tracking open and click events. public let customRedirectDomain: String + /// The https policy to use for tracking open and click events. + public let httpsPolicy: HttpsPolicy? @inlinable - public init(customRedirectDomain: String) { + public init(customRedirectDomain: String, httpsPolicy: HttpsPolicy? = nil) { self.customRedirectDomain = customRedirectDomain + self.httpsPolicy = httpsPolicy } private enum CodingKeys: String, CodingKey { case customRedirectDomain = "CustomRedirectDomain" + case httpsPolicy = "HttpsPolicy" } } diff --git a/Sources/Soto/Services/SSM/SSM_api.swift b/Sources/Soto/Services/SSM/SSM_api.swift index 0be1453188..2a3bb1b4e9 100644 --- a/Sources/Soto/Services/SSM/SSM_api.swift +++ b/Sources/Soto/Services/SSM/SSM_api.swift @@ -229,7 +229,7 @@ public struct SSM: AWSService { return try await self.cancelMaintenanceWindowExecution(input, logger: logger) } - /// Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Setting up Amazon Web Services Systems Manager for hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes. + /// Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes. 
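// An illustrative sketch only: exercising the new SESv2 delivery and tracking options added
// above (maxDeliverySeconds and httpsPolicy). The configuration set name and redirect domain
// are placeholder values; the AWSClient is assumed to be configured elsewhere.
import SotoCore
import SotoSESv2

func configureDeliveryAndTracking(client: AWSClient) async throws {
    let ses = SESv2(client: client, region: .useast1)

    // Cap delivery attempts at one hour; the value must be between 300 and 50400 seconds.
    _ = try await ses.putConfigurationSetDeliveryOptions(
        configurationSetName: "example-config-set",
        maxDeliverySeconds: 3600
    )

    // Require HTTPS for open and click tracking links.
    _ = try await ses.putConfigurationSetTrackingOptions(
        configurationSetName: "example-config-set",
        customRedirectDomain: "tracking.example.com",
        httpsPolicy: .require
    )
}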
@Sendable @inlinable public func createActivation(_ input: CreateActivationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateActivationResult { @@ -242,13 +242,13 @@ public struct SSM: AWSService { logger: logger ) } - /// Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Setting up Amazon Web Services Systems Manager for hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes. + /// Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes. /// /// Parameters: /// - defaultInstanceName: The name of the registered, managed node as it will appear in the Amazon Web Services Systems Manager console or when you use the Amazon Web Services command line tools to list Systems Manager resources. Don't enter personally identifiable information in this field. /// - description: A user-defined description of the resource that you want to register with Systems Manager. Don't enter personally identifiable information in this field. - /// - expirationDate: The date by which this activation request should expire, in timestamp format, such as "2021-07-07T00:00:00". You can specify a date up to 30 days in advance. If you don't provide an expiration date, the activation code expires in 24 hours. - /// - iamRole: The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid and multicloud environment in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role. + /// - expirationDate: The date by which this activation request should expire, in timestamp format, such as "2024-07-07T00:00:00". You can specify a date up to 30 days in advance. If you don't provide an expiration date, the activation code expires in 24 hours. + /// - iamRole: The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. 
This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role. /// - registrationLimit: Specify the maximum number of managed nodes you want to register. The default value is 1. /// - registrationMetadata: Reserved for internal use. /// - tags: Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an activation to identify which servers or virtual machines (VMs) in your on-premises environment you intend to activate. In this case, you could specify the following key-value pairs: Key=OS,Value=Windows Key=Environment,Value=Production When you install SSM Agent on your on-premises servers and VMs, you specify an activation ID and code. When you specify the activation ID and code, tags assigned to the activation are automatically applied to the on-premises servers or VMs. You can't add tags to or delete tags from an existing activation. You can tag your on-premises servers, edge devices, and VMs after they connect to Systems Manager for the first time and are assigned a managed node ID. This means they are listed in the Amazon Web Services Systems Manager console with an ID that is prefixed with "mi-". For information about how to add tags to your managed nodes, see AddTagsToResource. For information about how to remove tags from your managed nodes, see RemoveTagsFromResource. @@ -312,7 +312,7 @@ public struct SSM: AWSService { /// - tags: Adds or overwrites one or more tags for a State Manager association. Tags are metadata that you can assign to your Amazon Web Services resources. Tags enable you to categorize your resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define. /// - targetLocations: A location is a combination of Amazon Web Services Regions and Amazon Web Services accounts where you want to run the association. Use this action to create an association in multiple Regions and multiple accounts. /// - targetMaps: A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together. - /// - targets: The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of *. For more information about choosing targets for an association, see About targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide. + /// - targets: The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of *.
For more information about choosing targets for an association, see Understanding targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide. /// - logger: Logger use during operation @inlinable public func createAssociation( @@ -638,7 +638,7 @@ public struct SSM: AWSService { /// /// Parameters: /// - approvalRules: A set of rules used to include patches in the baseline. - /// - approvedPatches: A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// - approvedPatches: A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. /// - approvedPatchesComplianceLevel: Defines the compliance level for approved patches. When an approved patch is reported as missing, this value describes the severity of the compliance violation. The default value is UNSPECIFIED. /// - approvedPatchesEnableNonSecurity: Indicates whether the list of approved patches includes non-security updates that should be applied to the managed nodes. The default value is false. Applies to Linux managed nodes only. /// - clientToken: User-provided idempotency token. @@ -646,7 +646,7 @@ public struct SSM: AWSService { /// - globalFilters: A set of global filters used to include patches in the baseline. /// - name: The name of the patch baseline. /// - operatingSystem: Defines the operating system the patch baseline applies to. The default value is WINDOWS. - /// - rejectedPatches: A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// - rejectedPatches: A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. /// - rejectedPatchesAction: The action for Patch Manager to take on patches included in the RejectedPackages list. ALLOW_AS_DEPENDENCY Linux and macOS: A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as INSTALLED_OTHER. This is the default action if no option is specified. Windows Server: Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the default action if no option is specified. BLOCK All OSs: Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED.
/// - sources: Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only. /// - tags: Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to. In this case, you could specify the following key-value pairs: Key=PatchSeverity,Value=Critical Key=OS,Value=Windows To add tags to an existing patch baseline, use the AddTagsToResource operation. @@ -686,7 +686,7 @@ public struct SSM: AWSService { return try await self.createPatchBaseline(input, logger: logger) } - /// A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource. You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Configuring resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide. You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide. A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync. By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. + /// A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource. You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Creating a resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide. You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide.
A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync. By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. @Sendable @inlinable public func createResourceDataSync(_ input: CreateResourceDataSyncRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateResourceDataSyncResult { @@ -699,7 +699,7 @@ public struct SSM: AWSService { logger: logger ) } - /// A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource. You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Configuring resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide. You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide. A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync. By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. + /// A resource data sync helps you view data from multiple sources in a single location. Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource. You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Creating a resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide. You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more information, see Setting up Systems Manager Explorer to display data from multiple accounts and Regions in the Amazon Web Services Systems Manager User Guide. A resource data sync is an asynchronous operation that returns immediately.
After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync. By default, data isn't encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. /// /// Parameters: /// - s3Destination: Amazon S3 configuration details for the sync. This parameter is required if the SyncType value is SyncToDestination. @@ -1801,7 +1801,7 @@ public struct SSM: AWSService { /// Retrieves information about the patches on the specified managed node and their state relative to the patch baseline being used for the node. /// /// Parameters: - /// - filters: Each element in the array is a structure containing a key-value pair. Supported keys for DescribeInstancePatchesinclude the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of all State values, see Understanding patch compliance state values in the Amazon Web Services Systems Manager User Guide. + /// - filters: Each element in the array is a structure containing a key-value pair. Supported keys for DescribeInstancePatchesinclude the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of all State values, see Patch compliance state values in the Amazon Web Services Systems Manager User Guide. /// - instanceId: The ID of the managed node whose patch state information should be retrieved. /// - maxResults: The maximum number of patches to return (per page). /// - nextToken: The token for the next set of items to return. (You received this token from a previous call.) @@ -1991,7 +1991,7 @@ public struct SSM: AWSService { /// Lists the executions of a maintenance window. This includes information about when the maintenance window was scheduled to be active, and information about tasks registered and run with the maintenance window. /// /// Parameters: - /// - filters: Each entry in the array is a structure containing: Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter. Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2021-11-04T05:00:00Z. + /// - filters: Each entry in the array is a structure containing: Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter. Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2024-11-04T05:00:00Z. /// - maxResults: The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results. /// - nextToken: The token for the next set of items to return. (You received this token from a previous call.) /// - windowId: The ID of the maintenance window whose executions should be retrieved. 
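The filter shapes documented above can be passed straight to the corresponding convenience methods. A minimal sketch, assuming placeholder node and maintenance window IDs; the filter keys and sample values are the ones listed in these doc comments:

```swift
import SotoSSM

// Sketch: query per-node patch state and recent maintenance window executions
// using the documented filter keys. The node and window IDs are placeholders.
func inspectPatchAndWindowActivity(ssm: SSM) async throws {
    // Patches on one node that are installed but still waiting for a reboot.
    let patchResult = try await ssm.describeInstancePatches(
        filters: [PatchOrchestratorFilter(key: "State", values: ["InstalledPendingReboot"])],
        instanceId: "i-02573cafcfEXAMPLE"
    )
    print("Pending-reboot patches:", patchResult.patches?.count ?? 0)

    // Maintenance window executions that started after an ISO 8601 timestamp.
    let executionResult = try await ssm.describeMaintenanceWindowExecutions(
        filters: [MaintenanceWindowFilter(key: "ExecutedAfter", values: ["2024-11-04T05:00:00Z"])],
        windowId: "mw-0c50858d01EXAMPLE"
    )
    print("Executions found:", executionResult.windowExecutions?.count ?? 0)
}
```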
@@ -4321,7 +4321,7 @@ public struct SSM: AWSService { /// - maxErrors: The maximum number of errors allowed before this task stops being scheduled. Although this element is listed as "Required: No", a value can be omitted only when you are registering or updating a targetless task You must provide a value in all other cases. For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of 1. This value doesn't affect the running of your task. /// - name: An optional name for the task. /// - priority: The priority of the task in the maintenance window, the lower the number the higher the priority. Tasks in a maintenance window are scheduled in priority order with tasks that have the same priority scheduled in parallel. - /// - serviceRoleArn: The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide. + /// - serviceRoleArn: The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide. /// - targets: The targets (either managed nodes or maintenance window targets). One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide. Specify managed nodes using the following format: Key=InstanceIds,Values=, Specify maintenance window targets using the following format: Key=WindowTargetIds,Values=, /// - taskArn: The ARN of the task to run. /// - taskInvocationParameters: The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty. @@ -4627,15 +4627,16 @@ public struct SSM: AWSService { /// - clientToken: User-provided idempotency token. The token must be unique, is case insensitive, enforces the UUID format, and can't be reused. /// - documentName: The name of the SSM document to run. 
This can be a public document or a custom document. To run a shared document belonging to another account, specify the document ARN. For more information about how to use shared documents, see Sharing SSM documents in the Amazon Web Services Systems Manager User Guide. /// - documentVersion: The version of the Automation runbook to use for this execution. - /// - maxConcurrency: The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10. - /// - maxErrors: The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received. Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time. + /// - maxConcurrency: The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10. If both this parameter and the TargetLocation:TargetsMaxConcurrency are supplied, TargetLocation:TargetsMaxConcurrency takes precedence. + /// - maxErrors: The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received. Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time. If this parameter and the TargetLocation:TargetsMaxErrors parameter are both supplied, TargetLocation:TargetsMaxErrors takes precedence. /// - mode: The execution mode of the automation. Valid modes include the following: Auto and Interactive. The default mode is Auto. /// - parameters: A key-value map of execution parameters, which match the declared parameters in the Automation runbook. /// - tags: Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an automation to identify an environment or operating system. 
In this case, you could specify the following key-value pairs: Key=environment,Value=test Key=OS,Value=Windows To add tags to an existing automation, use the AddTagsToResource operation. - /// - targetLocations: A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and Amazon Web Services accounts in the Amazon Web Services Systems Manager User Guide. + /// - targetLocations: A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide. + /// - targetLocationsURL: Specify a publicly accessible URL for a file that contains the TargetLocations body. Currently, only files in presigned Amazon S3 buckets are supported. /// - targetMaps: A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together. /// - targetParameterName: The name of the parameter used as the target resource for the rate-controlled execution. Required if you specify targets. - /// - targets: A key-value mapping to target resources. Required if you specify TargetParameterName. + /// - targets: A key-value mapping to target resources. Required if you specify TargetParameterName. If both this parameter and the TargetLocation:Targets parameter are supplied, TargetLocation:Targets takes precedence. /// - logger: Logger use during operation @inlinable public func startAutomationExecution( @@ -4649,6 +4650,7 @@ public struct SSM: AWSService { parameters: [String: [String]]? = nil, tags: [Tag]? = nil, targetLocations: [TargetLocation]? = nil, + targetLocationsURL: String? = nil, targetMaps: [[String: [String]]]? = nil, targetParameterName: String? = nil, targets: [Target]? = nil, @@ -4665,6 +4667,7 @@ public struct SSM: AWSService { parameters: parameters, tags: tags, targetLocations: targetLocations, + targetLocationsURL: targetLocationsURL, targetMaps: targetMaps, targetParameterName: targetParameterName, targets: targets @@ -5243,7 +5246,7 @@ public struct SSM: AWSService { /// - name: The new task name to specify. /// - priority: The new task priority to specify. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel. /// - replace: If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow operation are also required for this API request. Optional fields that aren't specified are set to null. - /// - serviceRoleArn: The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. 
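The new targetLocationsURL parameter threaded through the signature above can be supplied in place of inline TargetLocation values. A minimal sketch, assuming a hypothetical runbook name and a placeholder presigned S3 URL; per the updated doc comments, settings inside the referenced TargetLocations body take precedence over the top-level rate-control parameters:

```swift
import SotoSSM

// Sketch: start an automation whose cross-account/Region targets are read
// from a presigned S3 URL. The runbook name and URL are placeholders.
func startAutomationFromTargetFile(ssm: SSM) async throws -> String? {
    let result = try await ssm.startAutomationExecution(
        documentName: "Example-Runbook",   // hypothetical Automation runbook
        targetLocationsURL: "https://amzn-s3-demo-bucket.s3.amazonaws.com/target-locations.json?X-Amz-Signature=placeholder"
    )
    return result.automationExecutionId    // use this ID to track the execution
}
```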
The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide. + /// - serviceRoleArn: The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide. /// - targets: The targets (either managed nodes or tags) to modify. Managed nodes are specified using the format Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using the format Key=tag_name,Values=tag_value. One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide. /// - taskArn: The task ARN to modify. /// - taskInvocationParameters: The parameters that the task should use during execution. Populate only the fields that match the task type. All other fields should be empty. When you update a maintenance window task that has options specified in TaskInvocationParameters, you must provide again all the TaskInvocationParameters values that you want to retain. The values you don't specify again are removed. For example, suppose that when you registered a Run Command task, you specified TaskInvocationParameters values for Comment, NotificationConfig, and OutputS3BucketName. If you update the maintenance window task and specify only a different OutputS3BucketName value, the values for Comment and NotificationConfig are removed. @@ -5308,7 +5311,7 @@ public struct SSM: AWSService { /// Changes the Identity and Access Management (IAM) role that is assigned to the on-premises server, edge device, or virtual machines (VM). IAM roles are first assigned to these hybrid nodes during the activation process. For more information, see CreateActivation. /// /// Parameters: - /// - iamRole: The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid and multicloud environment in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role. + /// - iamRole: The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. 
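Swapping the role on an already-registered hybrid node is a single call. A minimal sketch, assuming a placeholder node ID and role name:

```swift
import SotoSSM

// Sketch: point an existing hybrid managed node at a different service role.
// Both values below are placeholders.
func rotateManagedNodeRole(ssm: SSM) async throws {
    _ = try await ssm.updateManagedInstanceRole(
        iamRole: "SSMServiceRoleForHybridNodes",   // placeholder role name
        instanceId: "mi-0123456789abcdef0"         // placeholder hybrid node ID ("mi-" prefix)
    )
}
```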
For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role. /// - instanceId: The ID of the managed node where you want to update the role. /// - logger: Logger use during operation @inlinable @@ -5354,7 +5357,7 @@ public struct SSM: AWSService { /// - priority: The importance of this OpsItem in relation to other OpsItems in the system. /// - relatedOpsItems: One or more OpsItems that share something in common with the current OpsItems. For example, related OpsItems can include OpsItems with similar error messages, impacted resources, or statuses for the impacted resource. /// - severity: Specify a new severity for an OpsItem. - /// - status: The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide. + /// - status: The OpsItem status. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide. /// - title: A short heading that describes the nature of the OpsItem and the impacted resource. /// - logger: Logger use during operation @inlinable @@ -5450,14 +5453,14 @@ public struct SSM: AWSService { /// /// Parameters: /// - approvalRules: A set of rules used to include patches in the baseline. - /// - approvedPatches: A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// - approvedPatches: A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. /// - approvedPatchesComplianceLevel: Assigns a new compliance severity level to an existing patch baseline. /// - approvedPatchesEnableNonSecurity: Indicates whether the list of approved patches includes non-security updates that should be applied to the managed nodes. The default value is false. Applies to Linux managed nodes only. /// - baselineId: The ID of the patch baseline to update. /// - description: A description of the patch baseline. /// - globalFilters: A set of global filters used to include patches in the baseline. /// - name: The name of the patch baseline. - /// - rejectedPatches: A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// - rejectedPatches: A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. /// - rejectedPatchesAction: The action for Patch Manager to take on patches included in the RejectedPackages list. ALLOW_AS_DEPENDENCY Linux and macOS: A package in the rejected patches list is installed only if it is a dependency of another package. 
It is considered compliant with the patch baseline, and its status is reported as INSTALLED_OTHER. This is the default action if no option is specified. Windows Server: Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list and already installed on the node, its status is reported as INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the default action if no option is specified. BLOCK All OSs: Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED. /// - replace: If True, then all fields that are required by the CreatePatchBaseline operation are also required for this API request. Optional fields that aren't specified are set to null. /// - sources: Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only. @@ -6062,7 +6065,7 @@ extension SSM { /// Return PaginatorSequence for operation ``describeInstancePatches(_:logger:)``. /// /// - Parameters: - /// - filters: Each element in the array is a structure containing a key-value pair. Supported keys for DescribeInstancePatchesinclude the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of all State values, see Understanding patch compliance state values in the Amazon Web Services Systems Manager User Guide. + /// - filters: Each element in the array is a structure containing a key-value pair. Supported keys for DescribeInstancePatchesinclude the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of all State values, see Patch compliance state values in the Amazon Web Services Systems Manager User Guide. /// - instanceId: The ID of the managed node whose patch state information should be retrieved. /// - maxResults: The maximum number of patches to return (per page). /// - logger: Logger used for logging @@ -6262,7 +6265,7 @@ extension SSM { /// Return PaginatorSequence for operation ``describeMaintenanceWindowExecutions(_:logger:)``. /// /// - Parameters: - /// - filters: Each entry in the array is a structure containing: Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter. Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2021-11-04T05:00:00Z. + /// - filters: Each entry in the array is a structure containing: Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter. Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2024-11-04T05:00:00Z. /// - maxResults: The maximum number of items to return for this call. 
The call also returns a token that you can specify in a subsequent call to get the next set of results. /// - windowId: The ID of the maintenance window whose executions should be retrieved. /// - logger: Logger used for logging diff --git a/Sources/Soto/Services/SSM/SSM_shapes.swift b/Sources/Soto/Services/SSM/SSM_shapes.swift index f97b8384b2..f20e3819a4 100644 --- a/Sources/Soto/Services/SSM/SSM_shapes.swift +++ b/Sources/Soto/Services/SSM/SSM_shapes.swift @@ -1547,7 +1547,7 @@ extension SSM { public let key: AttachmentsSourceKey? /// The name of the document attachment file. public let name: String? - /// The value of a key-value pair that identifies the location of an attachment to a document. The format for Value depends on the type of key you specify. For the key SourceUrl, the value is an S3 bucket location. For example: "Values": [ "s3://doc-example-bucket/my-folder" ] For the key S3FileUrl, the value is a file in an S3 bucket. For example: "Values": [ "s3://doc-example-bucket/my-folder/my-file.py" ] For the key AttachmentReference, the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example: "Values": [ "MyOtherDocument/3/my-other-file.py" ] However, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example: "Values": [ "arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py" ] + /// The value of a key-value pair that identifies the location of an attachment to a document. The format for Value depends on the type of key you specify. For the key SourceUrl, the value is an S3 bucket location. For example: "Values": [ "s3://amzn-s3-demo-bucket/my-prefix" ] For the key S3FileUrl, the value is a file in an S3 bucket. For example: "Values": [ "s3://amzn-s3-demo-bucket/my-prefix/my-file.py" ] For the key AttachmentReference, the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example: "Values": [ "MyOtherDocument/3/my-other-file.py" ] However, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example: "Values": [ "arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py" ] public let values: [String]? @inlinable @@ -1633,6 +1633,8 @@ extension SSM { public let target: String? /// The combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the Automation. public let targetLocations: [TargetLocation]? + /// A publicly accessible URL for a file that contains the TargetLocations body. Currently, only files in presigned Amazon S3 buckets are supported + public let targetLocationsURL: String? /// The specified key-value mapping of document parameters to target resources. public let targetMaps: [[String: [String]]]? /// The parameter name. @@ -1645,7 +1647,7 @@ extension SSM { public let variables: [String: [String]]? @inlinable - public init(alarmConfiguration: AlarmConfiguration? = nil, associationId: String? = nil, automationExecutionId: String? = nil, automationExecutionStatus: AutomationExecutionStatus? = nil, automationSubtype: AutomationSubtype? = nil, changeRequestName: String? = nil, currentAction: String? = nil, currentStepName: String? 
= nil, documentName: String? = nil, documentVersion: String? = nil, executedBy: String? = nil, executionEndTime: Date? = nil, executionStartTime: Date? = nil, failureMessage: String? = nil, maxConcurrency: String? = nil, maxErrors: String? = nil, mode: ExecutionMode? = nil, opsItemId: String? = nil, outputs: [String: [String]]? = nil, parameters: [String: [String]]? = nil, parentAutomationExecutionId: String? = nil, progressCounters: ProgressCounters? = nil, resolvedTargets: ResolvedTargets? = nil, runbooks: [Runbook]? = nil, scheduledTime: Date? = nil, stepExecutions: [StepExecution]? = nil, stepExecutionsTruncated: Bool? = nil, target: String? = nil, targetLocations: [TargetLocation]? = nil, targetMaps: [[String: [String]]]? = nil, targetParameterName: String? = nil, targets: [Target]? = nil, triggeredAlarms: [AlarmStateInformation]? = nil, variables: [String: [String]]? = nil) { + public init(alarmConfiguration: AlarmConfiguration? = nil, associationId: String? = nil, automationExecutionId: String? = nil, automationExecutionStatus: AutomationExecutionStatus? = nil, automationSubtype: AutomationSubtype? = nil, changeRequestName: String? = nil, currentAction: String? = nil, currentStepName: String? = nil, documentName: String? = nil, documentVersion: String? = nil, executedBy: String? = nil, executionEndTime: Date? = nil, executionStartTime: Date? = nil, failureMessage: String? = nil, maxConcurrency: String? = nil, maxErrors: String? = nil, mode: ExecutionMode? = nil, opsItemId: String? = nil, outputs: [String: [String]]? = nil, parameters: [String: [String]]? = nil, parentAutomationExecutionId: String? = nil, progressCounters: ProgressCounters? = nil, resolvedTargets: ResolvedTargets? = nil, runbooks: [Runbook]? = nil, scheduledTime: Date? = nil, stepExecutions: [StepExecution]? = nil, stepExecutionsTruncated: Bool? = nil, target: String? = nil, targetLocations: [TargetLocation]? = nil, targetLocationsURL: String? = nil, targetMaps: [[String: [String]]]? = nil, targetParameterName: String? = nil, targets: [Target]? = nil, triggeredAlarms: [AlarmStateInformation]? = nil, variables: [String: [String]]? = nil) { self.alarmConfiguration = alarmConfiguration self.associationId = associationId self.automationExecutionId = automationExecutionId @@ -1675,6 +1677,7 @@ extension SSM { self.stepExecutionsTruncated = stepExecutionsTruncated self.target = target self.targetLocations = targetLocations + self.targetLocationsURL = targetLocationsURL self.targetMaps = targetMaps self.targetParameterName = targetParameterName self.targets = targets @@ -1712,6 +1715,7 @@ extension SSM { case stepExecutionsTruncated = "StepExecutionsTruncated" case target = "Target" case targetLocations = "TargetLocations" + case targetLocationsURL = "TargetLocationsURL" case targetMaps = "TargetMaps" case targetParameterName = "TargetParameterName" case targets = "Targets" @@ -1758,7 +1762,7 @@ extension SSM { public let automationExecutionStatus: AutomationExecutionStatus? /// The subtype of the Automation operation. Currently, the only supported value is ChangeRequest. public let automationSubtype: AutomationSubtype? - /// Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that runs in multiple Amazon Web Services Regions and Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide. 
+ /// Use this filter with DescribeAutomationExecutions. Specify either Local or CrossAccount. CrossAccount is an Automation that runs in multiple Amazon Web Services Regions and Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide. public let automationType: AutomationType? /// The name of the Change Manager change request. public let changeRequestName: String? @@ -1800,6 +1804,8 @@ extension SSM { public let scheduledTime: Date? /// The list of execution outputs as defined in the Automation runbook. public let target: String? + /// A publicly accessible URL for a file that contains the TargetLocations body. Currently, only files in presigned Amazon S3 buckets are supported + public let targetLocationsURL: String? /// The specified key-value mapping of document parameters to target resources. public let targetMaps: [[String: [String]]]? /// The list of execution outputs as defined in the Automation runbook. @@ -1810,7 +1816,7 @@ extension SSM { public let triggeredAlarms: [AlarmStateInformation]? @inlinable - public init(alarmConfiguration: AlarmConfiguration? = nil, associationId: String? = nil, automationExecutionId: String? = nil, automationExecutionStatus: AutomationExecutionStatus? = nil, automationSubtype: AutomationSubtype? = nil, automationType: AutomationType? = nil, changeRequestName: String? = nil, currentAction: String? = nil, currentStepName: String? = nil, documentName: String? = nil, documentVersion: String? = nil, executedBy: String? = nil, executionEndTime: Date? = nil, executionStartTime: Date? = nil, failureMessage: String? = nil, logFile: String? = nil, maxConcurrency: String? = nil, maxErrors: String? = nil, mode: ExecutionMode? = nil, opsItemId: String? = nil, outputs: [String: [String]]? = nil, parentAutomationExecutionId: String? = nil, resolvedTargets: ResolvedTargets? = nil, runbooks: [Runbook]? = nil, scheduledTime: Date? = nil, target: String? = nil, targetMaps: [[String: [String]]]? = nil, targetParameterName: String? = nil, targets: [Target]? = nil, triggeredAlarms: [AlarmStateInformation]? = nil) { + public init(alarmConfiguration: AlarmConfiguration? = nil, associationId: String? = nil, automationExecutionId: String? = nil, automationExecutionStatus: AutomationExecutionStatus? = nil, automationSubtype: AutomationSubtype? = nil, automationType: AutomationType? = nil, changeRequestName: String? = nil, currentAction: String? = nil, currentStepName: String? = nil, documentName: String? = nil, documentVersion: String? = nil, executedBy: String? = nil, executionEndTime: Date? = nil, executionStartTime: Date? = nil, failureMessage: String? = nil, logFile: String? = nil, maxConcurrency: String? = nil, maxErrors: String? = nil, mode: ExecutionMode? = nil, opsItemId: String? = nil, outputs: [String: [String]]? = nil, parentAutomationExecutionId: String? = nil, resolvedTargets: ResolvedTargets? = nil, runbooks: [Runbook]? = nil, scheduledTime: Date? = nil, target: String? = nil, targetLocationsURL: String? = nil, targetMaps: [[String: [String]]]? = nil, targetParameterName: String? = nil, targets: [Target]? = nil, triggeredAlarms: [AlarmStateInformation]? 
= nil) { self.alarmConfiguration = alarmConfiguration self.associationId = associationId self.automationExecutionId = automationExecutionId @@ -1837,6 +1843,7 @@ extension SSM { self.runbooks = runbooks self.scheduledTime = scheduledTime self.target = target + self.targetLocationsURL = targetLocationsURL self.targetMaps = targetMaps self.targetParameterName = targetParameterName self.targets = targets @@ -1870,6 +1877,7 @@ extension SSM { case runbooks = "Runbooks" case scheduledTime = "ScheduledTime" case target = "Target" + case targetLocationsURL = "TargetLocationsURL" case targetMaps = "TargetMaps" case targetParameterName = "TargetParameterName" case targets = "Targets" @@ -1879,7 +1887,7 @@ extension SSM { public struct BaselineOverride: AWSEncodableShape { public let approvalRules: PatchRuleGroup? - /// A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. public let approvedPatches: [String]? /// Defines the compliance level for approved patches. When an approved patch is reported as missing, this value describes the severity of the compliance violation. public let approvedPatchesComplianceLevel: PatchComplianceLevel? @@ -1888,7 +1896,7 @@ extension SSM { public let globalFilters: PatchFilterGroup? /// The operating system rule used by the patch baseline override. public let operatingSystem: OperatingSystem? - /// A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. public let rejectedPatches: [String]? /// The action for Patch Manager to take on patches included in the RejectedPackages list. A patch can be allowed only if it is a dependency of another package, or blocked entirely along with packages that include it as a dependency. public let rejectedPatchesAction: PatchAction? @@ -2145,7 +2153,7 @@ extension SSM { public struct CommandFilter: AWSEncodableShape { /// The name of the filter. The ExecutionStage filter can't be used with the ListCommandInvocations operation, only with ListCommands. public let key: CommandFilterKey - /// The filter value. Valid values for each filter key are as follows: InvokedAfter: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions occurring July 7, 2021, and later. InvokedBefore: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions from before July 7, 2021. Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call. 
The status values you can specify for ListCommands are: Pending InProgress Success Cancelled Failed TimedOut (this includes both Delivery and Execution time outs) AccessDenied DeliveryTimedOut ExecutionTimedOut Incomplete NoInstancesInTag LimitExceeded The status values you can specify for ListCommandInvocations are: Pending InProgress Delayed Success Cancelled Failed TimedOut (this includes both Delivery and Execution time outs) AccessDenied DeliveryTimedOut ExecutionTimedOut Undeliverable InvalidPlatform Terminated DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on managed nodes. ExecutionStage: Specify one of the following values (ListCommands operations only): Executing: Returns a list of command executions that are currently still running. Complete: Returns a list of command executions that have already completed. + /// The filter value. Valid values for each filter key are as follows: InvokedAfter: Specify a timestamp to limit your results. For example, specify 2024-07-07T00:00:00Z to see a list of command executions occurring July 7, 2021, and later. InvokedBefore: Specify a timestamp to limit your results. For example, specify 2024-07-07T00:00:00Z to see a list of command executions from before July 7, 2021. Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call. The status values you can specify for ListCommands are: Pending InProgress Success Cancelled Failed TimedOut (this includes both Delivery and Execution time outs) AccessDenied DeliveryTimedOut ExecutionTimedOut Incomplete NoInstancesInTag LimitExceeded The status values you can specify for ListCommandInvocations are: Pending InProgress Delayed Success Cancelled Failed TimedOut (this includes both Delivery and Execution time outs) AccessDenied DeliveryTimedOut ExecutionTimedOut Undeliverable InvalidPlatform Terminated DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on managed nodes. ExecutionStage: Specify one of the following values (ListCommands operations only): Executing: Returns a list of command executions that are currently still running. Complete: Returns a list of command executions that have already completed. public let value: String @inlinable @@ -2244,9 +2252,9 @@ extension SSM { public let name: String? /// Output of the plugin execution. public let output: String? - /// The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response: doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript doc-example-bucket is the name of the S3 bucket; ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; i-02573cafcfEXAMPLE is the managed node ID; awsrunShellScript is the name of the plugin. + /// The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. 
For example, in the following response: amzn-s3-demo-bucket/my-prefix/i-02573cafcfEXAMPLE/awsrunShellScript amzn-s3-demo-bucket is the name of the S3 bucket; my-prefix is the name of the S3 prefix; i-02573cafcfEXAMPLE is the managed node ID; awsrunShellScript is the name of the plugin. public let outputS3BucketName: String? - /// The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response: doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript doc-example-bucket is the name of the S3 bucket; ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; i-02573cafcfEXAMPLE is the managed node ID; awsrunShellScript is the name of the plugin. + /// The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response: amzn-s3-demo-bucket/my-prefix/i-02573cafcfEXAMPLE/awsrunShellScript amzn-s3-demo-bucket is the name of the S3 bucket; my-prefix is the name of the S3 prefix; i-02573cafcfEXAMPLE is the managed node ID; awsrunShellScript is the name of the plugin. public let outputS3KeyPrefix: String? /// (Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Amazon Web Services Systems Manager automatically determines the S3 bucket region. public let outputS3Region: String? @@ -2483,9 +2491,9 @@ extension SSM { public let defaultInstanceName: String? /// A user-defined description of the resource that you want to register with Systems Manager. Don't enter personally identifiable information in this field. public let description: String? - /// The date by which this activation request should expire, in timestamp format, such as "2021-07-07T00:00:00". You can specify a date up to 30 days in advance. If you don't provide an expiration date, the activation code expires in 24 hours. + /// The date by which this activation request should expire, in timestamp format, such as "2024-07-07T00:00:00". You can specify a date up to 30 days in advance. If you don't provide an expiration date, the activation code expires in 24 hours. public let expirationDate: Date? - /// The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid and multicloud environment in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role. + /// The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role. public let iamRole: String /// Specify the maximum number of managed nodes you want to register. The default value is 1. public let registrationLimit: Int? @@ -2755,7 +2763,7 @@ extension SSM { public let targetLocations: [TargetLocation]?
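The activation fields shown above (expiration date, hybrid-environment service role, registration limit) come together in the createActivation convenience method. A minimal sketch; the role and instance names are placeholders for values you would create as described in the linked guide:

```swift
import Foundation
import SotoSSM

// Sketch: register up to 10 on-premises machines as managed nodes.
// "SSMServiceRoleForHybridNodes" and "on-prem-web" are placeholder names.
func createHybridActivation(ssm: SSM) async throws -> (id: String?, code: String?) {
    let result = try await ssm.createActivation(
        defaultInstanceName: "on-prem-web",
        description: "Activation for on-premises web servers",
        expirationDate: Date(timeIntervalSinceNow: 7 * 24 * 3600),  // must be within 30 days
        iamRole: "SSMServiceRoleForHybridNodes",
        registrationLimit: 10
    )
    return (result.activationId, result.activationCode)  // pass both to SSM Agent at install time
}
```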
/// A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together. public let targetMaps: [[String: [String]]]? - /// The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of *. For more information about choosing targets for an association, see About targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide. + /// The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of *. For more information about choosing targets for an association, see Understanding targets and rate controls in State Manager associations in the Amazon Web Services Systems Manager User Guide. public let targets: [Target]? @inlinable @@ -3223,7 +3231,7 @@ extension SSM { public struct CreatePatchBaselineRequest: AWSEncodableShape { /// A set of rules used to include patches in the baseline. public let approvalRules: PatchRuleGroup? - /// A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. public let approvedPatches: [String]? /// Defines the compliance level for approved patches. When an approved patch is reported as missing, this value describes the severity of the compliance violation. The default value is UNSPECIFIED. public let approvedPatchesComplianceLevel: PatchComplianceLevel? @@ -3239,7 +3247,7 @@ extension SSM { public let name: String /// Defines the operating system the patch baseline applies to. The default value is WINDOWS. public let operatingSystem: OperatingSystem? - /// A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. public let rejectedPatches: [String]? /// The action for Patch Manager to take on patches included in the RejectedPackages list. ALLOW_AS_DEPENDENCY Linux and macOS: A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as INSTALLED_OTHER. This is the default action if no option is specified. Windows Server: Windows Server doesn't support the concept of package dependencies. 
If a package in the rejected patches list and already installed on the node, its status is reported as INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the default action if no option is specified. BLOCK All OSs: Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED. public let rejectedPatchesAction: PatchAction? @@ -3491,7 +3499,7 @@ extension SSM { public struct DeleteInventoryResult: AWSDecodableShape { /// Every DeleteInventory operation is assigned a unique ID. This option returns a unique ID. You can use this ID to query the status of a delete operation. This option is useful for ensuring that a delete operation has completed before you begin other operations. public let deletionId: String? - /// A summary of the delete operation. For more information about this summary, see Understanding the delete inventory summary in the Amazon Web Services Systems Manager User Guide. + /// A summary of the delete operation. For more information about this summary, see Deleting custom inventory in the Amazon Web Services Systems Manager User Guide. public let deletionSummary: InventoryDeletionSummary? /// The name of the inventory data type specified in the request. public let typeName: String? @@ -4690,7 +4698,7 @@ extension SSM { } public struct DescribeInstancePatchesRequest: AWSEncodableShape { - /// Each element in the array is a structure containing a key-value pair. Supported keys for DescribeInstancePatchesinclude the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of all State values, see Understanding patch compliance state values in the Amazon Web Services Systems Manager User Guide. + /// Each element in the array is a structure containing a key-value pair. Supported keys for DescribeInstancePatchesinclude the following: Classification Sample values: Security | SecurityUpdates KBId Sample values: KB4480056 | java-1.7.0-openjdk.x86_64 Severity Sample values: Important | Medium | Low State Sample values: Installed | InstalledOther | InstalledPendingReboot For lists of all State values, see Patch compliance state values in the Amazon Web Services Systems Manager User Guide. public let filters: [PatchOrchestratorFilter]? /// The ID of the managed node whose patch state information should be retrieved. public let instanceId: String @@ -4968,7 +4976,7 @@ extension SSM { } public struct DescribeMaintenanceWindowExecutionsRequest: AWSEncodableShape { - /// Each entry in the array is a structure containing: Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter. Values. An array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2021-11-04T05:00:00Z. + /// Each entry in the array is a structure containing: Key. A string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter. Values. An array of strings, each between 1 and 256 characters. 
Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2024-11-04T05:00:00Z. public let filters: [MaintenanceWindowFilter]? /// The maximum number of items to return for this call. The call also returns a token that you can specify in a subsequent call to get the next set of results. public let maxResults: Int? @@ -7150,7 +7158,7 @@ extension SSM { public let name: String? /// The priority of the task when it runs. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel. public let priority: Int? - /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// The targets where the task should run. public let targets: [Target]? @@ -7963,7 +7971,7 @@ extension SSM { public let lastPingDateTime: Date? /// The last date the association was successfully run. public let lastSuccessfulAssociationExecutionDate: Date? - /// The name assigned to an on-premises server, edge device, or virtual machine (VM) when it is activated as a Systems Manager managed node. The name is specified as the DefaultInstanceName property using the CreateActivation command. It is applied to the managed node by specifying the Activation Code and Activation ID when you install SSM Agent on the node, as explained in Install SSM Agent for a hybrid and multicloud environment (Linux) and Install SSM Agent for a hybrid and multicloud environment (Windows). To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference. + /// The name assigned to an on-premises server, edge device, or virtual machine (VM) when it is activated as a Systems Manager managed node. The name is specified as the DefaultInstanceName property using the CreateActivation command. 
It is applied to the managed node by specifying the Activation Code and Activation ID when you install SSM Agent on the node, as explained in How to install SSM Agent on hybrid Linux nodes and How to install SSM Agent on hybrid Windows Server nodes. To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference. public let name: String? /// Connection status of SSM Agent. The status Inactive has been deprecated and is no longer in use. public let pingStatus: PingStatus? @@ -8100,7 +8108,7 @@ extension SSM { public let installedPendingRebootCount: Int? /// The number of patches installed on a managed node that are specified in a RejectedPatches list. Patches with a status of InstalledRejected were typically installed before they were added to a RejectedPatches list. If ALLOW_AS_DEPENDENCY is the specified option for RejectedPatchesAction, the value of InstalledRejectedCount will always be 0 (zero). public let installedRejectedCount: Int? - /// An https URL or an Amazon Simple Storage Service (Amazon S3) path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML format and specify in the SSM document AWS-RunPatchBaseline, overrides the patches specified by the default patch baseline. For more information about the InstallOverrideList parameter, see About the AWS-RunPatchBaseline SSM document in the Amazon Web Services Systems Manager User Guide. + /// An https URL or an Amazon Simple Storage Service (Amazon S3) path-style URL to a list of patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML format and specify in the SSM document AWS-RunPatchBaseline, overrides the patches specified by the default patch baseline. For more information about the InstallOverrideList parameter, see SSM Command document for patching: AWS-RunPatchBaseline in the Amazon Web Services Systems Manager User Guide. public let installOverrideList: String? /// The ID of the managed node the high-level patch compliance information was collected for. public let instanceId: String @@ -9969,7 +9977,7 @@ extension SSM { public let outputS3KeyPrefix: String? /// The parameters for the RUN_COMMAND task execution. public let parameters: [String: [String]]? - /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. 
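Because the managed-node name described above is seeded from the DefaultInstanceName of a hybrid activation, here is a minimal, hypothetical sketch of CreateActivation in Soto (the role name and instance name are placeholders; `client` is assumed to be an already-configured AWSClient):

import SotoSSM

// Minimal sketch: create a hybrid activation. Nodes that register with the returned
// activation code and ID pick up DefaultInstanceName as their managed-node name.
func createHybridActivation(client: AWSClient) async throws -> (code: String?, id: String?) {
    let ssm = SSM(client: client, region: .useast1)
    let request = SSM.CreateActivationRequest(
        defaultInstanceName: "on-prem-web-server",          // placeholder node name
        description: "Activation for on-premises web servers",
        iamRole: "SSMServiceRoleForHybridNodes",            // placeholder role trusted by ssm.amazonaws.com
        registrationLimit: 10
    )
    let response = try await ssm.createActivation(request)
    return (response.activationCode, response.activationId)
}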
If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// If this time is reached and the command hasn't already started running, it doesn't run. public let timeoutSeconds: Int? @@ -10095,7 +10103,7 @@ extension SSM { public let name: String? /// The priority of the task in the maintenance window. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel. public let priority: Int? - /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// The targets (either managed nodes or tags). Managed nodes are specified using Key=instanceids,Values=,. Tags are specified using Key=,Values=. public let targets: [Target]? @@ -10477,7 +10485,7 @@ extension SSM { public let severity: String? /// The origin of the OpsItem, such as Amazon EC2 or Systems Manager. The impacted resource is a subset of source. public let source: String? - /// The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide. + /// The OpsItem status. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide. public let status: OpsItemStatus? /// A short heading that describes the nature of the OpsItem and the impacted resource. public let title: String? @@ -10773,7 +10781,7 @@ extension SSM { public let severity: String? /// The impacted Amazon Web Services resource. public let source: String? 
- /// The OpsItem status. Status can be Open, In Progress, or Resolved. + /// The OpsItem status. public let status: OpsItemStatus? /// A short heading that describes the nature of the OpsItem and the impacted resource. public let title: String? @@ -11463,9 +11471,9 @@ extension SSM { } public struct PatchRule: AWSEncodableShape & AWSDecodableShape { - /// The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released. This parameter is marked as not required, but your request must include a value for either ApproveAfterDays or ApproveUntilDate. Not supported for Debian Server or Ubuntu Server. + /// The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of 7 means that patches are approved seven days after they are released. This parameter is marked as Required: No, but your request must include a value for either ApproveAfterDays or ApproveUntilDate. Not supported for Debian Server or Ubuntu Server. Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the Windows Server tab in the topic How security patches are selected in the Amazon Web Services Systems Manager User Guide. public let approveAfterDays: Int? - /// The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Enter dates in the format YYYY-MM-DD. For example, 2021-12-31. This parameter is marked as not required, but your request must include a value for either ApproveUntilDate or ApproveAfterDays. Not supported for Debian Server or Ubuntu Server. + /// The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Enter dates in the format YYYY-MM-DD. For example, 2024-12-31. This parameter is marked as Required: No, but your request must include a value for either ApproveUntilDate or ApproveAfterDays. Not supported for Debian Server or Ubuntu Server. Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the Windows Server tab in the topic How security patches are selected in the Amazon Web Services Systems Manager User Guide. public let approveUntilDate: String? /// A compliance severity level for all approved patches in a patch baseline. public let complianceLevel: PatchComplianceLevel? @@ -12014,7 +12022,7 @@ extension SSM { public let name: String? /// The priority of the task in the maintenance window, the lower the number the higher the priority. Tasks in a maintenance window are scheduled in priority order with tasks that have the same priority scheduled in parallel. public let priority: Int? - /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. 
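The PatchRule fields above are easier to read in context: each rule needs exactly one of ApproveAfterDays or ApproveUntilDate, plus a filter group selecting the patches it applies to. A minimal, hypothetical sketch (the filter keys and values are typical Windows settings, not taken from this diff):

import SotoSSM

// Minimal sketch: auto-approve critical/important Windows security updates 7 days after release.
let securityRule = SSM.PatchRule(
    approveAfterDays: 7,                               // alternatively: approveUntilDate: "2024-12-31"
    complianceLevel: .critical,
    patchFilterGroup: SSM.PatchFilterGroup(patchFilters: [
        SSM.PatchFilter(key: .classification, values: ["SecurityUpdates", "CriticalUpdates"]),
        SSM.PatchFilter(key: .msrcSeverity, values: ["Critical", "Important"])
    ])
)
let approvalRules = SSM.PatchRuleGroup(patchRules: [securityRule])
// `approvalRules` is the value expected by CreatePatchBaselineRequest.approvalRules
// and UpdatePatchBaselineRequest.approvalRules.

As the updated doc comment warns, keep the approval window narrow for Windows Server baselines: because superseded patch updates are removed, an overly broad value can leave crucial patches uninstalled.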
If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// The targets (either managed nodes or maintenance window targets). One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide. Specify managed nodes using the following format: Key=InstanceIds,Values=, Specify maintenance window targets using the following format: Key=WindowTargetIds,Values=, public let targets: [Target]? @@ -13009,7 +13017,7 @@ extension SSM { public struct SessionFilter: AWSEncodableShape { /// The name of the filter. public let key: SessionFilterKey - /// The filter value. Valid values for each filter key are as follows: InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later. InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018. Target: Specify a managed node to which session connections have been made. Owner: Specify an Amazon Web Services user to see a list of sessions started by that user. Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include: Connected Connecting Disconnected Terminated Terminating Failed SessionId: Specify a session ID to return details about the session. + /// The filter value. Valid values for each filter key are as follows: InvokedAfter: Specify a timestamp to limit your results. For example, specify 2024-08-29T00:00:00Z to see sessions that started August 29, 2024, and later. InvokedBefore: Specify a timestamp to limit your results. For example, specify 2024-08-29T00:00:00Z to see sessions that started before August 29, 2024. Target: Specify a managed node to which session connections have been made. Owner: Specify an Amazon Web Services user to see a list of sessions started by that user. 
Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include: Connected Connecting Disconnected Terminated Terminating Failed SessionId: Specify a session ID to return details about the session. public let value: String @inlinable @@ -13116,9 +13124,9 @@ extension SSM { public let documentName: String /// The version of the Automation runbook to use for this execution. public let documentVersion: String? - /// The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10. + /// The maximum number of targets allowed to run this task in parallel. You can specify a number, such as 10, or a percentage, such as 10%. The default value is 10. If both this parameter and the TargetLocation:TargetsMaxConcurrency are supplied, TargetLocation:TargetsMaxConcurrency takes precedence. public let maxConcurrency: String? - /// The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received. Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time. + /// The number of errors that are allowed before the system stops running the automation on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops running the automation when the fourth error is received. If you specify 0, then the system stops running the automation on additional targets after the first error result is returned. If you run an automation on 50 resources and set max-errors to 10%, then the system stops running the automation on additional targets when the sixth error is received. Executions that are already running an automation when max-errors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one at a time. If this parameter and the TargetLocation:TargetsMaxErrors parameter are both supplied, TargetLocation:TargetsMaxErrors takes precedence. public let maxErrors: String? /// The execution mode of the automation. Valid modes include the following: Auto and Interactive. The default mode is Auto. public let mode: ExecutionMode? @@ -13126,17 +13134,19 @@ extension SSM { public let parameters: [String: [String]]? /// Optional metadata that you assign to a resource. You can specify a maximum of five tags for an automation. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. 
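The SessionFilter keys and value formats listed a little above translate directly into a DescribeSessions call; a minimal, hypothetical sketch, assuming `client` is an already-configured AWSClient:

import SotoSSM

// Minimal sketch: list terminated sessions started after a given ISO 8601 timestamp.
func listRecentSessionHistory(client: AWSClient) async throws -> [SSM.Session] {
    let ssm = SSM(client: client, region: .useast1)
    let request = SSM.DescribeSessionsRequest(
        filters: [SSM.SessionFilter(key: .invokedAfter, value: "2024-08-29T00:00:00Z")],
        state: .history                                // use .active for currently connected sessions
    )
    let response = try await ssm.describeSessions(request)
    return response.sessions ?? []
}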
For example, you might want to tag an automation to identify an environment or operating system. In this case, you could specify the following key-value pairs: Key=environment,Value=test Key=OS,Value=Windows To add tags to an existing automation, use the AddTagsToResource operation. public let tags: [Tag]? - /// A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and Amazon Web Services accounts in the Amazon Web Services Systems Manager User Guide. + /// A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the Amazon Web Services Systems Manager User Guide. public let targetLocations: [TargetLocation]? + /// Specify a publicly accessible URL for a file that contains the TargetLocations body. Currently, only files in presigned Amazon S3 buckets are supported. + public let targetLocationsURL: String? /// A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together. public let targetMaps: [[String: [String]]]? /// The name of the parameter used as the target resource for the rate-controlled execution. Required if you specify targets. public let targetParameterName: String? - /// A key-value mapping to target resources. Required if you specify TargetParameterName. + /// A key-value mapping to target resources. Required if you specify TargetParameterName. If both this parameter and the TargetLocation:Targets parameter are supplied, TargetLocation:Targets takes precedence. public let targets: [Target]? @inlinable - public init(alarmConfiguration: AlarmConfiguration? = nil, clientToken: String? = nil, documentName: String, documentVersion: String? = nil, maxConcurrency: String? = nil, maxErrors: String? = nil, mode: ExecutionMode? = nil, parameters: [String: [String]]? = nil, tags: [Tag]? = nil, targetLocations: [TargetLocation]? = nil, targetMaps: [[String: [String]]]? = nil, targetParameterName: String? = nil, targets: [Target]? = nil) { + public init(alarmConfiguration: AlarmConfiguration? = nil, clientToken: String? = nil, documentName: String, documentVersion: String? = nil, maxConcurrency: String? = nil, maxErrors: String? = nil, mode: ExecutionMode? = nil, parameters: [String: [String]]? = nil, tags: [Tag]? = nil, targetLocations: [TargetLocation]? = nil, targetLocationsURL: String? = nil, targetMaps: [[String: [String]]]? = nil, targetParameterName: String? = nil, targets: [Target]? 
= nil) { self.alarmConfiguration = alarmConfiguration self.clientToken = clientToken self.documentName = documentName @@ -13147,6 +13157,7 @@ extension SSM { self.parameters = parameters self.tags = tags self.targetLocations = targetLocations + self.targetLocationsURL = targetLocationsURL self.targetMaps = targetMaps self.targetParameterName = targetParameterName self.targets = targets @@ -13181,6 +13192,7 @@ extension SSM { } try self.validate(self.targetLocations, name: "targetLocations", parent: name, max: 100) try self.validate(self.targetLocations, name: "targetLocations", parent: name, min: 1) + try self.validate(self.targetLocationsURL, name: "targetLocationsURL", parent: name, pattern: "^https:\\/\\/[-a-zA-Z0-9@:%._\\+~#=]{1,253}\\.s3(\\.[a-z\\d-]{9,16})?\\.amazonaws\\.com\\/.{1,2000}$") try self.targetMaps?.forEach { try validate($0, name: "targetMaps[]", parent: name, max: 20) try validate($0, name: "targetMaps[]", parent: name, min: 1) @@ -13205,6 +13217,7 @@ extension SSM { case parameters = "Parameters" case tags = "Tags" case targetLocations = "TargetLocations" + case targetLocationsURL = "TargetLocationsURL" case targetMaps = "TargetMaps" case targetParameterName = "TargetParameterName" case targets = "Targets" @@ -13598,8 +13611,12 @@ extension SSM { public struct TargetLocation: AWSEncodableShape & AWSDecodableShape { /// The Amazon Web Services accounts targeted by the current Automation execution. public let accounts: [String]? + /// Amazon Web Services accounts or organizational units to exclude as expanded targets. + public let excludeAccounts: [String]? /// The Automation execution role used by the currently running Automation. If not specified, the default value is AWS-SystemsManager-AutomationExecutionRole. public let executionRoleName: String? + /// Indicates whether to include child organizational units (OUs) that are children of the targeted OUs. The default is false. + public let includeChildOrganizationUnits: Bool? /// The Amazon Web Services Regions targeted by the current Automation execution. public let regions: [String]? public let targetLocationAlarmConfiguration: AlarmConfiguration? @@ -13607,20 +13624,38 @@ extension SSM { public let targetLocationMaxConcurrency: String? /// The maximum number of errors allowed before the system stops queueing additional Automation executions for the currently running Automation. public let targetLocationMaxErrors: String? + /// A list of key-value mappings to target resources. If you specify values for this data type, you must also specify a value for TargetParameterName. This Targets parameter takes precedence over the StartAutomationExecution:Targets parameter if both are supplied. + public let targets: [Target]? + /// The maximum number of targets allowed to run this task in parallel. This TargetsMaxConcurrency takes precedence over the StartAutomationExecution:MaxConcurrency parameter if both are supplied. + public let targetsMaxConcurrency: String? + /// The maximum number of errors that are allowed before the system stops running the automation on additional targets. This TargetsMaxErrors parameter takes precedence over the StartAutomationExecution:MaxErrors parameter if both are supplied. + public let targetsMaxErrors: String? @inlinable - public init(accounts: [String]? = nil, executionRoleName: String? = nil, regions: [String]? = nil, targetLocationAlarmConfiguration: AlarmConfiguration? = nil, targetLocationMaxConcurrency: String? = nil, targetLocationMaxErrors: String? 
= nil) { + public init(accounts: [String]? = nil, excludeAccounts: [String]? = nil, executionRoleName: String? = nil, includeChildOrganizationUnits: Bool? = nil, regions: [String]? = nil, targetLocationAlarmConfiguration: AlarmConfiguration? = nil, targetLocationMaxConcurrency: String? = nil, targetLocationMaxErrors: String? = nil, targets: [Target]? = nil, targetsMaxConcurrency: String? = nil, targetsMaxErrors: String? = nil) { self.accounts = accounts + self.excludeAccounts = excludeAccounts self.executionRoleName = executionRoleName + self.includeChildOrganizationUnits = includeChildOrganizationUnits self.regions = regions self.targetLocationAlarmConfiguration = targetLocationAlarmConfiguration self.targetLocationMaxConcurrency = targetLocationMaxConcurrency self.targetLocationMaxErrors = targetLocationMaxErrors + self.targets = targets + self.targetsMaxConcurrency = targetsMaxConcurrency + self.targetsMaxErrors = targetsMaxErrors } public func validate(name: String) throws { try self.validate(self.accounts, name: "accounts", parent: name, max: 50) try self.validate(self.accounts, name: "accounts", parent: name, min: 1) + try self.excludeAccounts?.forEach { + try validate($0, name: "excludeAccounts[]", parent: name, max: 68) + try validate($0, name: "excludeAccounts[]", parent: name, min: 6) + try validate($0, name: "excludeAccounts[]", parent: name, pattern: "^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32})|(\\d{12})$") + } + try self.validate(self.excludeAccounts, name: "excludeAccounts", parent: name, max: 5000) + try self.validate(self.excludeAccounts, name: "excludeAccounts", parent: name, min: 1) try self.validate(self.executionRoleName, name: "executionRoleName", parent: name, max: 64) try self.validate(self.executionRoleName, name: "executionRoleName", parent: name, min: 1) try self.validate(self.executionRoleName, name: "executionRoleName", parent: name, pattern: "^[\\w+=,.@/-]+$") @@ -13633,15 +13668,30 @@ extension SSM { try self.validate(self.targetLocationMaxErrors, name: "targetLocationMaxErrors", parent: name, max: 7) try self.validate(self.targetLocationMaxErrors, name: "targetLocationMaxErrors", parent: name, min: 1) try self.validate(self.targetLocationMaxErrors, name: "targetLocationMaxErrors", parent: name, pattern: "^([1-9][0-9]*|[0]|[1-9][0-9]%|[0-9]%|100%)$") + try self.targets?.forEach { + try $0.validate(name: "\(name).targets[]") + } + try self.validate(self.targets, name: "targets", parent: name, max: 5) + try self.validate(self.targetsMaxConcurrency, name: "targetsMaxConcurrency", parent: name, max: 7) + try self.validate(self.targetsMaxConcurrency, name: "targetsMaxConcurrency", parent: name, min: 1) + try self.validate(self.targetsMaxConcurrency, name: "targetsMaxConcurrency", parent: name, pattern: "^([1-9][0-9]*|[1-9][0-9]%|[1-9]%|100%)$") + try self.validate(self.targetsMaxErrors, name: "targetsMaxErrors", parent: name, max: 7) + try self.validate(self.targetsMaxErrors, name: "targetsMaxErrors", parent: name, min: 1) + try self.validate(self.targetsMaxErrors, name: "targetsMaxErrors", parent: name, pattern: "^([1-9][0-9]*|[0]|[1-9][0-9]%|[0-9]%|100%)$") } private enum CodingKeys: String, CodingKey { case accounts = "Accounts" + case excludeAccounts = "ExcludeAccounts" case executionRoleName = "ExecutionRoleName" + case includeChildOrganizationUnits = "IncludeChildOrganizationUnits" case regions = "Regions" case targetLocationAlarmConfiguration = "TargetLocationAlarmConfiguration" case targetLocationMaxConcurrency = "TargetLocationMaxConcurrency" case 
targetLocationMaxErrors = "TargetLocationMaxErrors" + case targets = "Targets" + case targetsMaxConcurrency = "TargetsMaxConcurrency" + case targetsMaxErrors = "TargetsMaxErrors" } } @@ -14306,7 +14356,7 @@ extension SSM { public let priority: Int? /// If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow operation are also required for this API request. Optional fields that aren't specified are set to null. public let replace: Bool? - /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// The targets (either managed nodes or tags) to modify. Managed nodes are specified using the format Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using the format Key=tag_name,Values=tag_value. One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation, Lambda, and Step Functions). For more information about running tasks that don't specify targets, see Registering maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide. public let targets: [Target]? @@ -14413,7 +14463,7 @@ extension SSM { public let name: String? /// The updated priority value. public let priority: Int? - /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. 
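The new TargetLocation fields and the StartAutomationExecution precedence rules documented above fit together like this; a minimal sketch built from the initializers shown in this diff (the OU, account ID, and runbook choices are placeholders, and `client` is assumed to be an already-configured AWSClient):

import SotoSSM

// Minimal sketch: run an Automation runbook across an OU (minus one account) in two Regions,
// letting the per-location Targets/TargetsMaxConcurrency/TargetsMaxErrors override the
// request-level settings, as the doc comments above describe.
func startCrossAccountAutomation(client: AWSClient) async throws -> String? {
    let ssm = SSM(client: client, region: .useast1)

    let location = SSM.TargetLocation(
        accounts: ["ou-abcd-11111111"],                 // placeholder organizational unit
        excludeAccounts: ["111122223333"],              // placeholder account to skip
        executionRoleName: "AWS-SystemsManager-AutomationExecutionRole",
        includeChildOrganizationUnits: true,            // also expand child OUs
        regions: ["us-east-1", "eu-west-1"],
        targets: [SSM.Target(key: "tag:Environment", values: ["Production"])],
        targetsMaxConcurrency: "10%",                   // takes precedence over request-level maxConcurrency
        targetsMaxErrors: "1"                           // takes precedence over request-level maxErrors
    )

    let request = SSM.StartAutomationExecutionRequest(
        documentName: "AWS-RestartEC2Instance",         // placeholder runbook
        targetLocations: [location],
        targetParameterName: "InstanceId"
    )
    let response = try await ssm.startAutomationExecution(request)
    return response.automationExecutionId
}

Alternatively, the new targetLocationsURL parameter can carry a presigned Amazon S3 URL pointing at a file that contains the TargetLocations body, instead of passing targetLocations inline.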
For more information, see Setting up maintenance windows in the in the Amazon Web Services Systems Manager User Guide. + /// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow. However, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see Setting up Maintenance Windows in the in the Amazon Web Services Systems Manager User Guide. public let serviceRoleArn: String? /// The updated target values. public let targets: [Target]? @@ -14467,7 +14517,7 @@ extension SSM { } public struct UpdateManagedInstanceRoleRequest: AWSEncodableShape { - /// The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid and multicloud environment in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role. + /// The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide. You can't specify an IAM service-linked role for this parameter. You must create a unique role. public let iamRole: String /// The ID of the managed node where you want to update the role. public let instanceId: String @@ -14524,7 +14574,7 @@ extension SSM { public let relatedOpsItems: [RelatedOpsItem]? /// Specify a new severity for an OpsItem. public let severity: String? - /// The OpsItem status. Status can be Open, In Progress, or Resolved. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide. + /// The OpsItem status. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide. public let status: OpsItemStatus? /// A short heading that describes the nature of the OpsItem and the impacted resource. public let title: String? @@ -14660,7 +14710,7 @@ extension SSM { public struct UpdatePatchBaselineRequest: AWSEncodableShape { /// A set of rules used to include patches in the baseline. public let approvalRules: PatchRuleGroup? - /// A list of explicitly approved patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// A list of explicitly approved patches for the baseline. 
For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. public let approvedPatches: [String]? /// Assigns a new compliance severity level to an existing patch baseline. public let approvedPatchesComplianceLevel: PatchComplianceLevel? @@ -14674,7 +14724,7 @@ extension SSM { public let globalFilters: PatchFilterGroup? /// The name of the patch baseline. public let name: String? - /// A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. + /// A list of explicitly rejected patches for the baseline. For information about accepted formats for lists of approved patches and rejected patches, see Package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. public let rejectedPatches: [String]? /// The action for Patch Manager to take on patches included in the RejectedPackages list. ALLOW_AS_DEPENDENCY Linux and macOS: A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as INSTALLED_OTHER. This is the default action if no option is specified. Windows Server: Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list and already installed on the node, its status is reported as INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the default action if no option is specified. BLOCK All OSs: Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED. public let rejectedPatchesAction: PatchAction? @@ -15274,7 +15324,7 @@ public struct SSMErrorType: AWSErrorType { public static var subTypeCountLimitExceededException: Self { .init(.subTypeCountLimitExceededException) } /// You specified the Safe option for the DeregisterTargetFromMaintenanceWindow operation, but the target is still referenced in a task. public static var targetInUseException: Self { .init(.targetInUseException) } - /// The specified target managed node for the session isn't fully configured for use with Session Manager. For more information, see Getting started with Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you attempt to start a session on a managed node that is located in a different account or Region + /// The specified target managed node for the session isn't fully configured for use with Session Manager. For more information, see Setting up Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you attempt to start a session on a managed node that is located in a different account or Region public static var targetNotConnected: Self { .init(.targetNotConnected) } /// The Targets parameter includes too many tags. Remove one or more tags and try the command again. 
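For the FIPS variant endpoints that the SSMQuickSetup hunk below registers, Soto clients opt in through the service options rather than a hard-coded hostname; a minimal, hypothetical sketch, assuming `client` is an already-configured AWSClient:

import SotoSSMQuickSetup

// Minimal sketch: construct the service against its FIPS endpoint variant.
let quickSetup = SSMQuickSetup(client: client, region: .useast1, options: .useFipsEndpoint)
// With the variant map added below, us-east-1 requests should resolve to
// ssm-quicksetup-fips.us-east-1.amazonaws.com.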
public static var tooManyTagsError: Self { .init(.tooManyTagsError) } diff --git a/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_api.swift b/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_api.swift index afbab0ad1c..3f72d30772 100644 --- a/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_api.swift +++ b/Sources/Soto/Services/SSMQuickSetup/SSMQuickSetup_api.swift @@ -65,6 +65,7 @@ public struct SSMQuickSetup: AWSService { serviceProtocol: .restjson, apiVersion: "2018-05-10", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: SSMQuickSetupErrorType.self, middleware: middleware, timeout: timeout, @@ -76,6 +77,16 @@ public struct SSMQuickSetup: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "ca-central-1": "ssm-quicksetup-fips.ca-central-1.amazonaws.com", + "us-east-1": "ssm-quicksetup-fips.us-east-1.amazonaws.com", + "us-east-2": "ssm-quicksetup-fips.us-east-2.amazonaws.com", + "us-west-1": "ssm-quicksetup-fips.us-west-1.amazonaws.com", + "us-west-2": "ssm-quicksetup-fips.us-west-2.amazonaws.com" + ]) + ]} // MARK: API Calls diff --git a/Sources/Soto/Services/SageMaker/SageMaker_api.swift b/Sources/Soto/Services/SageMaker/SageMaker_api.swift index 327dead69b..c572fa632f 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_api.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_api.swift @@ -889,6 +889,7 @@ public struct SageMaker: AWSService { /// - domainSettings: A collection of Domain settings. /// - kmsKeyId: SageMaker uses Amazon Web Services KMS to encrypt EFS and EBS volumes attached to the domain with an Amazon Web Services managed key by default. For more control, specify a customer managed key. /// - subnetIds: The VPC subnets that the domain uses for communication. + /// - tagPropagation: Indicates whether custom tag propagation is supported for the domain. Defaults to DISABLED. /// - tags: Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. Tags that you specify for the Domain are also added to all Apps that the Domain launches. /// - vpcId: The ID of the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. /// - logger: Logger use during operation @@ -903,6 +904,7 @@ public struct SageMaker: AWSService { domainSettings: DomainSettings? = nil, kmsKeyId: String? = nil, subnetIds: [String]? = nil, + tagPropagation: TagPropagation? = nil, tags: [Tag]? = nil, vpcId: String? = nil, logger: Logger = AWSClient.loggingDisabled @@ -917,6 +919,7 @@ public struct SageMaker: AWSService { domainSettings: domainSettings, kmsKeyId: kmsKeyId, subnetIds: subnetIds, + tagPropagation: tagPropagation, tags: tags, vpcId: vpcId ) @@ -12168,6 +12171,7 @@ public struct SageMaker: AWSService { /// - domainId: The ID of the domain to be updated. /// - domainSettingsForUpdate: A collection of DomainSettings configuration values to update. /// - subnetIds: The VPC subnets that Studio uses for communication. If removing subnets, ensure there are no apps in the InService, Pending, or Deleting state. + /// - tagPropagation: Indicates whether custom tag propagation is supported for the domain. Defaults to DISABLED. /// - logger: Logger use during operation @inlinable public func updateDomain( @@ -12178,6 +12182,7 @@ public struct SageMaker: AWSService { domainId: String? 
= nil, domainSettingsForUpdate: DomainSettingsForUpdate? = nil, subnetIds: [String]? = nil, + tagPropagation: TagPropagation? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateDomainResponse { let input = UpdateDomainRequest( @@ -12187,7 +12192,8 @@ public struct SageMaker: AWSService { defaultUserSettings: defaultUserSettings, domainId: domainId, domainSettingsForUpdate: domainSettingsForUpdate, - subnetIds: subnetIds + subnetIds: subnetIds, + tagPropagation: tagPropagation ) return try await self.updateDomain(input, logger: logger) } diff --git a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift index ba443a16e2..ec94594ff8 100644 --- a/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift +++ b/Sources/Soto/Services/SageMaker/SageMaker_shapes.swift @@ -130,6 +130,14 @@ extension SageMaker { case mlG648Xlarge = "ml.g6.48xlarge" case mlG64Xlarge = "ml.g6.4xlarge" case mlG68Xlarge = "ml.g6.8xlarge" + case mlG6E12Xlarge = "ml.g6e.12xlarge" + case mlG6E16Xlarge = "ml.g6e.16xlarge" + case mlG6E24Xlarge = "ml.g6e.24xlarge" + case mlG6E2Xlarge = "ml.g6e.2xlarge" + case mlG6E48Xlarge = "ml.g6e.48xlarge" + case mlG6E4Xlarge = "ml.g6e.4xlarge" + case mlG6E8Xlarge = "ml.g6e.8xlarge" + case mlG6EXlarge = "ml.g6e.xlarge" case mlG6Xlarge = "ml.g6.xlarge" case mlGeospatialInteractive = "ml.geospatial.interactive" case mlM512Xlarge = "ml.m5.12xlarge" @@ -1513,6 +1521,7 @@ extension SageMaker { case jumpStart = "JumpStart" case modelEvaluation = "ModelEvaluation" case models = "Models" + case performanceEvaluation = "PerformanceEvaluation" case pipelines = "Pipelines" case projects = "Projects" case training = "Training" @@ -2376,6 +2385,11 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum SageMakerImageName: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case sagemakerDistribution = "sagemaker_distribution" + public var description: String { return self.rawValue } + } + public enum SagemakerServicecatalogStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case disabled = "Disabled" case enabled = "Enabled" @@ -2608,6 +2622,12 @@ extension SageMaker { public var description: String { return self.rawValue } } + public enum TagPropagation: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case disabled = "DISABLED" + case enabled = "ENABLED" + public var description: String { return self.rawValue } + } + public enum TargetDevice: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case aisage = "aisage" case ambaCv2 = "amba_cv2" @@ -5937,7 +5957,7 @@ extension SageMaker { try self.validate(self.executionRole, name: "executionRole", parent: name, max: 2048) try self.validate(self.executionRole, name: "executionRole", parent: name, min: 20) try self.validate(self.executionRole, name: "executionRole", parent: name, pattern: "^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$") - try self.validate(self.instanceCount, name: "instanceCount", parent: name, min: 1) + try self.validate(self.instanceCount, name: "instanceCount", parent: name, min: 0) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, max: 63) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, min: 1) try self.validate(self.instanceGroupName, name: "instanceGroupName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$") @@ 
-6198,6 +6218,8 @@ extension SageMaker { public struct CodeEditorAppSettings: AWSEncodableShape & AWSDecodableShape { /// Settings that are used to configure and manage the lifecycle of CodeEditor applications. public let appLifecycleManagement: AppLifecycleManagement? + /// The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default lifecycle configuration. + public let builtInLifecycleConfigArn: String? /// A list of custom SageMaker images that are configured to run as a Code Editor app. public let customImages: [CustomImage]? public let defaultResourceSpec: ResourceSpec? @@ -6205,8 +6227,9 @@ extension SageMaker { public let lifecycleConfigArns: [String]? @inlinable - public init(appLifecycleManagement: AppLifecycleManagement? = nil, customImages: [CustomImage]? = nil, defaultResourceSpec: ResourceSpec? = nil, lifecycleConfigArns: [String]? = nil) { + public init(appLifecycleManagement: AppLifecycleManagement? = nil, builtInLifecycleConfigArn: String? = nil, customImages: [CustomImage]? = nil, defaultResourceSpec: ResourceSpec? = nil, lifecycleConfigArns: [String]? = nil) { self.appLifecycleManagement = appLifecycleManagement + self.builtInLifecycleConfigArn = builtInLifecycleConfigArn self.customImages = customImages self.defaultResourceSpec = defaultResourceSpec self.lifecycleConfigArns = lifecycleConfigArns @@ -6214,6 +6237,8 @@ extension SageMaker { public func validate(name: String) throws { try self.appLifecycleManagement?.validate(name: "\(name).appLifecycleManagement") + try self.validate(self.builtInLifecycleConfigArn, name: "builtInLifecycleConfigArn", parent: name, max: 256) + try self.validate(self.builtInLifecycleConfigArn, name: "builtInLifecycleConfigArn", parent: name, pattern: "^(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:studio-lifecycle-config/.*|None)$") try self.customImages?.forEach { try $0.validate(name: "\(name).customImages[]") } @@ -6227,6 +6252,7 @@ extension SageMaker { private enum CodingKeys: String, CodingKey { case appLifecycleManagement = "AppLifecycleManagement" + case builtInLifecycleConfigArn = "BuiltInLifecycleConfigArn" case customImages = "CustomImages" case defaultResourceSpec = "DefaultResourceSpec" case lifecycleConfigArns = "LifecycleConfigArns" @@ -7625,13 +7651,15 @@ extension SageMaker { public let kmsKeyId: String? /// The VPC subnets that the domain uses for communication. public let subnetIds: [String]? + /// Indicates whether custom tag propagation is supported for the domain. Defaults to DISABLED. + public let tagPropagation: TagPropagation? /// Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API. Tags that you specify for the Domain are also added to all Apps that the Domain launches. public let tags: [Tag]? /// The ID of the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. public let vpcId: String? @inlinable - public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, authMode: AuthMode? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainName: String? = nil, domainSettings: DomainSettings? = nil, kmsKeyId: String? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, vpcId: String? = nil) { + public init(appNetworkAccessType: AppNetworkAccessType? 
= nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, authMode: AuthMode? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainName: String? = nil, domainSettings: DomainSettings? = nil, kmsKeyId: String? = nil, subnetIds: [String]? = nil, tagPropagation: TagPropagation? = nil, tags: [Tag]? = nil, vpcId: String? = nil) { self.appNetworkAccessType = appNetworkAccessType self.appSecurityGroupManagement = appSecurityGroupManagement self.authMode = authMode @@ -7642,13 +7670,14 @@ extension SageMaker { self.homeEfsFileSystemKmsKeyId = nil self.kmsKeyId = kmsKeyId self.subnetIds = subnetIds + self.tagPropagation = tagPropagation self.tags = tags self.vpcId = vpcId } @available(*, deprecated, message: "Members homeEfsFileSystemKmsKeyId have been deprecated") @inlinable - public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, authMode: AuthMode? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainName: String? = nil, domainSettings: DomainSettings? = nil, homeEfsFileSystemKmsKeyId: String? = nil, kmsKeyId: String? = nil, subnetIds: [String]? = nil, tags: [Tag]? = nil, vpcId: String? = nil) { + public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, authMode: AuthMode? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainName: String? = nil, domainSettings: DomainSettings? = nil, homeEfsFileSystemKmsKeyId: String? = nil, kmsKeyId: String? = nil, subnetIds: [String]? = nil, tagPropagation: TagPropagation? = nil, tags: [Tag]? = nil, vpcId: String? = nil) { self.appNetworkAccessType = appNetworkAccessType self.appSecurityGroupManagement = appSecurityGroupManagement self.authMode = authMode @@ -7659,6 +7688,7 @@ extension SageMaker { self.homeEfsFileSystemKmsKeyId = homeEfsFileSystemKmsKeyId self.kmsKeyId = kmsKeyId self.subnetIds = subnetIds + self.tagPropagation = tagPropagation self.tags = tags self.vpcId = vpcId } @@ -7698,6 +7728,7 @@ extension SageMaker { case homeEfsFileSystemKmsKeyId = "HomeEfsFileSystemKmsKeyId" case kmsKeyId = "KmsKeyId" case subnetIds = "SubnetIds" + case tagPropagation = "TagPropagation" case tags = "Tags" case vpcId = "VpcId" } @@ -13478,6 +13509,8 @@ extension SageMaker { public let appName: String? /// The type of app. public let appType: AppType? + /// The lifecycle configuration that runs before the default lifecycle configuration + public let builtInLifecycleConfigArn: String? /// The creation time of the application. After an application has been shut down for 24 hours, SageMaker deletes all metadata for the application. To be considered an update and retain application metadata, applications must be restarted within 24 hours after the previous application has been shut down. After this time window, creation of an application is considered a new application rather than an update of the previous application. public let creationTime: Date? /// The domain ID. @@ -13498,10 +13531,11 @@ extension SageMaker { public let userProfileName: String? @inlinable - public init(appArn: String? = nil, appName: String? = nil, appType: AppType? = nil, creationTime: Date? = nil, domainId: String? = nil, failureReason: String? = nil, lastHealthCheckTimestamp: Date? = nil, lastUserActivityTimestamp: Date? = nil, resourceSpec: ResourceSpec? = nil, spaceName: String? 
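The TagPropagation setting threaded through CreateDomainRequest above can be exercised as follows; a minimal, hypothetical sketch (the execution role ARN, subnet, and VPC IDs are placeholders, and `client` is assumed to be an already-configured AWSClient):

import SotoSageMaker

// Minimal sketch: create a domain with custom tag propagation enabled so domain tags
// propagate to the resources the domain launches.
func createTaggedDomain(client: AWSClient) async throws -> String? {
    let sagemaker = SageMaker(client: client, region: .useast1)
    let request = SageMaker.CreateDomainRequest(
        authMode: .iam,
        defaultUserSettings: SageMaker.UserSettings(
            executionRole: "arn:aws:iam::111122223333:role/SageMakerExecutionRole"   // placeholder
        ),
        domainName: "example-domain",
        subnetIds: ["subnet-0123456789abcdef0"],        // placeholder
        tagPropagation: .enabled,                       // defaults to DISABLED when omitted
        tags: [SageMaker.Tag(key: "Team", value: "ml-platform")],
        vpcId: "vpc-0123456789abcdef0"                  // placeholder
    )
    let response = try await sagemaker.createDomain(request)
    return response.domainArn
}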
= nil, status: AppStatus? = nil, userProfileName: String? = nil) { + public init(appArn: String? = nil, appName: String? = nil, appType: AppType? = nil, builtInLifecycleConfigArn: String? = nil, creationTime: Date? = nil, domainId: String? = nil, failureReason: String? = nil, lastHealthCheckTimestamp: Date? = nil, lastUserActivityTimestamp: Date? = nil, resourceSpec: ResourceSpec? = nil, spaceName: String? = nil, status: AppStatus? = nil, userProfileName: String? = nil) { self.appArn = appArn self.appName = appName self.appType = appType + self.builtInLifecycleConfigArn = builtInLifecycleConfigArn self.creationTime = creationTime self.domainId = domainId self.failureReason = failureReason @@ -13517,6 +13551,7 @@ extension SageMaker { case appArn = "AppArn" case appName = "AppName" case appType = "AppType" + case builtInLifecycleConfigArn = "BuiltInLifecycleConfigArn" case creationTime = "CreationTime" case domainId = "DomainId" case failureReason = "FailureReason" @@ -14442,13 +14477,15 @@ extension SageMaker { public let status: DomainStatus? /// The VPC subnets that the domain uses for communication. public let subnetIds: [String]? + /// Indicates whether custom tag propagation is supported for the domain. + public let tagPropagation: TagPropagation? /// The domain's URL. public let url: String? /// The ID of the Amazon Virtual Private Cloud (VPC) that the domain uses for communication. public let vpcId: String? @inlinable - public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, authMode: AuthMode? = nil, creationTime: Date? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainArn: String? = nil, domainId: String? = nil, domainName: String? = nil, domainSettings: DomainSettings? = nil, failureReason: String? = nil, homeEfsFileSystemId: String? = nil, kmsKeyId: String? = nil, lastModifiedTime: Date? = nil, securityGroupIdForDomainBoundary: String? = nil, singleSignOnApplicationArn: String? = nil, singleSignOnManagedApplicationInstanceId: String? = nil, status: DomainStatus? = nil, subnetIds: [String]? = nil, url: String? = nil, vpcId: String? = nil) { + public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, authMode: AuthMode? = nil, creationTime: Date? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainArn: String? = nil, domainId: String? = nil, domainName: String? = nil, domainSettings: DomainSettings? = nil, failureReason: String? = nil, homeEfsFileSystemId: String? = nil, kmsKeyId: String? = nil, lastModifiedTime: Date? = nil, securityGroupIdForDomainBoundary: String? = nil, singleSignOnApplicationArn: String? = nil, singleSignOnManagedApplicationInstanceId: String? = nil, status: DomainStatus? = nil, subnetIds: [String]? = nil, tagPropagation: TagPropagation? = nil, url: String? = nil, vpcId: String? 
= nil) { self.appNetworkAccessType = appNetworkAccessType self.appSecurityGroupManagement = appSecurityGroupManagement self.authMode = authMode @@ -14469,13 +14506,14 @@ extension SageMaker { self.singleSignOnManagedApplicationInstanceId = singleSignOnManagedApplicationInstanceId self.status = status self.subnetIds = subnetIds + self.tagPropagation = tagPropagation self.url = url self.vpcId = vpcId } @available(*, deprecated, message: "Members homeEfsFileSystemKmsKeyId have been deprecated") @inlinable - public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, authMode: AuthMode? = nil, creationTime: Date? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainArn: String? = nil, domainId: String? = nil, domainName: String? = nil, domainSettings: DomainSettings? = nil, failureReason: String? = nil, homeEfsFileSystemId: String? = nil, homeEfsFileSystemKmsKeyId: String? = nil, kmsKeyId: String? = nil, lastModifiedTime: Date? = nil, securityGroupIdForDomainBoundary: String? = nil, singleSignOnApplicationArn: String? = nil, singleSignOnManagedApplicationInstanceId: String? = nil, status: DomainStatus? = nil, subnetIds: [String]? = nil, url: String? = nil, vpcId: String? = nil) { + public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, authMode: AuthMode? = nil, creationTime: Date? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainArn: String? = nil, domainId: String? = nil, domainName: String? = nil, domainSettings: DomainSettings? = nil, failureReason: String? = nil, homeEfsFileSystemId: String? = nil, homeEfsFileSystemKmsKeyId: String? = nil, kmsKeyId: String? = nil, lastModifiedTime: Date? = nil, securityGroupIdForDomainBoundary: String? = nil, singleSignOnApplicationArn: String? = nil, singleSignOnManagedApplicationInstanceId: String? = nil, status: DomainStatus? = nil, subnetIds: [String]? = nil, tagPropagation: TagPropagation? = nil, url: String? = nil, vpcId: String? = nil) { self.appNetworkAccessType = appNetworkAccessType self.appSecurityGroupManagement = appSecurityGroupManagement self.authMode = authMode @@ -14496,6 +14534,7 @@ extension SageMaker { self.singleSignOnManagedApplicationInstanceId = singleSignOnManagedApplicationInstanceId self.status = status self.subnetIds = subnetIds + self.tagPropagation = tagPropagation self.url = url self.vpcId = vpcId } @@ -14521,6 +14560,7 @@ extension SageMaker { case singleSignOnManagedApplicationInstanceId = "SingleSignOnManagedApplicationInstanceId" case status = "Status" case subnetIds = "SubnetIds" + case tagPropagation = "TagPropagation" case url = "Url" case vpcId = "VpcId" } @@ -20780,6 +20820,33 @@ extension SageMaker { } } + public struct HiddenSageMakerImage: AWSEncodableShape & AWSDecodableShape { + /// The SageMaker image name that you are hiding from the Studio user interface. + public let sageMakerImageName: SageMakerImageName? + /// The version aliases you are hiding from the Studio user interface. + public let versionAliases: [String]? + + @inlinable + public init(sageMakerImageName: SageMakerImageName? = nil, versionAliases: [String]? 
= nil) { + self.sageMakerImageName = sageMakerImageName + self.versionAliases = versionAliases + } + + public func validate(name: String) throws { + try self.versionAliases?.forEach { + try validate($0, name: "versionAliases[]", parent: name, max: 128) + try validate($0, name: "versionAliases[]", parent: name, min: 1) + try validate($0, name: "versionAliases[]", parent: name, pattern: "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)$") + } + try self.validate(self.versionAliases, name: "versionAliases", parent: name, max: 20) + } + + private enum CodingKeys: String, CodingKey { + case sageMakerImageName = "SageMakerImageName" + case versionAliases = "VersionAliases" + } + } + public struct HolidayConfigAttributes: AWSEncodableShape & AWSDecodableShape { /// The country code for the holiday calendar. For the list of public holiday calendars supported by AutoML job V2, see Country Codes. Use the country code corresponding to the country of your choice. public let countryCode: String? @@ -22878,6 +22945,8 @@ extension SageMaker { public struct JupyterLabAppSettings: AWSEncodableShape & AWSDecodableShape { /// Indicates whether idle shutdown is activated for JupyterLab applications. public let appLifecycleManagement: AppLifecycleManagement? + /// The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default lifecycle configuration. + public let builtInLifecycleConfigArn: String? /// A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application. public let codeRepositories: [CodeRepository]? /// A list of custom SageMaker images that are configured to run as a JupyterLab app. @@ -22889,8 +22958,9 @@ extension SageMaker { public let lifecycleConfigArns: [String]? @inlinable - public init(appLifecycleManagement: AppLifecycleManagement? = nil, codeRepositories: [CodeRepository]? = nil, customImages: [CustomImage]? = nil, defaultResourceSpec: ResourceSpec? = nil, emrSettings: EmrSettings? = nil, lifecycleConfigArns: [String]? = nil) { + public init(appLifecycleManagement: AppLifecycleManagement? = nil, builtInLifecycleConfigArn: String? = nil, codeRepositories: [CodeRepository]? = nil, customImages: [CustomImage]? = nil, defaultResourceSpec: ResourceSpec? = nil, emrSettings: EmrSettings? = nil, lifecycleConfigArns: [String]? 
= nil) { self.appLifecycleManagement = appLifecycleManagement + self.builtInLifecycleConfigArn = builtInLifecycleConfigArn self.codeRepositories = codeRepositories self.customImages = customImages self.defaultResourceSpec = defaultResourceSpec @@ -22900,6 +22970,8 @@ extension SageMaker { public func validate(name: String) throws { try self.appLifecycleManagement?.validate(name: "\(name).appLifecycleManagement") + try self.validate(self.builtInLifecycleConfigArn, name: "builtInLifecycleConfigArn", parent: name, max: 256) + try self.validate(self.builtInLifecycleConfigArn, name: "builtInLifecycleConfigArn", parent: name, pattern: "^(arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:studio-lifecycle-config/.*|None)$") try self.codeRepositories?.forEach { try $0.validate(name: "\(name).codeRepositories[]") } @@ -22918,6 +22990,7 @@ extension SageMaker { private enum CodingKeys: String, CodingKey { case appLifecycleManagement = "AppLifecycleManagement" + case builtInLifecycleConfigArn = "BuiltInLifecycleConfigArn" case codeRepositories = "CodeRepositories" case customImages = "CustomImages" case defaultResourceSpec = "DefaultResourceSpec" @@ -35356,6 +35429,8 @@ extension SageMaker { public let compressionType: ModelCompressionType? /// Configuration information for hub access. public let hubAccessConfig: InferenceHubAccessConfig? + /// The Amazon S3 URI of the manifest file. The manifest file is a CSV file that stores the artifact locations. + public let manifestS3Uri: String? /// Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model. public let modelAccessConfig: ModelAccessConfig? /// Specifies the type of ML model data to deploy. If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix as part of the ML model data to deploy. A valid key name prefix identified by S3Uri always ends with a forward slash (/). If you choose S3Object, S3Uri identifies an object that is the ML model data to deploy. @@ -35364,9 +35439,10 @@ extension SageMaker { public let s3Uri: String? @inlinable - public init(compressionType: ModelCompressionType? = nil, hubAccessConfig: InferenceHubAccessConfig? = nil, modelAccessConfig: ModelAccessConfig? = nil, s3DataType: S3ModelDataType? = nil, s3Uri: String? = nil) { + public init(compressionType: ModelCompressionType? = nil, hubAccessConfig: InferenceHubAccessConfig? = nil, manifestS3Uri: String? = nil, modelAccessConfig: ModelAccessConfig? = nil, s3DataType: S3ModelDataType? = nil, s3Uri: String? 
= nil) { self.compressionType = compressionType self.hubAccessConfig = hubAccessConfig + self.manifestS3Uri = manifestS3Uri self.modelAccessConfig = modelAccessConfig self.s3DataType = s3DataType self.s3Uri = s3Uri @@ -35374,6 +35450,8 @@ extension SageMaker { public func validate(name: String) throws { try self.hubAccessConfig?.validate(name: "\(name).hubAccessConfig") + try self.validate(self.manifestS3Uri, name: "manifestS3Uri", parent: name, max: 1024) + try self.validate(self.manifestS3Uri, name: "manifestS3Uri", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") try self.validate(self.s3Uri, name: "s3Uri", parent: name, max: 1024) try self.validate(self.s3Uri, name: "s3Uri", parent: name, pattern: "^(https|s3)://([^/]+)/?(.*)$") } @@ -35381,6 +35459,7 @@ extension SageMaker { private enum CodingKeys: String, CodingKey { case compressionType = "CompressionType" case hubAccessConfig = "HubAccessConfig" + case manifestS3Uri = "ManifestS3Uri" case modelAccessConfig = "ModelAccessConfig" case s3DataType = "S3DataType" case s3Uri = "S3Uri" @@ -37072,18 +37151,33 @@ extension SageMaker { public struct StudioWebPortalSettings: AWSEncodableShape & AWSDecodableShape { /// The Applications supported in Studio that are hidden from the Studio left navigation pane. public let hiddenAppTypes: [AppType]? + /// The instance types you are hiding from the Studio user interface. + public let hiddenInstanceTypes: [AppInstanceType]? /// The machine learning tools that are hidden from the Studio left navigation pane. public let hiddenMlTools: [MlTools]? + /// The version aliases you are hiding from the Studio user interface. + public let hiddenSageMakerImageVersionAliases: [HiddenSageMakerImage]? @inlinable - public init(hiddenAppTypes: [AppType]? = nil, hiddenMlTools: [MlTools]? = nil) { + public init(hiddenAppTypes: [AppType]? = nil, hiddenInstanceTypes: [AppInstanceType]? = nil, hiddenMlTools: [MlTools]? = nil, hiddenSageMakerImageVersionAliases: [HiddenSageMakerImage]? = nil) { self.hiddenAppTypes = hiddenAppTypes + self.hiddenInstanceTypes = hiddenInstanceTypes self.hiddenMlTools = hiddenMlTools + self.hiddenSageMakerImageVersionAliases = hiddenSageMakerImageVersionAliases + } + + public func validate(name: String) throws { + try self.hiddenSageMakerImageVersionAliases?.forEach { + try $0.validate(name: "\(name).hiddenSageMakerImageVersionAliases[]") + } + try self.validate(self.hiddenSageMakerImageVersionAliases, name: "hiddenSageMakerImageVersionAliases", parent: name, max: 5) } private enum CodingKeys: String, CodingKey { case hiddenAppTypes = "HiddenAppTypes" + case hiddenInstanceTypes = "HiddenInstanceTypes" case hiddenMlTools = "HiddenMlTools" + case hiddenSageMakerImageVersionAliases = "HiddenSageMakerImageVersionAliases" } } @@ -39483,9 +39577,11 @@ extension SageMaker { public let domainSettingsForUpdate: DomainSettingsForUpdate? /// The VPC subnets that Studio uses for communication. If removing subnets, ensure there are no apps in the InService, Pending, or Deleting state. public let subnetIds: [String]? + /// Indicates whether custom tag propagation is supported for the domain. Defaults to DISABLED. + public let tagPropagation: TagPropagation? @inlinable - public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainId: String? = nil, domainSettingsForUpdate: DomainSettingsForUpdate? = nil, subnetIds: [String]? 
= nil) { + public init(appNetworkAccessType: AppNetworkAccessType? = nil, appSecurityGroupManagement: AppSecurityGroupManagement? = nil, defaultSpaceSettings: DefaultSpaceSettings? = nil, defaultUserSettings: UserSettings? = nil, domainId: String? = nil, domainSettingsForUpdate: DomainSettingsForUpdate? = nil, subnetIds: [String]? = nil, tagPropagation: TagPropagation? = nil) { self.appNetworkAccessType = appNetworkAccessType self.appSecurityGroupManagement = appSecurityGroupManagement self.defaultSpaceSettings = defaultSpaceSettings @@ -39493,6 +39589,7 @@ extension SageMaker { self.domainId = domainId self.domainSettingsForUpdate = domainSettingsForUpdate self.subnetIds = subnetIds + self.tagPropagation = tagPropagation } public func validate(name: String) throws { @@ -39517,6 +39614,7 @@ extension SageMaker { case domainId = "DomainId" case domainSettingsForUpdate = "DomainSettingsForUpdate" case subnetIds = "SubnetIds" + case tagPropagation = "TagPropagation" } } @@ -41283,6 +41381,7 @@ extension SageMaker { try self.validate(self.securityGroups, name: "securityGroups", parent: name, max: 5) try self.sharingSettings?.validate(name: "\(name).sharingSettings") try self.spaceStorageSettings?.validate(name: "\(name).spaceStorageSettings") + try self.studioWebPortalSettings?.validate(name: "\(name).studioWebPortalSettings") try self.tensorBoardAppSettings?.validate(name: "\(name).tensorBoardAppSettings") } diff --git a/Sources/Soto/Services/SageMakerMetrics/SageMakerMetrics_api.swift b/Sources/Soto/Services/SageMakerMetrics/SageMakerMetrics_api.swift index f15d6278d2..46264bb945 100644 --- a/Sources/Soto/Services/SageMakerMetrics/SageMakerMetrics_api.swift +++ b/Sources/Soto/Services/SageMakerMetrics/SageMakerMetrics_api.swift @@ -79,7 +79,36 @@ public struct SageMakerMetrics: AWSService { // MARK: API Calls - /// Used to ingest training metrics into SageMaker. These metrics can be visualized in SageMaker Studio and retrieved with the GetMetrics API. + /// Used to retrieve training metrics from SageMaker. + @Sendable + @inlinable + public func batchGetMetrics(_ input: BatchGetMetricsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchGetMetricsResponse { + try await self.client.execute( + operation: "BatchGetMetrics", + path: "/BatchGetMetrics", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Used to retrieve training metrics from SageMaker. + /// + /// Parameters: + /// - metricQueries: Queries made to retrieve training metrics from SageMaker. + /// - logger: Logger use during operation + @inlinable + public func batchGetMetrics( + metricQueries: [MetricQuery]? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> BatchGetMetricsResponse { + let input = BatchGetMetricsRequest( + metricQueries: metricQueries + ) + return try await self.batchGetMetrics(input, logger: logger) + } + + /// Used to ingest training metrics into SageMaker. These metrics can be visualized in SageMaker Studio. @Sendable @inlinable public func batchPutMetrics(_ input: BatchPutMetricsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchPutMetricsResponse { @@ -92,11 +121,11 @@ public struct SageMakerMetrics: AWSService { logger: logger ) } - /// Used to ingest training metrics into SageMaker. These metrics can be visualized in SageMaker Studio and retrieved with the GetMetrics API. + /// Used to ingest training metrics into SageMaker. These metrics can be visualized in SageMaker Studio. 
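The BatchGetMetrics operation added above complements the existing BatchPutMetrics ingestion path: metrics written for a trial component can later be queried per resource ARN, statistic, and period. Below is a minimal usage sketch, assuming an already-configured `AWSClient` named `client`; the training-job ARN, metric name, and epoch timestamps are placeholders, not values taken from this diff.

```swift
import SotoCore
import SotoSageMakerMetrics

// Minimal sketch: fetch per-minute averages of a training metric via the new
// BatchGetMetrics call. ARN, metric name, and timestamps are placeholders.
func fetchTrainingLoss(client: AWSClient) async throws {
    let metrics = SageMakerMetrics(client: client, region: .useast1)

    let query = SageMakerMetrics.MetricQuery(
        end: 1_704_070_800,          // end of the window, epoch seconds (placeholder)
        metricName: "train:loss",
        metricStat: .avg,
        period: .oneMinute,
        resourceArn: "arn:aws:sagemaker:us-east-1:123456789012:training-job/example-job",
        start: 1_704_067_200,        // start of the window, epoch seconds (placeholder)
        xAxisType: .timestamp
    )

    let response = try await metrics.batchGetMetrics(metricQueries: [query])
    for result in response.metricQueryResults ?? [] {
        print("status:", result.status?.rawValue ?? "unknown",
              "values:", result.metricValues ?? [])
    }
}
```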
/// /// Parameters: /// - metricData: A list of raw metric values to put. - /// - trialComponentName: The name of the Trial Component to associate with the metrics. + /// - trialComponentName: The name of the Trial Component to associate with the metrics. The Trial Component name must be entirely lowercase. /// - logger: Logger use during operation @inlinable public func batchPutMetrics( diff --git a/Sources/Soto/Services/SageMakerMetrics/SageMakerMetrics_shapes.swift b/Sources/Soto/Services/SageMakerMetrics/SageMakerMetrics_shapes.swift index 8bfb29e45e..a0225abec3 100644 --- a/Sources/Soto/Services/SageMakerMetrics/SageMakerMetrics_shapes.swift +++ b/Sources/Soto/Services/SageMakerMetrics/SageMakerMetrics_shapes.swift @@ -26,6 +26,32 @@ import Foundation extension SageMakerMetrics { // MARK: Enums + public enum MetricQueryResultStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case complete = "Complete" + case internalError = "InternalError" + case truncated = "Truncated" + case validationError = "ValidationError" + public var description: String { return self.rawValue } + } + + public enum MetricStatistic: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case avg = "Avg" + case count = "Count" + case last = "Last" + case max = "Max" + case min = "Min" + case stdDev = "StdDev" + public var description: String { return self.rawValue } + } + + public enum Period: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case fiveMinute = "FiveMinute" + case iterationNumber = "IterationNumber" + case oneHour = "OneHour" + case oneMinute = "OneMinute" + public var description: String { return self.rawValue } + } + public enum PutMetricsErrorCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case conflictError = "CONFLICT_ERROR" case internalError = "INTERNAL_ERROR" @@ -34,8 +60,50 @@ extension SageMakerMetrics { public var description: String { return self.rawValue } } + public enum XAxisType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case iterationNumber = "IterationNumber" + case timestamp = "Timestamp" + public var description: String { return self.rawValue } + } + // MARK: Shapes + public struct BatchGetMetricsRequest: AWSEncodableShape { + /// Queries made to retrieve training metrics from SageMaker. + public let metricQueries: [MetricQuery]? + + @inlinable + public init(metricQueries: [MetricQuery]? = nil) { + self.metricQueries = metricQueries + } + + public func validate(name: String) throws { + try self.metricQueries?.forEach { + try $0.validate(name: "\(name).metricQueries[]") + } + try self.validate(self.metricQueries, name: "metricQueries", parent: name, max: 100) + try self.validate(self.metricQueries, name: "metricQueries", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case metricQueries = "MetricQueries" + } + } + + public struct BatchGetMetricsResponse: AWSDecodableShape { + /// The results of a query to retrieve training metrics from SageMaker. + public let metricQueryResults: [MetricQueryResult]? + + @inlinable + public init(metricQueryResults: [MetricQueryResult]? = nil) { + self.metricQueryResults = metricQueryResults + } + + private enum CodingKeys: String, CodingKey { + case metricQueryResults = "MetricQueryResults" + } + } + public struct BatchPutMetricsError: AWSDecodableShape { /// The error code of an error that occured when attempting to put metrics. 
METRIC_LIMIT_EXCEEDED: The maximum amount of metrics per resource is exceeded. INTERNAL_ERROR: An internal error occured. VALIDATION_ERROR: The metric data failed validation. CONFLICT_ERROR: Multiple requests attempted to modify the same data simultaneously. public let code: PutMetricsErrorCode? @@ -57,7 +125,7 @@ extension SageMakerMetrics { public struct BatchPutMetricsRequest: AWSEncodableShape { /// A list of raw metric values to put. public let metricData: [RawMetricData]? - /// The name of the Trial Component to associate with the metrics. + /// The name of the Trial Component to associate with the metrics. The Trial Component name must be entirely lowercase. public let trialComponentName: String? @inlinable @@ -74,7 +142,7 @@ extension SageMakerMetrics { try self.validate(self.metricData, name: "metricData", parent: name, min: 1) try self.validate(self.trialComponentName, name: "trialComponentName", parent: name, max: 120) try self.validate(self.trialComponentName, name: "trialComponentName", parent: name, min: 1) - try self.validate(self.trialComponentName, name: "trialComponentName", parent: name, pattern: "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,119}$") + try self.validate(self.trialComponentName, name: "trialComponentName", parent: name, pattern: "^[a-z0-9](-*[a-z0-9]){0,119}$") } private enum CodingKeys: String, CodingKey { @@ -97,6 +165,78 @@ extension SageMakerMetrics { } } + public struct MetricQuery: AWSEncodableShape { + /// The end time of metrics to retrieve. + public let end: Int64? + /// The name of the metric to retrieve. + public let metricName: String? + /// The metrics stat type of metrics to retrieve. + public let metricStat: MetricStatistic? + /// The time period of metrics to retrieve. + public let period: Period? + /// The ARN of the SageMaker resource to retrieve metrics for. + public let resourceArn: String? + /// The start time of metrics to retrieve. + public let start: Int64? + /// The x-axis type of metrics to retrieve. + public let xAxisType: XAxisType? + + @inlinable + public init(end: Int64? = nil, metricName: String? = nil, metricStat: MetricStatistic? = nil, period: Period? = nil, resourceArn: String? = nil, start: Int64? = nil, xAxisType: XAxisType? = nil) { + self.end = end + self.metricName = metricName + self.metricStat = metricStat + self.period = period + self.resourceArn = resourceArn + self.start = start + self.xAxisType = xAxisType + } + + public func validate(name: String) throws { + try self.validate(self.metricName, name: "metricName", parent: name, max: 255) + try self.validate(self.metricName, name: "metricName", parent: name, min: 1) + try self.validate(self.metricName, name: "metricName", parent: name, pattern: "^.+$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:[a-z\\-].*/") + } + + private enum CodingKeys: String, CodingKey { + case end = "End" + case metricName = "MetricName" + case metricStat = "MetricStat" + case period = "Period" + case resourceArn = "ResourceArn" + case start = "Start" + case xAxisType = "XAxisType" + } + } + + public struct MetricQueryResult: AWSDecodableShape { + /// A message describing the status of the metric query. + public let message: String? + /// The metric values retrieved by the query. + public let metricValues: [Double]? + /// The status of the metric query. + public let status: MetricQueryResultStatus? 
+ /// The values for the x-axis of the metrics. + public let xAxisValues: [Int64]? + + @inlinable + public init(message: String? = nil, metricValues: [Double]? = nil, status: MetricQueryResultStatus? = nil, xAxisValues: [Int64]? = nil) { + self.message = message + self.metricValues = metricValues + self.status = status + self.xAxisValues = xAxisValues + } + + private enum CodingKeys: String, CodingKey { + case message = "Message" + case metricValues = "MetricValues" + case status = "Status" + case xAxisValues = "XAxisValues" + } + } + public struct RawMetricData: AWSEncodableShape { /// The name of the metric. public let metricName: String? diff --git a/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift b/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift index 87e7879316..2a5a36b50d 100644 --- a/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift +++ b/Sources/Soto/Services/SecurityHub/SecurityHub_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS SecurityHub service. /// -/// Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps you assess your Amazon Web Services environment against security industry standards and best practices. Security Hub collects security data across Amazon Web Services accounts, Amazon Web Servicesservices, and supported third-party products and helps you analyze your security trends and identify the highest priority security issues. To help you manage the security state of your organization, Security Hub supports multiple security standards. These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes several security controls, each of which represents a security best practice. Security Hub runs checks against security controls and generates control findings to help you assess your compliance against security best practices. In addition to generating control findings, Security Hub also receives findings from other Amazon Web Servicesservices, such as Amazon GuardDuty and Amazon Inspector, and supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You can also send Security Hub findings to other Amazon Web Servicesservices and supported third-party products. Security Hub offers automation features that help you triage and remediate security issues. For example, you can use automation rules to automatically update critical findings when a security check fails. You can also leverage the integration with Amazon EventBridge to trigger automatic responses to specific findings. This guide, the Security Hub API Reference, provides information about the Security Hub API. This includes supported resources, HTTP methods, parameters, and schemas. If you're new to Security Hub, you might find it helpful to also review the Security Hub User Guide . The user guide explains key concepts and provides procedures that demonstrate how to use Security Hub features. It also provides information about topics such as integrating Security Hub with other Amazon Web Servicesservices. 
In addition to interacting with Security Hub by making calls to the Security Hub API, you can use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell, Java, Go, Python, C++, and .NET. These tools and SDKs provide convenient, programmatic access to Security Hub and other Amazon Web Servicesservices . They also handle tasks such as signing requests, managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools and SDKs, see Tools to Build on Amazon Web Services. With the exception of operations that are related to central configuration, Security Hub API requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, +/// Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps you assess your Amazon Web Services environment against security industry standards and best practices. Security Hub collects security data across Amazon Web Services accounts, Amazon Web Services services, and supported third-party products and helps you analyze your security trends and identify the highest priority security issues. To help you manage the security state of your organization, Security Hub supports multiple security standards. These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes several security controls, each of which represents a security best practice. Security Hub runs checks against security controls and generates control findings to help you assess your compliance against security best practices. In addition to generating control findings, Security Hub also receives findings from other Amazon Web Services services, such as Amazon GuardDuty and Amazon Inspector, and supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You can also send Security Hub findings to other Amazon Web Services services and supported third-party products. Security Hub offers automation features that help you triage and remediate security issues. For example, you can use automation rules to automatically update critical findings when a security check fails. You can also leverage the integration with Amazon EventBridge to trigger automatic responses to specific findings. This guide, the Security Hub API Reference, provides information about the Security Hub API. This includes supported resources, HTTP methods, parameters, and schemas. If you're new to Security Hub, you might find it helpful to also review the Security Hub User Guide . The user guide explains key concepts and provides procedures that demonstrate how to use Security Hub features. 
It also provides information about topics such as integrating Security Hub with other Amazon Web Services services. In addition to interacting with Security Hub by making calls to the Security Hub API, you can use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell, Java, Go, Python, C++, and .NET. These tools and SDKs provide convenient, programmatic access to Security Hub and other Amazon Web Services services . They also handle tasks such as signing requests, managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools and SDKs, see Tools to Build on Amazon Web Services. With the exception of operations that are related to central configuration, Security Hub API requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, /// API requests for enabling Security Hub, standards, and controls are executed in the home Region and all linked Regions. For a list of /// central configuration operations, see the Central configuration /// terms and concepts section of the Security Hub User Guide. The following throttling limits apply to Security Hub API operations. BatchEnableStandards - RateLimit of 1 request per second. BurstLimit of 1 request per second. GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests per second. BatchImportFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. UpdateStandardsControl - RateLimit of 1 request per second. BurstLimit of 5 requests per second. All other operations - RateLimit of 10 requests per second. BurstLimit of 30 requests per second. @@ -94,7 +94,7 @@ public struct SecurityHub: AWSService { // MARK: API Calls - /// Accepts the invitation to be a member account and be monitored by the Security Hub administrator account that the invitation was sent from. This operation is only used by member accounts that are not added through Organizations. When the member account accepts the invitation, permission is granted to the administrator account to view findings generated in the member account. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Accepts the invitation to be a member account and be monitored by the Security Hub administrator account that the invitation was sent from. This operation is only used by member accounts that are not added through Organizations. When the member account accepts the invitation, permission is granted to the administrator account to view findings generated in the member account. 
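For member accounts that still use invitations rather than Organizations, accepting a pending invitation is a single call on the service object. A minimal sketch, assuming an existing `AWSClient`; the administrator account ID and invitation ID are placeholders, and the `invitationId` parameter is inferred from the underlying request shape rather than shown in this hunk.

```swift
import SotoCore
import SotoSecurityHub

// Minimal sketch: a member account accepts a pending administrator invitation.
// Account ID and invitation ID are placeholders; the invitationId parameter is
// assumed from the AcceptAdministratorInvitationRequest shape, not shown above.
func acceptInvitation(client: AWSClient) async throws {
    let securityHub = SecurityHub(client: client, region: .useast1)
    _ = try await securityHub.acceptAdministratorInvitation(
        administratorId: "123456789012",
        invitationId: "7ab938c5d52d7904ad09f9e7c20cc4eb"
    )
}
```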
@Sendable @inlinable public func acceptAdministratorInvitation(_ input: AcceptAdministratorInvitationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AcceptAdministratorInvitationResponse { @@ -107,7 +107,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Accepts the invitation to be a member account and be monitored by the Security Hub administrator account that the invitation was sent from. This operation is only used by member accounts that are not added through Organizations. When the member account accepts the invitation, permission is granted to the administrator account to view findings generated in the member account. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Accepts the invitation to be a member account and be monitored by the Security Hub administrator account that the invitation was sent from. This operation is only used by member accounts that are not added through Organizations. When the member account accepts the invitation, permission is granted to the administrator account to view findings generated in the member account. /// /// Parameters: /// - administratorId: The account ID of the Security Hub administrator account that sent the invitation. @@ -421,7 +421,7 @@ public struct SecurityHub: AWSService { return try await self.batchUpdateAutomationRules(input, logger: logger) } - /// Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account. Updates from BatchUpdateFindings do not affect the value of UpdatedAt for a finding. Administrator and member accounts can use BatchUpdateFindings to update the following finding fields and objects. Confidence Criticality Note RelatedFindings Severity Types UserDefinedFields VerificationState Workflow You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the Security Hub User Guide. + /// Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account. Updates from BatchUpdateFindings don't affect the value of UpdatedAt for a finding. Administrator and member accounts can use BatchUpdateFindings to update the following finding fields and objects. Confidence Criticality Note RelatedFindings Severity Types UserDefinedFields VerificationState Workflow You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the Security Hub User Guide. 
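A common use of BatchUpdateFindings is setting a workflow status on a batch of findings after an investigation. The sketch below assumes an existing `AWSClient`; the finding ID and product ARN are placeholders, and the `AwsSecurityFindingIdentifier` and `WorkflowUpdate` shapes come from the Security Hub API model rather than the hunks shown in this diff.

```swift
import SotoCore
import SotoSecurityHub

// Minimal sketch: mark one finding as RESOLVED. The finding ID and product ARN
// are placeholders; AwsSecurityFindingIdentifier and WorkflowUpdate are taken
// from the Security Hub API model and are not part of the hunks shown above.
func resolveFinding(client: AWSClient) async throws {
    let securityHub = SecurityHub(client: client, region: .useast1)
    let identifier = SecurityHub.AwsSecurityFindingIdentifier(
        id: "arn:aws:securityhub:us-east-1:123456789012:subscription/example/finding/abcd1234",
        productArn: "arn:aws:securityhub:us-east-1::product/aws/securityhub"
    )
    _ = try await securityHub.batchUpdateFindings(
        findingIdentifiers: [identifier],
        workflow: .init(status: .resolved)
    )
}
```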
@Sendable @inlinable public func batchUpdateFindings(_ input: BatchUpdateFindingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> BatchUpdateFindingsResponse { @@ -434,7 +434,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account. Updates from BatchUpdateFindings do not affect the value of UpdatedAt for a finding. Administrator and member accounts can use BatchUpdateFindings to update the following finding fields and objects. Confidence Criticality Note RelatedFindings Severity Types UserDefinedFields VerificationState Workflow You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the Security Hub User Guide. + /// Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account. Updates from BatchUpdateFindings don't affect the value of UpdatedAt for a finding. Administrator and member accounts can use BatchUpdateFindings to update the following finding fields and objects. Confidence Criticality Note RelatedFindings Severity Types UserDefinedFields VerificationState Workflow You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the Security Hub User Guide. /// /// Parameters: /// - confidence: The updated value for the finding confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify. Confidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence. @@ -629,7 +629,7 @@ public struct SecurityHub: AWSService { return try await self.createConfigurationPolicy(input, logger: logger) } - /// Used to enable finding aggregation. Must be called from the aggregation Region. For more details about cross-Region replication, see Configuring finding aggregation in the Security Hub User Guide. + /// The aggregation Region is now called the home Region. Used to enable cross-Region aggregation. This operation can be invoked from the home Region only. For information about how cross-Region aggregation works, see Understanding cross-Region aggregation in Security Hub in the Security Hub User Guide. @Sendable @inlinable public func createFindingAggregator(_ input: CreateFindingAggregatorRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFindingAggregatorResponse { @@ -642,11 +642,11 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Used to enable finding aggregation. Must be called from the aggregation Region. For more details about cross-Region replication, see Configuring finding aggregation in the Security Hub User Guide. + /// The aggregation Region is now called the home Region. 
Used to enable cross-Region aggregation. This operation can be invoked from the home Region only. For information about how cross-Region aggregation works, see Understanding cross-Region aggregation in Security Hub in the Security Hub User Guide. /// /// Parameters: /// - regionLinkingMode: Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are as follows: ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. SPECIFIED_REGIONS - Aggregates findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions. NO_REGIONS - Aggregates no data because no Regions are selected as linked Regions. - /// - regions: If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS. + /// - regions: If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that don't replicate and send findings to the home Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do replicate and send findings to the home Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS. /// - logger: Logger use during operation @inlinable public func createFindingAggregator( @@ -696,7 +696,7 @@ public struct SecurityHub: AWSService { return try await self.createInsight(input, logger: logger) } - /// Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the administrator account. If you are integrated with Organizations, then the administrator account is designated by the organization management account. CreateMembers is always used to add accounts that are not organization members. For accounts that are managed using Organizations, CreateMembers is only used in the following cases: Security Hub is not configured to automatically add new organization accounts. The account was disassociated or deleted in Security Hub. This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation. For accounts that are not organization members, you create the account association and then send an invitation to the member account. To send the invitation, you use the InviteMembers operation. If the account owner accepts the invitation, the account becomes a member account in Security Hub. 
Accounts that are managed using Organizations do not receive an invitation. They automatically become a member account in Security Hub. If the organization account does not have Security Hub enabled, then Security Hub and the default standards are automatically enabled. Note that Security Hub cannot be enabled automatically for the organization management account. The organization management account must enable Security Hub before the administrator account enables it as a member account. For organization accounts that already have Security Hub enabled, Security Hub does not make any other changes to those accounts. It does not change their enabled standards or controls. A permissions policy is added that permits the administrator account to view the findings generated in the member account. To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation. + /// Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the administrator account. If you are integrated with Organizations, then the administrator account is designated by the organization management account. CreateMembers is always used to add accounts that are not organization members. For accounts that are managed using Organizations, CreateMembers is only used in the following cases: Security Hub is not configured to automatically add new organization accounts. The account was disassociated or deleted in Security Hub. This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation. For accounts that are not organization members, you create the account association and then send an invitation to the member account. To send the invitation, you use the InviteMembers operation. If the account owner accepts the invitation, the account becomes a member account in Security Hub. Accounts that are managed using Organizations don't receive an invitation. They automatically become a member account in Security Hub. If the organization account does not have Security Hub enabled, then Security Hub and the default standards are automatically enabled. Note that Security Hub cannot be enabled automatically for the organization management account. The organization management account must enable Security Hub before the administrator account enables it as a member account. For organization accounts that already have Security Hub enabled, Security Hub does not make any other changes to those accounts. It does not change their enabled standards or controls. A permissions policy is added that permits the administrator account to view the findings generated in the member account. To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation. @Sendable @inlinable public func createMembers(_ input: CreateMembersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateMembersResponse { @@ -709,7 +709,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the administrator account. If you are integrated with Organizations, then the administrator account is designated by the organization management account. CreateMembers is always used to add accounts that are not organization members. 
For accounts that are managed using Organizations, CreateMembers is only used in the following cases: Security Hub is not configured to automatically add new organization accounts. The account was disassociated or deleted in Security Hub. This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation. For accounts that are not organization members, you create the account association and then send an invitation to the member account. To send the invitation, you use the InviteMembers operation. If the account owner accepts the invitation, the account becomes a member account in Security Hub. Accounts that are managed using Organizations do not receive an invitation. They automatically become a member account in Security Hub. If the organization account does not have Security Hub enabled, then Security Hub and the default standards are automatically enabled. Note that Security Hub cannot be enabled automatically for the organization management account. The organization management account must enable Security Hub before the administrator account enables it as a member account. For organization accounts that already have Security Hub enabled, Security Hub does not make any other changes to those accounts. It does not change their enabled standards or controls. A permissions policy is added that permits the administrator account to view the findings generated in the member account. To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation. + /// Creates a member association in Security Hub between the specified accounts and the account used to make the request, which is the administrator account. If you are integrated with Organizations, then the administrator account is designated by the organization management account. CreateMembers is always used to add accounts that are not organization members. For accounts that are managed using Organizations, CreateMembers is only used in the following cases: Security Hub is not configured to automatically add new organization accounts. The account was disassociated or deleted in Security Hub. This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you can use the EnableSecurityHub operation. For accounts that are not organization members, you create the account association and then send an invitation to the member account. To send the invitation, you use the InviteMembers operation. If the account owner accepts the invitation, the account becomes a member account in Security Hub. Accounts that are managed using Organizations don't receive an invitation. They automatically become a member account in Security Hub. If the organization account does not have Security Hub enabled, then Security Hub and the default standards are automatically enabled. Note that Security Hub cannot be enabled automatically for the organization management account. The organization management account must enable Security Hub before the administrator account enables it as a member account. For organization accounts that already have Security Hub enabled, Security Hub does not make any other changes to those accounts. It does not change their enabled standards or controls. A permissions policy is added that permits the administrator account to view the findings generated in the member account. 
To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation. /// /// Parameters: /// - accountDetails: The list of accounts to associate with the Security Hub administrator account. For each account, the list includes the account ID and optionally the email address. @@ -725,7 +725,7 @@ public struct SecurityHub: AWSService { return try await self.createMembers(input, logger: logger) } - /// Declines invitations to become a member account. A prospective member account uses this operation to decline an invitation to become a member. This operation is only called by member accounts that aren't part of an organization. Organization accounts don't receive invitations. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Declines invitations to become a Security Hub member account. A prospective member account uses this operation to decline an invitation to become a member. Only member accounts that aren't part of an Amazon Web Services organization should use this operation. Organization accounts don't receive invitations. @Sendable @inlinable public func declineInvitations(_ input: DeclineInvitationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeclineInvitationsResponse { @@ -738,7 +738,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Declines invitations to become a member account. A prospective member account uses this operation to decline an invitation to become a member. This operation is only called by member accounts that aren't part of an organization. Organization accounts don't receive invitations. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Declines invitations to become a Security Hub member account. A prospective member account uses this operation to decline an invitation to become a member. Only member accounts that aren't part of an Amazon Web Services organization should use this operation. Organization accounts don't receive invitations. /// /// Parameters: /// - accountIds: The list of prospective member account IDs for which to decline an invitation. @@ -812,7 +812,8 @@ public struct SecurityHub: AWSService { return try await self.deleteConfigurationPolicy(input, logger: logger) } - /// Deletes a finding aggregator. When you delete the finding aggregator, you stop finding aggregation. When you stop finding aggregation, findings that were already aggregated to the aggregation Region are still visible from the aggregation Region. New findings and finding updates are not aggregated. + /// The aggregation Region is now called the home Region. Deletes a finding aggregator. When you delete the finding aggregator, you stop cross-Region aggregation. Finding replication stops + /// occurring from the linked Regions to the home Region. When you stop cross-Region aggregation, findings that were already replicated and sent to the home Region are still visible from the home Region. However, new findings and finding updates are no longer replicated and sent to the home Region. 
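The reworded aggregator documentation above describes the home-Region model for cross-Region aggregation. A minimal sketch of the lifecycle, assuming an existing `AWSClient` and that the call is made from the home Region; the Region names are placeholders, and `regionLinkingMode` is passed as one of the documented string values.

```swift
import SotoCore
import SotoSecurityHub

// Minimal sketch of cross-Region aggregation: start replicating findings from
// two linked Regions into the home Region, then tear the aggregator down again.
// Region names are placeholders.
func configureAggregation(client: AWSClient) async throws {
    let securityHub = SecurityHub(client: client, region: .useast1)   // home Region

    let created = try await securityHub.createFindingAggregator(
        regionLinkingMode: "SPECIFIED_REGIONS",
        regions: ["us-west-2", "eu-west-1"]
    )

    // Stopping aggregation keeps already-replicated findings visible in the
    // home Region; new findings are no longer replicated there.
    if let arn = created.findingAggregatorArn {
        _ = try await securityHub.deleteFindingAggregator(findingAggregatorArn: arn)
    }
}
```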
@Sendable @inlinable public func deleteFindingAggregator(_ input: DeleteFindingAggregatorRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteFindingAggregatorResponse { @@ -825,7 +826,8 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Deletes a finding aggregator. When you delete the finding aggregator, you stop finding aggregation. When you stop finding aggregation, findings that were already aggregated to the aggregation Region are still visible from the aggregation Region. New findings and finding updates are not aggregated. + /// The aggregation Region is now called the home Region. Deletes a finding aggregator. When you delete the finding aggregator, you stop cross-Region aggregation. Finding replication stops + /// occurring from the linked Regions to the home Region. When you stop cross-Region aggregation, findings that were already replicated and sent to the home Region are still visible from the home Region. However, new findings and finding updates are no longer replicated and sent to the home Region. /// /// Parameters: /// - findingAggregatorArn: The ARN of the finding aggregator to delete. To obtain the ARN, use ListFindingAggregators. @@ -870,7 +872,7 @@ public struct SecurityHub: AWSService { return try await self.deleteInsight(input, logger: logger) } - /// Deletes invitations received by the Amazon Web Services account to become a member account. A Security Hub administrator account can use this operation to delete invitations sent to one or more member accounts. This operation is only used to delete invitations that are sent to member accounts that aren't part of an organization. Organization accounts don't receive invitations. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Deletes invitations to become a Security Hub member account. A Security Hub administrator account can use this operation to delete invitations sent to one or more prospective member accounts. This operation is only used to delete invitations that are sent to prospective member accounts that aren't part of an Amazon Web Services organization. Organization accounts don't receive invitations. @Sendable @inlinable public func deleteInvitations(_ input: DeleteInvitationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteInvitationsResponse { @@ -883,7 +885,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Deletes invitations received by the Amazon Web Services account to become a member account. A Security Hub administrator account can use this operation to delete invitations sent to one or more member accounts. This operation is only used to delete invitations that are sent to member accounts that aren't part of an organization. Organization accounts don't receive invitations. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Deletes invitations to become a Security Hub member account. A Security Hub administrator account can use this operation to delete invitations sent to one or more prospective member accounts. 
This operation is only used to delete invitations that are sent to prospective member accounts that aren't part of an Amazon Web Services organization. Organization accounts don't receive invitations. /// /// Parameters: /// - accountIds: The list of member account IDs that received the invitations you want to delete. @@ -1018,7 +1020,7 @@ public struct SecurityHub: AWSService { return try await self.describeOrganizationConfiguration(input, logger: logger) } - /// Returns information about product integrations in Security Hub. You can optionally provide an integration ARN. If you provide an integration ARN, then the results only include that integration. If you do not provide an integration ARN, then the results include all of the available product integrations. + /// Returns information about product integrations in Security Hub. You can optionally provide an integration ARN. If you provide an integration ARN, then the results only include that integration. If you don't provide an integration ARN, then the results include all of the available product integrations. @Sendable @inlinable public func describeProducts(_ input: DescribeProductsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeProductsResponse { @@ -1031,7 +1033,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Returns information about product integrations in Security Hub. You can optionally provide an integration ARN. If you provide an integration ARN, then the results only include that integration. If you do not provide an integration ARN, then the results include all of the available product integrations. + /// Returns information about product integrations in Security Hub. You can optionally provide an integration ARN. If you provide an integration ARN, then the results only include that integration. If you don't provide an integration ARN, then the results include all of the available product integrations. /// /// Parameters: /// - maxResults: The maximum number of results to return. @@ -1362,7 +1364,7 @@ public struct SecurityHub: AWSService { /// /// Parameters: /// - controlFindingGenerator: This field, used when enabling Security Hub, specifies whether the calling account has consolidated control findings turned on. If the value for this field is set to SECURITY_CONTROL, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to STANDARD_CONTROL, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. The value for this field in a member account matches the value in the administrator account. For accounts that aren't part of an organization, the default value of this field is SECURITY_CONTROL if you enabled Security Hub on or after February 23, 2023. - /// - enableDefaultStandards: Whether to enable the security standards that Security Hub has designated as automatically enabled. If you do not provide a value for EnableDefaultStandards, it is set to true. To not enable the automatically enabled standards, set EnableDefaultStandards to false. + /// - enableDefaultStandards: Whether to enable the security standards that Security Hub has designated as automatically enabled. If you don't provide a value for EnableDefaultStandards, it is set to true. To not enable the automatically enabled standards, set EnableDefaultStandards to false. /// - tags: The tags to add to the hub resource when you enable Security Hub. 
/// - logger: Logger use during operation @inlinable @@ -1499,7 +1501,8 @@ public struct SecurityHub: AWSService { return try await self.getEnabledStandards(input, logger: logger) } - /// Returns the current finding aggregation configuration. + /// The aggregation Region is now called the home Region. Returns the current configuration in the calling account for cross-Region aggregation. A finding aggregator is a resource that establishes + /// the home Region and any linked Regions. @Sendable @inlinable public func getFindingAggregator(_ input: GetFindingAggregatorRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFindingAggregatorResponse { @@ -1512,7 +1515,8 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Returns the current finding aggregation configuration. + /// The aggregation Region is now called the home Region. Returns the current configuration in the calling account for cross-Region aggregation. A finding aggregator is a resource that establishes + /// the home Region and any linked Regions. /// /// Parameters: /// - findingAggregatorArn: The ARN of the finding aggregator to return details for. To obtain the ARN, use ListFindingAggregators. @@ -1569,7 +1573,7 @@ public struct SecurityHub: AWSService { return try await self.getFindingHistory(input, logger: logger) } - /// Returns a list of findings that match the specified criteria. If finding aggregation is enabled, then when you call GetFindings from the aggregation Region, the results include all of the matching findings from both the aggregation Region and the linked Regions. + /// Returns a list of findings that match the specified criteria. If cross-Region aggregation is enabled, then when you call GetFindings from the home Region, the results include all of the matching findings from both the home Region and linked Regions. @Sendable @inlinable public func getFindings(_ input: GetFindingsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetFindingsResponse { @@ -1582,7 +1586,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Returns a list of findings that match the specified criteria. If finding aggregation is enabled, then when you call GetFindings from the aggregation Region, the results include all of the matching findings from both the aggregation Region and the linked Regions. + /// Returns a list of findings that match the specified criteria. If cross-Region aggregation is enabled, then when you call GetFindings from the home Region, the results include all of the matching findings from both the home Region and linked Regions. /// /// Parameters: /// - filters: The finding attributes used to define a condition to filter the returned findings. You can filter by up to 10 finding attributes. For each attribute, you can provide up to 20 filter values. Note that in the available filter fields, WorkflowState is deprecated. To search for a finding based on its workflow status, use WorkflowStatus. @@ -1652,7 +1656,7 @@ public struct SecurityHub: AWSService { /// Lists and describes insights for the specified insight ARNs. /// /// Parameters: - /// - insightArns: The ARNs of the insights to describe. If you do not provide any insight ARNs, then GetInsights returns all of your custom insights. It does not return any managed insights. + /// - insightArns: The ARNs of the insights to describe. If you don't provide any insight ARNs, then GetInsights returns all of your custom insights. It does not return any managed insights. 
/// - maxResults: The maximum number of items to return in the response. /// - nextToken: The token that is required for pagination. On your first call to the GetInsights operation, set the value of this parameter to NULL. For subsequent calls to the operation, to continue listing data, set the value of this parameter to the value returned from the previous response. /// - logger: Logger use during operation @@ -1671,7 +1675,7 @@ public struct SecurityHub: AWSService { return try await self.getInsights(input, logger: logger) } - /// Returns the count of all Security Hub membership invitations that were sent to the current member account, not including the currently accepted invitation. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Returns the count of all Security Hub membership invitations that were sent to the calling member account, not including the currently accepted invitation. @Sendable @inlinable public func getInvitationsCount(_ input: GetInvitationsCountRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetInvitationsCountResponse { @@ -1684,7 +1688,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Returns the count of all Security Hub membership invitations that were sent to the current member account, not including the currently accepted invitation. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Returns the count of all Security Hub membership invitations that were sent to the calling member account, not including the currently accepted invitation. /// /// Parameters: /// - logger: Logger use during operation @@ -1783,7 +1787,7 @@ public struct SecurityHub: AWSService { return try await self.getSecurityControlDefinition(input, logger: logger) } - /// Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that the invitation is sent from. This operation is only used to invite accounts that do not belong to an organization. Organization accounts do not receive invitations. Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub. When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated from the member account. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that the invitation is sent from. This operation is only used to invite accounts that don't belong to an Amazon Web Services organization. Organization accounts don't receive invitations. Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub. 
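// Usage sketch for the GetFindings behaviour documented above: when called from
// the home Region with cross-Region aggregation enabled, the results cover the
// home Region and all linked Regions. The AwsSecurityFindingFilters/StringFilter
// shape names and the .equals comparison are assumed from the Security Hub model.
import SotoSecurityHub

func findNewFindings(using securityHub: SecurityHub) async throws -> GetFindingsResponse {
    // Filter on WorkflowStatus rather than the deprecated WorkflowState field.
    let filters = AwsSecurityFindingFilters(
        workflowStatus: [StringFilter(comparison: .equals, value: "NEW")]
    )
    return try await securityHub.getFindings(filters: filters, maxResults: 100)
}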
When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated in the member account. @Sendable @inlinable public func inviteMembers(_ input: InviteMembersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> InviteMembersResponse { @@ -1796,7 +1800,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that the invitation is sent from. This operation is only used to invite accounts that do not belong to an organization. Organization accounts do not receive invitations. Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub. When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated from the member account. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that the invitation is sent from. This operation is only used to invite accounts that don't belong to an Amazon Web Services organization. Organization accounts don't receive invitations. Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub. When the account owner enables Security Hub and accepts the invitation to become a member account, the administrator account can view the findings generated in the member account. /// /// Parameters: /// - accountIds: The list of account IDs of the Amazon Web Services accounts to invite to Security Hub as members. @@ -1943,7 +1947,8 @@ public struct SecurityHub: AWSService { return try await self.listEnabledProductsForImport(input, logger: logger) } - /// If finding aggregation is enabled, then ListFindingAggregators returns the ARN of the finding aggregator. You can run this operation from any Region. + /// If cross-Region aggregation is enabled, then ListFindingAggregators returns the Amazon Resource Name (ARN) + /// of the finding aggregator. You can run this operation from any Amazon Web Services Region. @Sendable @inlinable public func listFindingAggregators(_ input: ListFindingAggregatorsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFindingAggregatorsResponse { @@ -1956,7 +1961,8 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// If finding aggregation is enabled, then ListFindingAggregators returns the ARN of the finding aggregator. You can run this operation from any Region. + /// If cross-Region aggregation is enabled, then ListFindingAggregators returns the Amazon Resource Name (ARN) + /// of the finding aggregator. You can run this operation from any Amazon Web Services Region. /// /// Parameters: /// - maxResults: The maximum number of results to return. This operation currently only returns a single result. @@ -1975,7 +1981,7 @@ public struct SecurityHub: AWSService { return try await self.listFindingAggregators(input, logger: logger) } - /// Lists all Security Hub membership invitations that were sent to the current Amazon Web Services account. 
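// Usage sketch for the invitation-based flow documented above (Organizations is
// the recommended way to manage member accounts). Account IDs are placeholders;
// only the documented accountIds parameters are used.
import SotoSecurityHub

// Called from the administrator account. The accounts must already have been
// created as members with CreateMembers before they can be invited.
func inviteProspectiveMembers(using securityHub: SecurityHub) async throws {
    _ = try await securityHub.inviteMembers(accountIds: ["111122223333", "444455556666"])
}

// Called from a prospective member account that wants to turn an invitation down.
func declinePendingInvitations(using securityHub: SecurityHub) async throws {
    _ = try await securityHub.declineInvitations(accountIds: ["111122223333"])
}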
This operation is only used by accounts that are managed by invitation. Accounts that are managed using the integration with Organizations do not receive invitations. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Lists all Security Hub membership invitations that were sent to the calling account. Only accounts that are managed by invitation can use this operation. Accounts that are managed using the integration with Organizations don't receive invitations. @Sendable @inlinable public func listInvitations(_ input: ListInvitationsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListInvitationsResponse { @@ -1988,7 +1994,7 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Lists all Security Hub membership invitations that were sent to the current Amazon Web Services account. This operation is only used by accounts that are managed by invitation. Accounts that are managed using the integration with Organizations do not receive invitations. + /// We recommend using Organizations instead of Security Hub invitations to manage your member accounts. For information, see Managing Security Hub administrator and member accounts with Organizations in the Security Hub User Guide. Lists all Security Hub membership invitations that were sent to the calling account. Only accounts that are managed by invitation can use this operation. Accounts that are managed using the integration with Organizations don't receive invitations. /// /// Parameters: /// - maxResults: The maximum number of items to return in the response. @@ -2377,7 +2383,7 @@ public struct SecurityHub: AWSService { return try await self.updateConfigurationPolicy(input, logger: logger) } - /// Updates the finding aggregation configuration. Used to update the Region linking mode and the list of included or excluded Regions. You cannot use UpdateFindingAggregator to change the aggregation Region. You must run UpdateFindingAggregator from the current aggregation Region. + /// The aggregation Region is now called the home Region. Updates cross-Region aggregation settings. You can use this operation to update the Region linking mode and the list of included or excluded Amazon Web Services Regions. However, you can't use this operation to change the home Region. You can invoke this operation from the current home Region only. @Sendable @inlinable public func updateFindingAggregator(_ input: UpdateFindingAggregatorRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateFindingAggregatorResponse { @@ -2390,12 +2396,12 @@ public struct SecurityHub: AWSService { logger: logger ) } - /// Updates the finding aggregation configuration. Used to update the Region linking mode and the list of included or excluded Regions. You cannot use UpdateFindingAggregator to change the aggregation Region. You must run UpdateFindingAggregator from the current aggregation Region. + /// The aggregation Region is now called the home Region. Updates cross-Region aggregation settings. You can use this operation to update the Region linking mode and the list of included or excluded Amazon Web Services Regions. However, you can't use this operation to change the home Region. You can invoke this operation from the current home Region only. /// /// Parameters: /// - findingAggregatorArn: The ARN of the finding aggregator. 
To obtain the ARN, use ListFindingAggregators. /// - regionLinkingMode: Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are as follows: ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. SPECIFIED_REGIONS - Aggregates findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions. NO_REGIONS - Aggregates no data because no Regions are selected as linked Regions. - /// - regions: If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS. + /// - regions: If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that don't replicate and send findings to the home Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do replicate and send findings to the home Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS. /// - logger: Logger use during operation @inlinable public func updateFindingAggregator( @@ -2922,7 +2928,7 @@ extension SecurityHub { /// Return PaginatorSequence for operation ``getInsights(_:logger:)``. /// /// - Parameters: - /// - insightArns: The ARNs of the insights to describe. If you do not provide any insight ARNs, then GetInsights returns all of your custom insights. It does not return any managed insights. + /// - insightArns: The ARNs of the insights to describe. If you don't provide any insight ARNs, then GetInsights returns all of your custom insights. It does not return any managed insights. /// - maxResults: The maximum number of items to return in the response. /// - logger: Logger used for logging @inlinable diff --git a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift index a8dff86b7e..deaa418c7e 100644 --- a/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift +++ b/Sources/Soto/Services/SecurityHub/SecurityHub_shapes.swift @@ -958,7 +958,7 @@ extension SecurityHub { public struct AutomationRulesAction: AWSEncodableShape & AWSDecodableShape { /// Specifies that the automation rule action is an update to a finding field. public let findingFieldsUpdate: AutomationRulesFindingFieldsUpdate? - /// Specifies that the rule action should update the Types finding field. The Types finding field classifies findings in the format of namespace/category/classifier. 
For more information, see Types taxonomy for ASFF in the Security Hub User Guide. + /// Specifies the type of action that Security Hub takes when a finding matches the defined criteria of a rule. public let type: AutomationRulesActionType? @inlinable @@ -1158,7 +1158,7 @@ extension SecurityHub { public let resourceApplicationName: [StringFilter]? /// Custom fields and values about the resource that a finding pertains to. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let resourceDetailsOther: [MapFilter]? - /// The identifier for the given resource type. For Amazon Web Services resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For Amazon Web Services resources that lack ARNs, this is the identifier as defined by the Amazon Web Servicesservice that created the resource. For non-Amazon Web Services resources, this is a unique identifier that is associated with the resource. Array Members: Minimum number of 1 item. Maximum number of 100 items. + /// The identifier for the given resource type. For Amazon Web Services resources that are identified by Amazon Resource Names (ARNs), this is the ARN. For Amazon Web Services resources that lack ARNs, this is the identifier as defined by the Amazon Web Services service that created the resource. For non-Amazon Web Services resources, this is a unique identifier that is associated with the resource. Array Members: Minimum number of 1 item. Maximum number of 100 items. public let resourceId: [StringFilter]? /// The partition in which the resource that the finding pertains to is located. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition. Array Members: Minimum number of 1 item. Maximum number of 20 items. public let resourcePartition: [StringFilter]? @@ -3218,7 +3218,7 @@ extension SecurityHub { public struct AwsBackupBackupPlanRuleCopyActionsDetails: AWSEncodableShape & AWSDecodableShape { /// An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup. public let destinationBackupVaultArn: String? - /// Defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. If you do not specify a lifecycle, Backup applies the lifecycle policy of the source backup to the destination backup. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. + /// Defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. If you don't specify a lifecycle, Backup applies the lifecycle policy of the source backup to the destination backup. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. public let lifecycle: AwsBackupBackupPlanLifecycleDetails? @inlinable @@ -3244,7 +3244,7 @@ extension SecurityHub { public let copyActions: [AwsBackupBackupPlanRuleCopyActionsDetails]? /// Specifies whether Backup creates continuous backups capable of point-in-time restore (PITR). public let enableContinuousBackup: Bool? - /// Defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. 
If you do not specify a lifecycle, Backup applies the lifecycle policy of the source backup to the destination backup. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. + /// Defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. If you don't specify a lifecycle, Backup applies the lifecycle policy of the source backup to the destination backup. Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days. public let lifecycle: AwsBackupBackupPlanLifecycleDetails? /// Uniquely identifies a rule that is used to schedule the backup of a selection of resources. public let ruleId: String? @@ -3303,7 +3303,7 @@ extension SecurityHub { /// They consist of lowercase letters, numbers, and hyphens. public let backupVaultName: String? /// The unique ARN associated with the server-side encryption key. You can specify a key to encrypt your backups from services that support - /// full Backup management. If you do not specify a key, Backup creates an KMS key for you by default. + /// full Backup management. If you don't specify a key, Backup creates an KMS key for you by default. public let encryptionKeyArn: String? /// The Amazon SNS event notifications for the specified backup vault. public let notifications: AwsBackupBackupVaultNotificationsDetails? @@ -9264,7 +9264,7 @@ extension SecurityHub { public let propagateTags: String? /// The ARN of the IAM role that is associated with the service. The role allows the Amazon ECS container agent to register container instances with an Elastic Load Balancing load balancer. public let role: String? - /// The scheduling strategy to use for the service. The REPLICA scheduling strategy places and maintains the desired number of tasks across the cluster. By default, the service scheduler spreads tasks across Availability Zones. Task placement strategies and constraints are used to customize task placement decisions. The DAEMON scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that are specified in the cluster. The service scheduler also evaluates the task placement constraints for running tasks and stops tasks that do not meet the placement constraints. Valid values: REPLICA | DAEMON + /// The scheduling strategy to use for the service. The REPLICA scheduling strategy places and maintains the desired number of tasks across the cluster. By default, the service scheduler spreads tasks across Availability Zones. Task placement strategies and constraints are used to customize task placement decisions. The DAEMON scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that are specified in the cluster. The service scheduler also evaluates the task placement constraints for running tasks and stops tasks that don't meet the placement constraints. Valid values: REPLICA | DAEMON public let schedulingStrategy: String? /// The ARN of the service. public let serviceArn: String? @@ -15039,7 +15039,7 @@ extension SecurityHub { public let dbInstanceStatus: String? /// The Amazon Web Services Region-unique, immutable identifier for the DB instance. This identifier is found in CloudTrail log entries whenever the KMS key for the DB instance is accessed. public let dbiResourceId: String? 
- /// The meaning of this parameter differs according to the database engine you use. MySQL, MariaDB, SQL Server, PostgreSQL Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance. Oracle Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters do not apply to an Oracle DB instance. + /// The meaning of this parameter differs according to the database engine you use. MySQL, MariaDB, SQL Server, PostgreSQL Contains the name of the initial database of this instance that was provided at create time, if one was specified when the DB instance was created. This same name is returned for the life of the DB instance. Oracle Contains the Oracle System ID (SID) of the created DB instance. Not shown when the returned parameters don't apply to an Oracle DB instance. public let dbName: String? /// A list of the DB parameter groups to assign to the DB instance. public let dbParameterGroups: [AwsRdsDbParameterGroup]? @@ -18232,7 +18232,7 @@ extension SecurityHub { public let companyName: [StringFilter]? /// The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the DescribeStandards API response. public let complianceAssociatedStandardsId: [StringFilter]? - /// The unique identifier of a control across standards. Values for this field typically consist of an Amazon Web Servicesservice and a number, such as APIGateway.5. + /// The unique identifier of a control across standards. Values for this field typically consist of an Amazon Web Services service and a number, such as APIGateway.5. public let complianceSecurityControlId: [StringFilter]? /// The name of a security control parameter. public let complianceSecurityControlParametersName: [StringFilter]? @@ -18444,7 +18444,7 @@ extension SecurityHub { public let vulnerabilitiesFixAvailable: [StringFilter]? /// The workflow state of a finding. Note that this field is deprecated. To search for a finding based on its workflow status, use WorkflowStatus. public let workflowState: [StringFilter]? - /// The status of the investigation into a finding. Allowed values are the following. NEW - The initial state of a finding, before it is reviewed. Security Hub also resets the workflow status from NOTIFIED or RESOLVED to NEW in the following cases: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. NOTIFIED - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. If one of the following occurs, the workflow status is changed automatically from NOTIFIED to NEW: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to FAILED, WARNING, or NOT_AVAILABLE. SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is needed. The workflow status of a SUPPRESSED finding does not change if RecordState changes from ARCHIVED to ACTIVE. RESOLVED - The finding was reviewed and remediated and is now considered resolved. The finding remains RESOLVED unless one of the following occurs: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to FAILED, WARNING, or NOT_AVAILABLE. 
In those cases, the workflow status is automatically reset to NEW. For findings from controls, if Compliance.Status is PASSED, then Security Hub automatically sets the workflow status to RESOLVED. + /// The status of the investigation into a finding. Allowed values are the following. NEW - The initial state of a finding, before it is reviewed. Security Hub also resets the workflow status from NOTIFIED or RESOLVED to NEW in the following cases: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. NOTIFIED - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. If one of the following occurs, the workflow status is changed automatically from NOTIFIED to NEW: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to FAILED, WARNING, or NOT_AVAILABLE. SUPPRESSED - Indicates that you reviewed the finding and don't believe that any action is needed. The workflow status of a SUPPRESSED finding does not change if RecordState changes from ARCHIVED to ACTIVE. RESOLVED - The finding was reviewed and remediated and is now considered resolved. The finding remains RESOLVED unless one of the following occurs: RecordState changes from ARCHIVED to ACTIVE. Compliance.Status changes from PASSED to FAILED, WARNING, or NOT_AVAILABLE. In those cases, the workflow status is automatically reset to NEW. For findings from controls, if Compliance.Status is PASSED, then Security Hub automatically sets the workflow status to RESOLVED. public let workflowStatus: [StringFilter]? @inlinable @@ -20054,9 +20054,9 @@ extension SecurityHub { public let action: WafAction? /// Rules to exclude from a rule group. public let excludedRules: [WafExcludedRule]? - /// Use the OverrideAction to test your RuleGroup. Any rule in a RuleGroup can potentially block a request. If you set the OverrideAction to None, the RuleGroup blocks a request if any individual rule in the RuleGroup matches the request and is configured to block that request. However, if you first want to test the RuleGroup, set the OverrideAction to Count. The RuleGroup then overrides any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests are counted. ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a web ACL. In this case you do not use ActivatedRule Action. For all other update requests, ActivatedRule Action is used instead of ActivatedRule OverrideAction. + /// Use the OverrideAction to test your RuleGroup. Any rule in a RuleGroup can potentially block a request. If you set the OverrideAction to None, the RuleGroup blocks a request if any individual rule in the RuleGroup matches the request and is configured to block that request. However, if you first want to test the RuleGroup, set the OverrideAction to Count. The RuleGroup then overrides any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests are counted. ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to a web ACL. In this case you don't use ActivatedRule Action. For all other update requests, ActivatedRule Action is used instead of ActivatedRule OverrideAction. public let overrideAction: WafOverrideAction? - /// Specifies the order in which the rules in a web ACL are evaluated. 
Rules with a lower value for Priority are evaluated before rules with a higher value. The value must be a unique integer. If you add multiple rules to a web ACL, the values do not need to be consecutive. + /// Specifies the order in which the rules in a web ACL are evaluated. Rules with a lower value for Priority are evaluated before rules with a higher value. The value must be a unique integer. If you add multiple rules to a web ACL, the values don't need to be consecutive. public let priority: Int? /// The identifier for a rule. public let ruleId: String? @@ -21260,7 +21260,7 @@ extension SecurityHub { public let associatedStandards: [AssociatedStandard]? /// Typically provides the industry or regulatory framework requirements that are related to a control. The check for that control is aligned with these requirements. Array Members: Maximum number of 32 items. public let relatedRequirements: [String]? - /// Typically provides the unique identifier of a control across standards. For Security Hub controls, this field consists of an Amazon Web Servicesservice and a unique number, such as APIGateway.5. + /// Typically provides the unique identifier of a control across standards. For Security Hub controls, this field consists of an Amazon Web Services service and a unique number, such as APIGateway.5. public let securityControlId: String? /// Typically an object that includes security control parameter names and values. public let securityControlParameters: [SecurityControlParameter]? @@ -21674,7 +21674,7 @@ extension SecurityHub { public struct CreateFindingAggregatorRequest: AWSEncodableShape { /// Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are as follows: ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. SPECIFIED_REGIONS - Aggregates findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions. NO_REGIONS - Aggregates no data because no Regions are selected as linked Regions. public let regionLinkingMode: String? - /// If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS. + /// If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that don't replicate and send findings to the home Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do replicate and send findings to the home Region. 
An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS. public let regions: [String]? @inlinable @@ -21697,9 +21697,9 @@ extension SecurityHub { } public struct CreateFindingAggregatorResponse: AWSDecodableShape { - /// The aggregation Region. + /// The home Region. Findings generated in linked Regions are replicated and sent to the home Region. public let findingAggregationRegion: String? - /// The ARN of the finding aggregator. You use the finding aggregator ARN to retrieve details for, update, and stop finding aggregation. + /// The ARN of the finding aggregator. You use the finding aggregator ARN to retrieve details for, update, and stop cross-Region aggregation. public let findingAggregatorArn: String? /// Indicates whether to link all Regions, all Regions except for a list of excluded Regions, or a list of included Regions. public let regionLinkingMode: String? @@ -22665,7 +22665,7 @@ extension SecurityHub { public struct EnableSecurityHubRequest: AWSEncodableShape { /// This field, used when enabling Security Hub, specifies whether the calling account has consolidated control findings turned on. If the value for this field is set to SECURITY_CONTROL, Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards. If the value for this field is set to STANDARD_CONTROL, Security Hub generates separate findings for a control check when the check applies to multiple enabled standards. The value for this field in a member account matches the value in the administrator account. For accounts that aren't part of an organization, the default value of this field is SECURITY_CONTROL if you enabled Security Hub on or after February 23, 2023. public let controlFindingGenerator: ControlFindingGenerator? - /// Whether to enable the security standards that Security Hub has designated as automatically enabled. If you do not provide a value for EnableDefaultStandards, it is set to true. To not enable the automatically enabled standards, set EnableDefaultStandards to false. + /// Whether to enable the security standards that Security Hub has designated as automatically enabled. If you don't provide a value for EnableDefaultStandards, it is set to true. To not enable the automatically enabled standards, set EnableDefaultStandards to false. public let enableDefaultStandards: Bool? /// The tags to add to the hub resource when you enable Security Hub. public let tags: [String: String]? @@ -22798,7 +22798,7 @@ extension SecurityHub { public let nextToken: String? /// An array of objects that provides details about the finding change event, including the Amazon Web Services Security Finding Format (ASFF) field that changed, the value of the field before the change, and the value of the field after the change. public let updates: [FindingHistoryUpdate]? - /// Identifies the source of the event that changed the finding. For example, an integrated Amazon Web Servicesservice or third-party partner integration may call BatchImportFindings , or an Security Hub customer may call BatchUpdateFindings . + /// Identifies the source of the event that changed the finding. For example, an integrated Amazon Web Services service or third-party partner integration may call BatchImportFindings , or an Security Hub customer may call BatchUpdateFindings . public let updateSource: FindingHistoryUpdateSource? /// A timestamp that indicates when Security Hub processed the updated finding record. 
This field accepts only the specified formats. Timestamps /// can end with Z or ("+" / "-") time-hour [":" time-minute]. The time-secfrac after seconds is limited @@ -22851,7 +22851,7 @@ extension SecurityHub { public struct FindingHistoryUpdateSource: AWSDecodableShape { /// The identity of the source that initiated the finding change event. For example, the Amazon Resource Name (ARN) of a partner that calls BatchImportFindings or of a customer that calls BatchUpdateFindings. public let identity: String? - /// Describes the type of finding change event, such as a call to BatchImportFindings (by an integrated Amazon Web Servicesservice or third party partner integration) or BatchUpdateFindings (by a Security Hub customer). + /// Describes the type of finding change event, such as a call to BatchImportFindings (by an integrated Amazon Web Services service or third party partner integration) or BatchUpdateFindings (by a Security Hub customer). public let type: FindingHistoryUpdateSourceType? @inlinable @@ -23300,7 +23300,7 @@ extension SecurityHub { } public struct GetFindingAggregatorResponse: AWSDecodableShape { - /// The aggregation Region. + /// The home Region. Findings generated in linked Regions are replicated and sent to the home Region. public let findingAggregationRegion: String? /// The ARN of the finding aggregator. public let findingAggregatorArn: String? @@ -23474,7 +23474,7 @@ extension SecurityHub { } public struct GetInsightsRequest: AWSEncodableShape { - /// The ARNs of the insights to describe. If you do not provide any insight ARNs, then GetInsights returns all of your custom insights. It does not return any managed insights. + /// The ARNs of the insights to describe. If you don't provide any insight ARNs, then GetInsights returns all of your custom insights. It does not return any managed insights. public let insightArns: [String]? /// The maximum number of items to return in the response. public let maxResults: Int? @@ -25957,7 +25957,7 @@ extension SecurityHub { public let destinationCidrBlock: String? /// The IPv6 CIDR block used for the destination match. public let destinationIpv6CidrBlock: String? - /// The prefix of the destination Amazon Web Servicesservice. + /// The prefix of the destination Amazon Web Services service. public let destinationPrefixListId: String? /// The ID of the egress-only internet gateway. public let egressOnlyInternetGatewayId: String? @@ -26545,7 +26545,7 @@ extension SecurityHub { public let remediationUrl: String? /// The Amazon Resource Name (ARN) for a security control across standards, such as arn:aws:securityhub:eu-central-1:123456789012:security-control/S3.1. This parameter doesn't mention a specific standard. public let securityControlArn: String? - /// The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Servicesservice name and a number, such as APIGateway.3. + /// The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Services service name and a number, such as APIGateway.3. public let securityControlId: String? /// The enablement status of a security control in a specific standard. public let securityControlStatus: ControlStatus? @@ -26624,7 +26624,7 @@ extension SecurityHub { public let parameterDefinitions: [String: ParameterDefinition]? /// A link to Security Hub documentation that explains how to remediate a failed finding for a security control. public let remediationUrl: String? 
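// Usage sketch for the finding-aggregator shapes shown in this diff: enabling
// cross-Region aggregation with an explicit list of linked Regions. The
// createFindingAggregator call itself is assumed to exist alongside the
// CreateFindingAggregatorRequest/Response shapes above; invoke it from the
// Region that should become the home Region.
import SotoSecurityHub

func enableCrossRegionAggregation(using securityHub: SecurityHub) async throws {
    let request = CreateFindingAggregatorRequest(
        regionLinkingMode: "SPECIFIED_REGIONS",  // link only the Regions listed below
        regions: ["eu-west-1", "eu-west-3"]      // the linked Regions (placeholders)
    )
    let response = try await securityHub.createFindingAggregator(request)
    // findingAggregationRegion is the home Region that linked Regions replicate to.
    print("Home Region:", response.findingAggregationRegion ?? "unknown")
}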
- /// The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Servicesservice name and a number (for example, APIGateway.3). This parameter differs from SecurityControlArn, which is a unique Amazon Resource Name (ARN) assigned to a control. The ARN references the security control ID (for example, arn:aws:securityhub:eu-central-1:123456789012:security-control/APIGateway.3). + /// The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Services service name and a number (for example, APIGateway.3). This parameter differs from SecurityControlArn, which is a unique Amazon Resource Name (ARN) assigned to a control. The ARN references the security control ID (for example, arn:aws:securityhub:eu-central-1:123456789012:security-control/APIGateway.3). public let securityControlId: String? /// The severity of a security control. For more information about how Security Hub determines control severity, see Assigning severity to control findings in the Security Hub User Guide. public let severityRating: SeverityRating? @@ -26800,9 +26800,9 @@ extension SecurityHub { } public struct Severity: AWSEncodableShape & AWSDecodableShape { - /// The severity value of the finding. The allowed values are the following. INFORMATIONAL - No issue was found. LOW - The issue does not require action on its own. MEDIUM - The issue must be addressed but not urgently. HIGH - The issue must be addressed as a priority. CRITICAL - The issue must be remediated immediately to avoid it escalating. If you provide Normalized and do not provide Label, then Label is set automatically as follows. 0 - INFORMATIONAL 1–39 - LOW 40–69 - MEDIUM 70–89 - HIGH 90–100 - CRITICAL + /// The severity value of the finding. The allowed values are the following. INFORMATIONAL - No issue was found. LOW - The issue does not require action on its own. MEDIUM - The issue must be addressed but not urgently. HIGH - The issue must be addressed as a priority. CRITICAL - The issue must be remediated immediately to avoid it escalating. If you provide Normalized and don't provide Label, then Label is set automatically as follows. 0 - INFORMATIONAL 1–39 - LOW 40–69 - MEDIUM 70–89 - HIGH 90–100 - CRITICAL public let label: SeverityLabel? - /// Deprecated. The normalized severity of a finding. Instead of providing Normalized, provide Label. The value of Normalized can be an integer between 0 and 100. If you provide Label and do not provide Normalized, then Normalized is set automatically as follows. INFORMATIONAL - 0 LOW - 1 MEDIUM - 40 HIGH - 70 CRITICAL - 90 + /// Deprecated. The normalized severity of a finding. Instead of providing Normalized, provide Label. The value of Normalized can be an integer between 0 and 100. If you provide Label and don't provide Normalized, then Normalized is set automatically as follows. INFORMATIONAL - 0 LOW - 1 MEDIUM - 40 HIGH - 70 CRITICAL - 90 public let normalized: Int? /// The native severity from the finding product that generated the finding. Length Constraints: Minimum length of 1. Maximum length of 64. public let original: String? @@ -26832,7 +26832,7 @@ extension SecurityHub { public struct SeverityUpdate: AWSEncodableShape & AWSDecodableShape { /// The severity value of the finding. The allowed values are the following. INFORMATIONAL - No issue was found. LOW - The issue does not require action on its own. MEDIUM - The issue must be addressed but not urgently. 
HIGH - The issue must be addressed as a priority. CRITICAL - The issue must be remediated immediately to avoid it escalating. public let label: SeverityLabel? - /// The normalized severity for the finding. This attribute is to be deprecated in favor of Label. If you provide Normalized and do not provide Label, Label is set automatically as follows. 0 - INFORMATIONAL 1–39 - LOW 40–69 - MEDIUM 70–89 - HIGH 90–100 - CRITICAL + /// The normalized severity for the finding. This attribute is to be deprecated in favor of Label. If you provide Normalized and don't provide Label, Label is set automatically as follows. 0 - INFORMATIONAL 1–39 - LOW 40–69 - MEDIUM 70–89 - HIGH 90–100 - CRITICAL public let normalized: Int? /// The native severity as defined by the Amazon Web Services service or integrated partner product that generated the finding. public let product: Double? @@ -27034,7 +27034,7 @@ extension SecurityHub { public let relatedRequirements: [String]? /// The ARN of a security control across standards, such as arn:aws:securityhub:eu-central-1:123456789012:security-control/S3.1. This parameter doesn't mention a specific standard. public let securityControlArn: String? - /// The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Servicesservice name and a number, such as APIGateway.3. + /// The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Services service name and a number, such as APIGateway.3. public let securityControlId: String? /// The Amazon Resource Name (ARN) of a security standard. public let standardsArn: String? @@ -27108,7 +27108,7 @@ extension SecurityHub { public let relatedRequirements: [String]? /// The ARN of a control, such as arn:aws:securityhub:eu-central-1:123456789012:security-control/S3.1. This parameter doesn't mention a specific standard. public let securityControlArn: String? - /// A unique standard-agnostic identifier for a control. Values for this field typically consist of an Amazon Web Servicesservice and a number, such as APIGateway.5. This field doesn't reference a specific standard. + /// A unique standard-agnostic identifier for a control. Values for this field typically consist of an Amazon Web Services service and a number, such as APIGateway.5. This field doesn't reference a specific standard. public let securityControlId: String? /// The Amazon Resource Name (ARN) of a standard. public let standardsArn: String? @@ -27961,7 +27961,7 @@ extension SecurityHub { public let findingAggregatorArn: String? /// Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are as follows: ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the Regions parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them. 
SPECIFIED_REGIONS - Aggregates findings only from the Regions listed in the Regions parameter. Security Hub does not automatically aggregate findings from new Regions. NO_REGIONS - Aggregates no data because no Regions are selected as linked Regions. public let regionLinkingMode: String? - /// If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS. + /// If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that don't replicate and send findings to the home Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do replicate and send findings to the home Region. An InvalidInputException error results if you populate this field while RegionLinkingMode is NO_REGIONS. public let regions: [String]? @inlinable @@ -27987,7 +27987,7 @@ extension SecurityHub { } public struct UpdateFindingAggregatorResponse: AWSDecodableShape { - /// The aggregation Region. + /// The home Region. Findings generated in linked Regions are replicated and sent to the home Region. public let findingAggregationRegion: String? /// The ARN of the finding aggregator. public let findingAggregatorArn: String? @@ -28502,7 +28502,7 @@ extension SecurityHub { } public struct Workflow: AWSEncodableShape & AWSDecodableShape { - /// The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue. The allowed values are the following. NEW - The initial state of a finding, before it is reviewed. Security Hub also resets the workflow status from NOTIFIED or RESOLVED to NEW in the following cases: RecordState changes from ARCHIVED to ACTIVE. ComplianceStatus changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. NOTIFIED - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated. RESOLVED - The finding was reviewed and remediated and is now considered resolved. + /// The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue. The allowed values are the following. NEW - The initial state of a finding, before it is reviewed. Security Hub also resets the workflow status from NOTIFIED or RESOLVED to NEW in the following cases: RecordState changes from ARCHIVED to ACTIVE. ComplianceStatus changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. NOTIFIED - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. 
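// Usage sketch for UpdateFindingAggregator as documented above: changing the
// Region linking mode from the current home Region. Only parameters shown in
// this diff are used; the example Regions are placeholders.
import SotoSecurityHub

func updateLinkedRegions(using securityHub: SecurityHub, aggregatorArn: String) async throws {
    // Must be invoked from the current home Region; the home Region itself
    // cannot be changed with this operation.
    let response = try await securityHub.updateFindingAggregator(
        findingAggregatorArn: aggregatorArn,
        regionLinkingMode: "SPECIFIED_REGIONS",
        regions: ["eu-west-1", "eu-west-3", "eu-central-1"]
    )
    print("Home Region:", response.findingAggregationRegion ?? "unknown")
}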
SUPPRESSED - Indicates that you reviewed the finding and don't believe that any action is needed. The finding is no longer updated. RESOLVED - The finding was reviewed and remediated and is now considered resolved. public let status: WorkflowStatus? @inlinable @@ -28516,7 +28516,7 @@ extension SecurityHub { } public struct WorkflowUpdate: AWSEncodableShape & AWSDecodableShape { - /// The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue. The allowed values are the following. NEW - The initial state of a finding, before it is reviewed. Security Hub also resets WorkFlowStatus from NOTIFIED or RESOLVED to NEW in the following cases: The record state changes from ARCHIVED to ACTIVE. The compliance status changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. NOTIFIED - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. RESOLVED - The finding was reviewed and remediated and is now considered resolved. SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated. + /// The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue. The allowed values are the following. NEW - The initial state of a finding, before it is reviewed. Security Hub also resets WorkFlowStatus from NOTIFIED or RESOLVED to NEW in the following cases: The record state changes from ARCHIVED to ACTIVE. The compliance status changes from PASSED to either WARNING, FAILED, or NOT_AVAILABLE. NOTIFIED - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner. RESOLVED - The finding was reviewed and remediated and is now considered resolved. SUPPRESSED - Indicates that you reviewed the finding and don't believe that any action is needed. The finding is no longer updated. public let status: WorkflowStatus? @inlinable @@ -28530,7 +28530,7 @@ extension SecurityHub { } public struct Policy: AWSEncodableShape & AWSDecodableShape { - /// The Amazon Web Servicesservice that the configuration policy applies to. + /// The Amazon Web Services service that the configuration policy applies to. public let securityHub: SecurityHubPolicy? @inlinable diff --git a/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift b/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift index cd70dacc26..c5dd56187f 100644 --- a/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift +++ b/Sources/Soto/Services/SecurityLake/SecurityLake_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS SecurityLake service. /// -/// Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. 
Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data. The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data. Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide. Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF). Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics. +/// Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to automatically centralize security data from cloud, on-premises, and custom sources into a data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations is an account management service that lets you consolidate multiple Amazon Web Services accounts into an organization that you create and centrally manage. With Organizations, you can create member accounts and invite existing accounts to join your organization. Security Lake helps you analyze security data for a more complete understanding of your security posture across the entire organization. It can also help you improve the protection of your workloads, applications, and data. The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you retain ownership over your data. Amazon Security Lake integrates with CloudTrail, a service that provides a record of actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls from the Security Lake console and code calls to the Security Lake API operations. If you create a trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. 
If you don't configure a trail, you can still view the most recent events in the CloudTrail console in Event history. Using the information collected by CloudTrail you can determine the request that was made to Security Lake, the IP address from which the request was made, who made the request, when it was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide. Security Lake automates the collection of security-related log and event data from integrated Amazon Web Services services and third-party services. It also helps you manage the lifecycle of data with customizable retention and replication settings. Security Lake converts ingested data into Apache Parquet format and a standard open-source schema called the Open Cybersecurity Schema Framework (OCSF). Other Amazon Web Services services and third-party services can subscribe to the data that's stored in Security Lake for incident response and security data analytics. public struct SecurityLake: AWSService { // MARK: Member variables @@ -91,7 +91,7 @@ public struct SecurityLake: AWSService { // MARK: API Calls - /// Adds a natively supported Amazon Web Service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Service as a source, Security Lake starts collecting logs and events from it. You can use this API only to enable natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable data collection from a custom source. + /// Adds a natively supported Amazon Web Services service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Services service as a source, Security Lake starts collecting logs and events from it. You can use this API only to enable natively supported Amazon Web Services services as a source. Use CreateCustomLogSource to enable data collection from a custom source. @Sendable @inlinable public func createAwsLogSource(_ input: CreateAwsLogSourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateAwsLogSourceResponse { @@ -104,7 +104,7 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Adds a natively supported Amazon Web Service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Service as a source, Security Lake starts collecting logs and events from it. You can use this API only to enable natively supported Amazon Web Services as a source. Use CreateCustomLogSource to enable data collection from a custom source. + /// Adds a natively supported Amazon Web Services service as an Amazon Security Lake source. Enables source types for member accounts in required Amazon Web Services Regions, based on the parameters you specify. 
You can choose any source type in any Region for either accounts that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Services service as a source, Security Lake starts collecting logs and events from it. You can use this API only to enable natively supported Amazon Web Services services as a source. Use CreateCustomLogSource to enable data collection from a custom source. /// /// Parameters: /// - sources: Specify the natively-supported Amazon Web Services service to add as a source in Security Lake. @@ -136,9 +136,9 @@ public struct SecurityLake: AWSService { /// Adds a third-party custom source in Amazon Security Lake, from the Amazon Web Services Region where you want to create a custom source. Security Lake can collect logs and events from third-party custom sources. After creating the appropriate IAM role to invoke Glue crawler, use this API to add a custom source name in Security Lake. This operation creates a partition in the Amazon S3 bucket for Security Lake as the target location for log files from the custom source. In addition, this operation also creates an associated Glue table and an Glue crawler. /// /// Parameters: - /// - configuration: The configuration for the third-party custom source. + /// - configuration: The configuration used for the third-party custom source. /// - eventClasses: The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are: ACCESS_ACTIVITY FILE_ACTIVITY KERNEL_ACTIVITY KERNEL_EXTENSION MEMORY_ACTIVITY MODULE_ACTIVITY PROCESS_ACTIVITY REGISTRY_KEY_ACTIVITY REGISTRY_VALUE_ACTIVITY RESOURCE_ACTIVITY SCHEDULED_JOB_ACTIVITY SECURITY_FINDING ACCOUNT_CHANGE AUTHENTICATION AUTHORIZATION ENTITY_MANAGEMENT_AUDIT DHCP_ACTIVITY NETWORK_ACTIVITY DNS_ACTIVITY FTP_ACTIVITY HTTP_ACTIVITY RDP_ACTIVITY SMB_ACTIVITY SSH_ACTIVITY CONFIG_STATE INVENTORY_INFO EMAIL_ACTIVITY API_ACTIVITY CLOUD_API - /// - sourceName: Specify the name for a third-party custom source. This must be a Regionally unique value. + /// - sourceName: Specify the name for a third-party custom source. This must be a Regionally unique value. The sourceName you enter here, is used in the LogProviderRole name which follows the convention AmazonSecurityLake-Provider-{name of the custom source}-{region}. You must use a CustomLogSource name that is shorter than or equal to 20 characters. This ensures that the LogProviderRole name is below the 64 character limit. /// - sourceVersion: Specify the source version for the third-party custom source, to limit log collection to a specific version of custom data source. /// - logger: Logger use during operation @inlinable @@ -158,7 +158,7 @@ public struct SecurityLake: AWSService { return try await self.createCustomLogSource(input, logger: logger) } - /// Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations. 
When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide. + /// Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations. When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call and after you create subscribers using the CreateSubscriber API. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide. @Sendable @inlinable public func createDataLake(_ input: CreateDataLakeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataLakeResponse { @@ -171,7 +171,7 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations. When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide. + /// Initializes an Amazon Security Lake instance with the provided (or default) configuration. You can enable Security Lake in Amazon Web Services Regions with customized settings before enabling log collection in Regions. To specify particular Regions, configure these Regions using the configurations parameter. If you have already enabled Security Lake in a Region when you call this command, the command will update the Region if you provide new configuration parameters. 
If you have not already enabled Security Lake in the Region when you call this API, it will set up the data lake in the Region with the specified configurations. When you enable Security Lake, it starts ingesting security data after the CreateAwsLogSource call and after you create subscribers using the CreateSubscriber API. This includes ingesting security data from sources, storing data, and making data accessible to subscribers. Security Lake also enables all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For more information, see the Amazon Security Lake User Guide. /// /// Parameters: /// - configurations: Specify the Region or Regions that will contribute data to the rollup region. @@ -193,7 +193,7 @@ public struct SecurityLake: AWSService { return try await self.createDataLake(input, logger: logger) } - /// Creates the specified notification subscription in Amazon Security Lake for the organization you specify. + /// Creates the specified notification subscription in Amazon Security Lake for the organization you specify. The notification subscription is created for exceptions that cannot be resolved by Security Lake automatically. @Sendable @inlinable public func createDataLakeExceptionSubscription(_ input: CreateDataLakeExceptionSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataLakeExceptionSubscriptionResponse { @@ -206,10 +206,10 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Creates the specified notification subscription in Amazon Security Lake for the organization you specify. + /// Creates the specified notification subscription in Amazon Security Lake for the organization you specify. The notification subscription is created for exceptions that cannot be resolved by Security Lake automatically. /// /// Parameters: - /// - exceptionTimeToLive: The expiration period and time-to-live (TTL). + /// - exceptionTimeToLive: The expiration period and time-to-live (TTL). It is the duration of time until which the exception message remains. /// - notificationEndpoint: The Amazon Web Services account where you want to receive exception notifications. /// - subscriptionProtocol: The subscription protocol to which exception notifications are posted. /// - logger: Logger use during operation @@ -257,7 +257,7 @@ public struct SecurityLake: AWSService { return try await self.createDataLakeOrganizationConfiguration(input, logger: logger) } - /// Creates a subscription permission for accounts that are already enabled in Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region. + /// Creates a subscriber for accounts that are already enabled in Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region. @Sendable @inlinable public func createSubscriber(_ input: CreateSubscriberRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateSubscriberResponse { @@ -270,11 +270,11 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Creates a subscription permission for accounts that are already enabled in Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region. + /// Creates a subscriber for accounts that are already enabled in Amazon Security Lake. 
You can create a subscriber with access to data in the current Amazon Web Services Region. /// /// Parameters: /// - accessTypes: The Amazon S3 or Lake Formation access type. - /// - sources: The supported Amazon Web Services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services. + /// - sources: The supported Amazon Web Services services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services services. /// - subscriberDescription: The description for your subscriber account in Security Lake. /// - subscriberIdentity: The Amazon Web Services identity used to access your data. /// - subscriberName: The name of your Security Lake subscriber account. @@ -333,7 +333,7 @@ public struct SecurityLake: AWSService { return try await self.createSubscriberNotification(input, logger: logger) } - /// Removes a natively supported Amazon Web Service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal. You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts. + /// Removes a natively supported Amazon Web Services service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal. You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts. @Sendable @inlinable public func deleteAwsLogSource(_ input: DeleteAwsLogSourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteAwsLogSourceResponse { @@ -346,7 +346,7 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Removes a natively supported Amazon Web Service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal. You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts. + /// Removes a natively supported Amazon Web Services service as an Amazon Security Lake source. You can remove a source for one or more Regions. When you remove the source, Security Lake stops collecting data from that source in the specified Regions and accounts, and subscribers can no longer consume new data from the source. However, subscribers can still consume data that Security Lake collected from the source before removal. You can choose any source type in any Amazon Web Services Region for either accounts that are part of a trusted organization or standalone accounts. 
/// /// Parameters: /// - sources: Specify the natively-supported Amazon Web Services service to remove as a source in Security Lake. @@ -507,7 +507,7 @@ public struct SecurityLake: AWSService { return try await self.deleteSubscriber(input, logger: logger) } - /// Deletes the specified notification subscription in Amazon Security Lake for the organization you specify. + /// Deletes the specified subscription notification in Amazon Security Lake for the organization you specify. @Sendable @inlinable public func deleteSubscriberNotification(_ input: DeleteSubscriberNotificationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteSubscriberNotificationResponse { @@ -520,7 +520,7 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Deletes the specified notification subscription in Amazon Security Lake for the organization you specify. + /// Deletes the specified subscription notification in Amazon Security Lake for the organization you specify. /// /// Parameters: /// - subscriberId: The ID of the Security Lake subscriber account. @@ -562,7 +562,7 @@ public struct SecurityLake: AWSService { return try await self.deregisterDataLakeDelegatedAdministrator(input, logger: logger) } - /// Retrieves the details of exception notifications for the account in Amazon Security Lake. + /// Retrieves the protocol and endpoint that were provided when subscribing to Amazon SNS topics for exception notifications. @Sendable @inlinable public func getDataLakeExceptionSubscription(_ input: GetDataLakeExceptionSubscriptionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataLakeExceptionSubscriptionResponse { @@ -575,7 +575,7 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Retrieves the details of exception notifications for the account in Amazon Security Lake. + /// Retrieves the protocol and endpoint that were provided when subscribing to Amazon SNS topics for exception notifications. /// /// Parameters: /// - logger: Logger use during operation @@ -694,8 +694,8 @@ public struct SecurityLake: AWSService { /// Lists the Amazon Security Lake exceptions that you can use to find the source of problems and fix them. /// /// Parameters: - /// - maxResults: List the maximum number of failures in Security Lake. - /// - nextToken: List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + /// - maxResults: Lists the maximum number of failures in Security Lake. + /// - nextToken: Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. /// - regions: The Amazon Web Services Regions from which exceptions are retrieved. /// - logger: Logger use during operation @inlinable @@ -742,7 +742,7 @@ public struct SecurityLake: AWSService { return try await self.listDataLakes(input, logger: logger) } - /// Retrieves the log sources in the current Amazon Web Services Region. + /// Retrieves the log sources. 
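The pagination contract described above (a per-page nextToken that expires after 24 hours and returns an HTTP 400 InvalidToken once stale) can be exercised with a manual loop. A minimal sketch, assuming an already-configured SecurityLake client value named securityLake and illustrative Region names; the request and response fields are the ones listed in this change:

    var nextToken: String? = nil
    repeat {
        // maxResults, nextToken, and regions are the documented request fields.
        let request = ListDataLakeExceptionsRequest(
            maxResults: 50,
            nextToken: nextToken,
            regions: ["us-east-1", "eu-west-1"]
        )
        let page = try await securityLake.listDataLakeExceptions(request)
        for exception in page.exceptions ?? [] {
            print("Security Lake exception:", exception)
        }
        // Tokens expire after 24 hours; reusing a stale token yields HTTP 400 InvalidToken.
        nextToken = page.nextToken
    } while nextToken != nil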
@Sendable @inlinable public func listLogSources(_ input: ListLogSourcesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListLogSourcesResponse { @@ -755,7 +755,7 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Retrieves the log sources in the current Amazon Web Services Region. + /// Retrieves the log sources. /// /// Parameters: /// - accounts: The list of Amazon Web Services accounts for which log sources are displayed. @@ -783,7 +783,7 @@ public struct SecurityLake: AWSService { return try await self.listLogSources(input, logger: logger) } - /// List all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account. + /// Lists all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account. @Sendable @inlinable public func listSubscribers(_ input: ListSubscribersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSubscribersResponse { @@ -796,7 +796,7 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// List all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account. + /// Lists all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list of subscriptions associated with a specific organization or Amazon Web Services account. /// /// Parameters: /// - maxResults: The maximum number of accounts for which the configuration is displayed. @@ -937,7 +937,7 @@ public struct SecurityLake: AWSService { return try await self.untagResource(input, logger: logger) } - /// Specifies where to store your security data and for how long. You can add a rollup Region to consolidate data from multiple Amazon Web Services Regions. + /// You can use UpdateDataLake to specify where to store your security data, how it should be encrypted at rest and for how long. You can add a Rollup Region to consolidate data from multiple Amazon Web Services Regions, replace default encryption (SSE-S3) with Customer Manged Key, or specify transition and expiration actions through storage Lifecycle management. The UpdateDataLake API works as an "upsert" operation that performs an insert if the specified item or record does not exist, or an update if it already exists. Security Lake securely stores your data at rest using Amazon Web Services encryption solutions. For more details, see Data protection in Amazon Security Lake. For example, omitting the key encryptionConfiguration from a Region that is included in an update call that currently uses KMS will leave that Region's KMS key in place, but specifying encryptionConfiguration: {kmsKeyId: 'S3_MANAGED_KEY'} for that same Region will reset the key to S3-managed. For more details about lifecycle management and how to update retention settings for one or more Regions after enabling Security Lake, see the Amazon Security Lake User Guide. @Sendable @inlinable public func updateDataLake(_ input: UpdateDataLakeRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDataLakeResponse { @@ -950,10 +950,10 @@ public struct SecurityLake: AWSService { logger: logger ) } - /// Specifies where to store your security data and for how long. 
You can add a rollup Region to consolidate data from multiple Amazon Web Services Regions. + /// You can use UpdateDataLake to specify where to store your security data, how it should be encrypted at rest and for how long. You can add a Rollup Region to consolidate data from multiple Amazon Web Services Regions, replace default encryption (SSE-S3) with Customer Manged Key, or specify transition and expiration actions through storage Lifecycle management. The UpdateDataLake API works as an "upsert" operation that performs an insert if the specified item or record does not exist, or an update if it already exists. Security Lake securely stores your data at rest using Amazon Web Services encryption solutions. For more details, see Data protection in Amazon Security Lake. For example, omitting the key encryptionConfiguration from a Region that is included in an update call that currently uses KMS will leave that Region's KMS key in place, but specifying encryptionConfiguration: {kmsKeyId: 'S3_MANAGED_KEY'} for that same Region will reset the key to S3-managed. For more details about lifecycle management and how to update retention settings for one or more Regions after enabling Security Lake, see the Amazon Security Lake User Guide. /// /// Parameters: - /// - configurations: Specify the Region or Regions that will contribute data to the rollup region. + /// - configurations: Specifies the Region or Regions that will contribute data to the rollup region. /// - metaStoreManagerRoleArn: The Amazon Resource Name (ARN) used to create and update the Glue table. This table contains partitions generated by the ingestion and normalization of Amazon Web Services log sources and custom sources. /// - logger: Logger use during operation @inlinable @@ -985,7 +985,7 @@ public struct SecurityLake: AWSService { /// Updates the specified notification subscription in Amazon Security Lake for the organization you specify. /// /// Parameters: - /// - exceptionTimeToLive: The time-to-live (TTL) for the exception message to remain. + /// - exceptionTimeToLive: The time-to-live (TTL) for the exception message to remain. It is the duration of time until which the exception message remains. /// - notificationEndpoint: The account that is subscribed to receive exception notifications. /// - subscriptionProtocol: The subscription protocol to which exception messages are posted. /// - logger: Logger use during operation @@ -1020,10 +1020,10 @@ public struct SecurityLake: AWSService { /// Updates an existing subscription for the given Amazon Security Lake account ID. You can update a subscriber by changing the sources that the subscriber consumes data from. /// /// Parameters: - /// - sources: The supported Amazon Web Services from which logs and events are collected. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide. + /// - sources: The supported Amazon Web Services services from which logs and events are collected. For the list of supported Amazon Web Services services, see the Amazon Security Lake User Guide. /// - subscriberDescription: The description of the Security Lake account subscriber. /// - subscriberId: A value created by Security Lake that uniquely identifies your subscription. - /// - subscriberIdentity: The AWS identity used to access your data. + /// - subscriberIdentity: The Amazon Web Services identity used to access your data. /// - subscriberName: The name of the Security Lake account subscriber. 
/// - logger: Logger use during operation @inlinable @@ -1149,7 +1149,7 @@ extension SecurityLake { /// Return PaginatorSequence for operation ``listDataLakeExceptions(_:logger:)``. /// /// - Parameters: - /// - maxResults: List the maximum number of failures in Security Lake. + /// - maxResults: Lists the maximum number of failures in Security Lake. /// - regions: The Amazon Web Services Regions from which exceptions are retrieved. /// - logger: Logger used for logging @inlinable diff --git a/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift b/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift index 3c558e1bab..00d0f601bc 100644 --- a/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift +++ b/Sources/Soto/Services/SecurityLake/SecurityLake_shapes.swift @@ -74,7 +74,7 @@ extension SecurityLake { } public enum LogSourceResource: AWSEncodableShape & AWSDecodableShape, Sendable { - /// Amazon Security Lake supports log and event collection for natively supported Amazon Web Services. For more information, see the Amazon Security Lake User Guide. + /// Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide. case awsLogSource(AwsLogSourceResource) /// Amazon Security Lake supports custom source types. For more information, see the Amazon Security Lake User Guide. case customLogSource(CustomLogSourceResource) @@ -124,7 +124,7 @@ extension SecurityLake { } public enum NotificationConfiguration: AWSEncodableShape, Sendable { - /// The configurations for HTTPS subscriber notification. + /// The configurations used for HTTPS subscriber notification. case httpsNotificationConfiguration(HttpsNotificationConfiguration) /// The configurations for SQS subscriber notification. case sqsNotificationConfiguration(SqsNotificationConfiguration) @@ -157,9 +157,9 @@ extension SecurityLake { // MARK: Shapes public struct AwsIdentity: AWSEncodableShape & AWSDecodableShape { - /// The external ID used to estalish trust relationship with the AWS identity. + /// The external ID used to establish trust relationship with the Amazon Web Services identity. public let externalId: String - /// The AWS identity principal. + /// The Amazon Web Services identity principal. public let principal: String @inlinable @@ -186,9 +186,9 @@ extension SecurityLake { public let accounts: [String]? /// Specify the Regions where you want to enable Security Lake. public let regions: [String] - /// The name for a Amazon Web Services source. This must be a Regionally unique value. + /// The name for a Amazon Web Services source. public let sourceName: AwsLogSourceName - /// The version for a Amazon Web Services source. This must be a Regionally unique value. + /// The version for a Amazon Web Services source. public let sourceVersion: String? @inlinable @@ -264,7 +264,7 @@ extension SecurityLake { } public struct CreateAwsLogSourceResponse: AWSDecodableShape { - /// Lists all accounts in which enabling a natively supported Amazon Web Service as a Security Lake source failed. The failure occurred as these accounts are not part of an organization. + /// Lists all accounts in which enabling a natively supported Amazon Web Services service as a Security Lake source failed. The failure occurred as these accounts are not part of an organization. public let failed: [String]? 
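As a usage sketch of the source-enablement wording above: enable a natively supported source and inspect the accounts that could not be enabled. The configuration type name (AwsLogSourceConfiguration) and the .route53 case are assumptions for illustration; securityLake is an already-configured client, and the account IDs and Region are placeholders:

    let source = AwsLogSourceConfiguration(
        accounts: ["111122223333", "444455556666"],
        regions: ["us-east-1"],
        sourceName: .route53
    )
    let response = try await securityLake.createAwsLogSource(sources: [source])
    // Accounts that are not part of the organization are reported back here
    // rather than failing the whole call.
    if let failed = response.failed, !failed.isEmpty {
        print("Could not enable the source for accounts:", failed.joined(separator: ", "))
    }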
@inlinable @@ -278,11 +278,11 @@ extension SecurityLake { } public struct CreateCustomLogSourceRequest: AWSEncodableShape { - /// The configuration for the third-party custom source. + /// The configuration used for the third-party custom source. public let configuration: CustomLogSourceConfiguration /// The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. The supported event classes are: ACCESS_ACTIVITY FILE_ACTIVITY KERNEL_ACTIVITY KERNEL_EXTENSION MEMORY_ACTIVITY MODULE_ACTIVITY PROCESS_ACTIVITY REGISTRY_KEY_ACTIVITY REGISTRY_VALUE_ACTIVITY RESOURCE_ACTIVITY SCHEDULED_JOB_ACTIVITY SECURITY_FINDING ACCOUNT_CHANGE AUTHENTICATION AUTHORIZATION ENTITY_MANAGEMENT_AUDIT DHCP_ACTIVITY NETWORK_ACTIVITY DNS_ACTIVITY FTP_ACTIVITY HTTP_ACTIVITY RDP_ACTIVITY SMB_ACTIVITY SSH_ACTIVITY CONFIG_STATE INVENTORY_INFO EMAIL_ACTIVITY API_ACTIVITY CLOUD_API public let eventClasses: [String]? - /// Specify the name for a third-party custom source. This must be a Regionally unique value. + /// Specify the name for a third-party custom source. This must be a Regionally unique value. The sourceName you enter here, is used in the LogProviderRole name which follows the convention AmazonSecurityLake-Provider-{name of the custom source}-{region}. You must use a CustomLogSource name that is shorter than or equal to 20 characters. This ensures that the LogProviderRole name is below the 64 character limit. public let sourceName: String /// Specify the source version for the third-party custom source, to limit log collection to a specific version of custom data source. public let sourceVersion: String? @@ -317,7 +317,7 @@ extension SecurityLake { } public struct CreateCustomLogSourceResponse: AWSDecodableShape { - /// The created third-party custom source. + /// The third-party custom source that was created. public let source: CustomLogSourceResource? @inlinable @@ -331,7 +331,7 @@ extension SecurityLake { } public struct CreateDataLakeExceptionSubscriptionRequest: AWSEncodableShape { - /// The expiration period and time-to-live (TTL). + /// The expiration period and time-to-live (TTL). It is the duration of time until which the exception message remains. public let exceptionTimeToLive: Int64? /// The Amazon Web Services account where you want to receive exception notifications. public let notificationEndpoint: String @@ -480,7 +480,7 @@ extension SecurityLake { public struct CreateSubscriberRequest: AWSEncodableShape { /// The Amazon S3 or Lake Formation access type. public let accessTypes: [AccessType]? - /// The supported Amazon Web Services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services. + /// The supported Amazon Web Services services from which logs and events are collected. Security Lake supports log and event collection for natively supported Amazon Web Services services. public let sources: [LogSourceResource] /// The description for your subscriber account in Security Lake. public let subscriberDescription: String? 
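The new sourceName guidance above implies a simple length budget: the generated role name AmazonSecurityLake-Provider-{name}-{region} must stay below 64 characters, so the custom source name itself must not exceed 20 characters. A standalone sketch of that check; the name and Region are examples only:

    let customSourceName = "my-edr-telemetry"   // 16 characters, within the 20-character limit
    precondition(customSourceName.count <= 20, "CustomLogSource names must be 20 characters or fewer")
    // "AmazonSecurityLake-Provider-" is 28 characters; with a 20-character name and a
    // Region suffix such as "-ap-southeast-2" (15 characters) the total is 63, under the 64 limit.
    let providerRoleName = "AmazonSecurityLake-Provider-\(customSourceName)-us-east-1"
    assert(providerRoleName.count < 64)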
@@ -555,13 +555,13 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.crawlerArn, name: "crawlerArn", parent: name, max: 1011) try self.validate(self.crawlerArn, name: "crawlerArn", parent: name, min: 1) - try self.validate(self.crawlerArn, name: "crawlerArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.crawlerArn, name: "crawlerArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-Za-z0-9_/.\\-]{0,127}$") try self.validate(self.databaseArn, name: "databaseArn", parent: name, max: 1011) try self.validate(self.databaseArn, name: "databaseArn", parent: name, min: 1) - try self.validate(self.databaseArn, name: "databaseArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.databaseArn, name: "databaseArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-Za-z0-9_/.\\-]{0,127}$") try self.validate(self.tableArn, name: "tableArn", parent: name, max: 1011) try self.validate(self.tableArn, name: "tableArn", parent: name, min: 1) - try self.validate(self.tableArn, name: "tableArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.tableArn, name: "tableArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-Za-z0-9_/.\\-]{0,127}$") } private enum CodingKeys: String, CodingKey { @@ -572,7 +572,7 @@ extension SecurityLake { } public struct CustomLogSourceConfiguration: AWSEncodableShape { - /// The configuration for the Glue Crawler for the third-party custom source. + /// The configuration used for the Glue Crawler for a third-party custom source. public let crawlerConfiguration: CustomLogSourceCrawlerConfiguration /// The identity of the log provider for the third-party custom source. public let providerIdentity: AwsIdentity @@ -731,7 +731,7 @@ extension SecurityLake { } public struct DataLakeEncryptionConfiguration: AWSEncodableShape & AWSDecodableShape { - /// The id of KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object. + /// The identifier of KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object. public let kmsKeyId: String? @inlinable @@ -846,7 +846,7 @@ extension SecurityLake { } public struct DataLakeResource: AWSDecodableShape { - /// Retrieves the status of the configuration operation for an account in Amazon Security Lake. + /// Retrieves the status of the CreateDatalake API call for an account in Amazon Security Lake. public let createStatus: DataLakeStatus? /// The Amazon Resource Name (ARN) created by you to provide to the subscriber. For more information about ARNs and how to use them in policies, see the Amazon Security Lake User Guide. public let dataLakeArn: String @@ -892,7 +892,7 @@ extension SecurityLake { public let account: String? /// The Open Cybersecurity Schema Framework (OCSF) event classes which describes the type of data that the custom source will send to Security Lake. 
The supported event classes are: ACCESS_ACTIVITY FILE_ACTIVITY KERNEL_ACTIVITY KERNEL_EXTENSION MEMORY_ACTIVITY MODULE_ACTIVITY PROCESS_ACTIVITY REGISTRY_KEY_ACTIVITY REGISTRY_VALUE_ACTIVITY RESOURCE_ACTIVITY SCHEDULED_JOB_ACTIVITY SECURITY_FINDING ACCOUNT_CHANGE AUTHENTICATION AUTHORIZATION ENTITY_MANAGEMENT_AUDIT DHCP_ACTIVITY NETWORK_ACTIVITY DNS_ACTIVITY FTP_ACTIVITY HTTP_ACTIVITY RDP_ACTIVITY SMB_ACTIVITY SSH_ACTIVITY CONFIG_STATE INVENTORY_INFO EMAIL_ACTIVITY API_ACTIVITY CLOUD_API public let eventClasses: [String]? - /// The supported Amazon Web Services from which logs and events are collected. Amazon Security Lake supports log and event collection for natively supported Amazon Web Services. + /// The supported Amazon Web Services services from which logs and events are collected. Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services. public let sourceName: String? /// The log status for the Security Lake account. public let sourceStatuses: [DataLakeSourceStatus]? @@ -1164,7 +1164,7 @@ extension SecurityLake { } public struct GetDataLakeExceptionSubscriptionResponse: AWSDecodableShape { - /// The expiration period and time-to-live (TTL). + /// The expiration period and time-to-live (TTL). It is the duration of time until which the exception message remains. public let exceptionTimeToLive: Int64? /// The Amazon Web Services account where you receive exception notifications. public let notificationEndpoint: String? @@ -1190,7 +1190,7 @@ extension SecurityLake { } public struct GetDataLakeOrganizationConfigurationResponse: AWSDecodableShape { - /// The configuration for new accounts. + /// The configuration used for new accounts in Security Lake. public let autoEnableNewAccount: [DataLakeAutoEnableNewAccountConfiguration]? @inlinable @@ -1329,9 +1329,9 @@ extension SecurityLake { } public struct ListDataLakeExceptionsRequest: AWSEncodableShape { - /// List the maximum number of failures in Security Lake. + /// Lists the maximum number of failures in Security Lake. public let maxResults: Int? - /// List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. + /// Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. public let nextToken: String? /// The Amazon Web Services Regions from which exceptions are retrieved. public let regions: [String]? @@ -1360,9 +1360,9 @@ extension SecurityLake { } public struct ListDataLakeExceptionsResponse: AWSDecodableShape { - /// Lists the failures that cannot be retried in the current Region. + /// Lists the failures that cannot be retried. public let exceptions: [DataLakeException]? - /// List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. 
+ /// Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error. public let nextToken: String? @inlinable @@ -1544,7 +1544,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-Za-z0-9_/.\\-]{0,127}$") } private enum CodingKeys: CodingKey {} @@ -1625,7 +1625,7 @@ extension SecurityLake { public let roleArn: String? /// The ARN for the Amazon S3 bucket. public let s3BucketArn: String? - /// Amazon Security Lake supports log and event collection for natively supported Amazon Web Services. For more information, see the Amazon Security Lake User Guide. + /// Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide. public let sources: [LogSourceResource] /// The subscriber ARN of the Amazon Security Lake subscriber account. public let subscriberArn: String @@ -1728,7 +1728,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-Za-z0-9_/.\\-]{0,127}$") try self.tags.forEach { try $0.validate(name: "\(name).tags[]") } @@ -1766,7 +1766,7 @@ extension SecurityLake { public func validate(name: String) throws { try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 1) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$") + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-Za-z0-9_/.\\-]{0,127}$") try self.tagKeys.forEach { try validate($0, name: "tagKeys[]", parent: name, max: 128) try validate($0, name: "tagKeys[]", parent: name, min: 1) @@ -1782,7 +1782,7 @@ extension SecurityLake { } public struct UpdateDataLakeExceptionSubscriptionRequest: AWSEncodableShape { - /// The time-to-live (TTL) for the exception message to remain. 
+ /// The time-to-live (TTL) for the exception message to remain. It is the duration of time until which the exception message remains. public let exceptionTimeToLive: Int64? /// The account that is subscribed to receive exception notifications. public let notificationEndpoint: String @@ -1813,7 +1813,7 @@ extension SecurityLake { } public struct UpdateDataLakeRequest: AWSEncodableShape { - /// Specify the Region or Regions that will contribute data to the rollup region. + /// Specifies the Region or Regions that will contribute data to the rollup region. public let configurations: [DataLakeConfiguration] /// The Amazon Resource Name (ARN) used to create and update the Glue table. This table contains partitions generated by the ingestion and normalization of Amazon Web Services log sources and custom sources. public let metaStoreManagerRoleArn: String? @@ -1896,13 +1896,13 @@ extension SecurityLake { } public struct UpdateSubscriberRequest: AWSEncodableShape { - /// The supported Amazon Web Services from which logs and events are collected. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide. + /// The supported Amazon Web Services services from which logs and events are collected. For the list of supported Amazon Web Services services, see the Amazon Security Lake User Guide. public let sources: [LogSourceResource]? /// The description of the Security Lake account subscriber. public let subscriberDescription: String? /// A value created by Security Lake that uniquely identifies your subscription. public let subscriberId: String - /// The AWS identity used to access your data. + /// The Amazon Web Services identity used to access your data. public let subscriberIdentity: AwsIdentity? /// The name of the Security Lake account subscriber. public let subscriberName: String? diff --git a/Sources/Soto/Services/SocialMessaging/SocialMessaging_api.swift b/Sources/Soto/Services/SocialMessaging/SocialMessaging_api.swift new file mode 100644 index 0000000000..dba3a5df4f --- /dev/null +++ b/Sources/Soto/Services/SocialMessaging/SocialMessaging_api.swift @@ -0,0 +1,509 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_exported import SotoCore + +/// Service object for interacting with AWS SocialMessaging service. +/// +/// Amazon Web Services End User Messaging Social, also referred to as Social messaging, is a messaging service that enables application developers to incorporate WhatsApp into their existing workflows. The Amazon Web Services End User Messaging Social API provides information about the Amazon Web Services End User Messaging Social API resources, including supported HTTP methods, parameters, and schemas. 
The Amazon Web Services End User Messaging Social API provides programmatic access to options that are unique to the WhatsApp Business Platform. If you're new to the Amazon Web Services End User Messaging Social API, it's also helpful to review What is Amazon Web Services End User Messaging Social in the Amazon Web Services End User Messaging Social User Guide. The Amazon Web Services End User Messaging Social User Guide provides tutorials, code samples, and procedures that demonstrate how to use Amazon Web Services End User Messaging Social API features programmatically and how to integrate functionality into applications. The guide also provides key information, such as integration with other Amazon Web Services services, and the quotas that apply to use of the service. Regional availability The Amazon Web Services End User Messaging Social API is available across several Amazon Web Services Regions and it provides a dedicated endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Web Services End User Messaging endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference. In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure. +public struct SocialMessaging: AWSService { + // MARK: Member variables + + /// Client used for communication with AWS + public let client: AWSClient + /// Service configuration + public let config: AWSServiceConfig + + // MARK: Initialization + + /// Initialize the SocialMessaging client + /// - parameters: + /// - client: AWSClient used to process requests + /// - region: Region of server you want to communicate with. This will override the partition parameter. + /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). + /// - endpoint: Custom endpoint URL to use instead of standard AWS servers + /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded + /// - timeout: Timeout value for HTTP requests + /// - byteBufferAllocator: Allocator for ByteBuffers + /// - options: Service options + public init( + client: AWSClient, + region: SotoCore.Region? = nil, + partition: AWSPartition = .aws, + endpoint: String? = nil, + middleware: AWSMiddlewareProtocol? = nil, + timeout: TimeAmount? = nil, + byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), + options: AWSServiceConfig.Options = [] + ) { + self.client = client + self.config = AWSServiceConfig( + region: region, + partition: region?.partition ?? 
partition, + serviceName: "SocialMessaging", + serviceIdentifier: "social-messaging", + serviceProtocol: .restjson, + apiVersion: "2024-01-01", + endpoint: endpoint, + errorType: SocialMessagingErrorType.self, + middleware: middleware, + timeout: timeout, + byteBufferAllocator: byteBufferAllocator, + options: options + ) + } + + + + + + // MARK: API Calls + + /// This is only used through the Amazon Web Services console during sign-up to associate your WhatsApp Business Account to your Amazon Web Services account. + @Sendable + @inlinable + public func associateWhatsAppBusinessAccount(_ input: AssociateWhatsAppBusinessAccountInput, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateWhatsAppBusinessAccountOutput { + try await self.client.execute( + operation: "AssociateWhatsAppBusinessAccount", + path: "/v1/whatsapp/signup", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// This is only used through the Amazon Web Services console during sign-up to associate your WhatsApp Business Account to your Amazon Web Services account. + /// + /// Parameters: + /// - setupFinalization: A JSON object that contains the phone numbers and WhatsApp Business Account to link to your account. + /// - signupCallback: Contains the callback access token. + /// - logger: Logger use during operation + @inlinable + public func associateWhatsAppBusinessAccount( + setupFinalization: WhatsAppSetupFinalization? = nil, + signupCallback: WhatsAppSignupCallback? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> AssociateWhatsAppBusinessAccountOutput { + let input = AssociateWhatsAppBusinessAccountInput( + setupFinalization: setupFinalization, + signupCallback: signupCallback + ) + return try await self.associateWhatsAppBusinessAccount(input, logger: logger) + } + + /// Delete a media object from the WhatsApp service. If the object is still in an Amazon S3 bucket you should delete it from there too. + @Sendable + @inlinable + public func deleteWhatsAppMessageMedia(_ input: DeleteWhatsAppMessageMediaInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteWhatsAppMessageMediaOutput { + try await self.client.execute( + operation: "DeleteWhatsAppMessageMedia", + path: "/v1/whatsapp/media", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Delete a media object from the WhatsApp service. If the object is still in an Amazon S3 bucket you should delete it from there too. + /// + /// Parameters: + /// - mediaId: The unique identifier of the media file to delete. Use the mediaId returned from PostWhatsAppMessageMedia. + /// - originationPhoneNumberId: The unique identifier of the originating phone number associated with the media. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + /// - logger: Logger use during operation + @inlinable + public func deleteWhatsAppMessageMedia( + mediaId: String, + originationPhoneNumberId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteWhatsAppMessageMediaOutput { + let input = DeleteWhatsAppMessageMediaInput( + mediaId: mediaId, + originationPhoneNumberId: originationPhoneNumberId + ) + return try await self.deleteWhatsAppMessageMedia(input, logger: logger) + } + + /// Disassociate a WhatsApp Business Account (WABA) from your Amazon Web Services account. 
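A minimal sketch of calling the new service surface shown above, assuming an existing AWSClient value named client; the Region, media ID, and phone number ID below are placeholders:

    let social = SocialMessaging(client: client, region: .useast1)
    _ = try await social.deleteWhatsAppMessageMedia(
        mediaId: "<media-id>",
        originationPhoneNumberId: "phone-number-id-01234567890123456789012345678901"
    )
    // As noted above, if a copy of the media object still exists in an Amazon S3 bucket,
    // delete it from the bucket separately.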
+ @Sendable + @inlinable + public func disassociateWhatsAppBusinessAccount(_ input: DisassociateWhatsAppBusinessAccountInput, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateWhatsAppBusinessAccountOutput { + try await self.client.execute( + operation: "DisassociateWhatsAppBusinessAccount", + path: "/v1/whatsapp/waba/disassociate", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Disassociate a WhatsApp Business Account (WABA) from your Amazon Web Services account. + /// + /// Parameters: + /// - id: The unique identifier of your WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details. + /// - logger: Logger use during operation + @inlinable + public func disassociateWhatsAppBusinessAccount( + id: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DisassociateWhatsAppBusinessAccountOutput { + let input = DisassociateWhatsAppBusinessAccountInput( + id: id + ) + return try await self.disassociateWhatsAppBusinessAccount(input, logger: logger) + } + + /// Get the details of your linked WhatsApp Business Account. + @Sendable + @inlinable + public func getLinkedWhatsAppBusinessAccount(_ input: GetLinkedWhatsAppBusinessAccountInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetLinkedWhatsAppBusinessAccountOutput { + try await self.client.execute( + operation: "GetLinkedWhatsAppBusinessAccount", + path: "/v1/whatsapp/waba/details", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Get the details of your linked WhatsApp Business Account. + /// + /// Parameters: + /// - id: The unique identifier, from Amazon Web Services, of the linked WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details. + /// - logger: Logger use during operation + @inlinable + public func getLinkedWhatsAppBusinessAccount( + id: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetLinkedWhatsAppBusinessAccountOutput { + let input = GetLinkedWhatsAppBusinessAccountInput( + id: id + ) + return try await self.getLinkedWhatsAppBusinessAccount(input, logger: logger) + } + + /// Use your WhatsApp phone number id to get the WABA account id and phone number details. + @Sendable + @inlinable + public func getLinkedWhatsAppBusinessAccountPhoneNumber(_ input: GetLinkedWhatsAppBusinessAccountPhoneNumberInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetLinkedWhatsAppBusinessAccountPhoneNumberOutput { + try await self.client.execute( + operation: "GetLinkedWhatsAppBusinessAccountPhoneNumber", + path: "/v1/whatsapp/waba/phone/details", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Use your WhatsApp phone number id to get the WABA account id and phone number details. + /// + /// Parameters: + /// - id: The unique identifier of the phone number. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. 
+ /// - logger: Logger use during operation + @inlinable + public func getLinkedWhatsAppBusinessAccountPhoneNumber( + id: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetLinkedWhatsAppBusinessAccountPhoneNumberOutput { + let input = GetLinkedWhatsAppBusinessAccountPhoneNumberInput( + id: id + ) + return try await self.getLinkedWhatsAppBusinessAccountPhoneNumber(input, logger: logger) + } + + /// Get a media file from the WhatsApp service. On successful completion the media file is retrieved from Meta and stored in the specified Amazon S3 bucket. Use either destinationS3File or destinationS3PresignedUrl for the destination. If both are used then an InvalidParameterException is returned. + @Sendable + @inlinable + public func getWhatsAppMessageMedia(_ input: GetWhatsAppMessageMediaInput, logger: Logger = AWSClient.loggingDisabled) async throws -> GetWhatsAppMessageMediaOutput { + try await self.client.execute( + operation: "GetWhatsAppMessageMedia", + path: "/v1/whatsapp/media/get", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Get a media file from the WhatsApp service. On successful completion the media file is retrieved from Meta and stored in the specified Amazon S3 bucket. Use either destinationS3File or destinationS3PresignedUrl for the destination. If both are used then an InvalidParameterException is returned. + /// + /// Parameters: + /// - destinationS3File: The bucketName and key of the S3 media file. + /// - destinationS3PresignedUrl: The presign url of the media file. + /// - mediaId: The unique identifier for the media file. + /// - metadataOnly: Set to True to get only the metadata for the file. + /// - originationPhoneNumberId: The unique identifier of the originating phone number for the WhatsApp message media. The phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + /// - logger: Logger use during operation + @inlinable + public func getWhatsAppMessageMedia( + destinationS3File: S3File? = nil, + destinationS3PresignedUrl: S3PresignedUrl? = nil, + mediaId: String, + metadataOnly: Bool? = nil, + originationPhoneNumberId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetWhatsAppMessageMediaOutput { + let input = GetWhatsAppMessageMediaInput( + destinationS3File: destinationS3File, + destinationS3PresignedUrl: destinationS3PresignedUrl, + mediaId: mediaId, + metadataOnly: metadataOnly, + originationPhoneNumberId: originationPhoneNumberId + ) + return try await self.getWhatsAppMessageMedia(input, logger: logger) + } + + /// List all WhatsApp Business Accounts linked to your Amazon Web Services account. + @Sendable + @inlinable + public func listLinkedWhatsAppBusinessAccounts(_ input: ListLinkedWhatsAppBusinessAccountsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListLinkedWhatsAppBusinessAccountsOutput { + try await self.client.execute( + operation: "ListLinkedWhatsAppBusinessAccounts", + path: "/v1/whatsapp/waba/list", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List all WhatsApp Business Accounts linked to your Amazon Web Services account. + /// + /// Parameters: + /// - maxResults: The maximum number of results to return. + /// - nextToken: The next token for pagination. 
+ /// - logger: Logger use during operation + @inlinable + public func listLinkedWhatsAppBusinessAccounts( + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListLinkedWhatsAppBusinessAccountsOutput { + let input = ListLinkedWhatsAppBusinessAccountsInput( + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listLinkedWhatsAppBusinessAccounts(input, logger: logger) + } + + /// List all tags associated with a resource, such as a phone number or WABA. + @Sendable + @inlinable + public func listTagsForResource(_ input: ListTagsForResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceOutput { + try await self.client.execute( + operation: "ListTagsForResource", + path: "/v1/tags/list", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List all tags associated with a resource, such as a phone number or WABA. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource to retrieve the tags from. + /// - logger: Logger use during operation + @inlinable + public func listTagsForResource( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListTagsForResourceOutput { + let input = ListTagsForResourceInput( + resourceArn: resourceArn + ) + return try await self.listTagsForResource(input, logger: logger) + } + + /// Upload a media file to the WhatsApp service. Only the specified originationPhoneNumberId has the permissions to send the media file when using SendWhatsAppMessage. You must use either sourceS3File or sourceS3PresignedUrl for the source. If both or neither are specified then an InvalidParameterException is returned. + @Sendable + @inlinable + public func postWhatsAppMessageMedia(_ input: PostWhatsAppMessageMediaInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PostWhatsAppMessageMediaOutput { + try await self.client.execute( + operation: "PostWhatsAppMessageMedia", + path: "/v1/whatsapp/media", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Upload a media file to the WhatsApp service. Only the specified originationPhoneNumberId has the permissions to send the media file when using SendWhatsAppMessage. You must use either sourceS3File or sourceS3PresignedUrl for the source. If both or neither are specified then an InvalidParameterException is returned. + /// + /// Parameters: + /// - originationPhoneNumberId: The ID of the phone number to associate with the WhatsApp media file. The phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + /// - sourceS3File: The source S3 url for the media file. + /// - sourceS3PresignedUrl: The source presign url of the media file. + /// - logger: Logger use during operation + @inlinable + public func postWhatsAppMessageMedia( + originationPhoneNumberId: String, + sourceS3File: S3File? = nil, + sourceS3PresignedUrl: S3PresignedUrl? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> PostWhatsAppMessageMediaOutput { + let input = PostWhatsAppMessageMediaInput( + originationPhoneNumberId: originationPhoneNumberId, + sourceS3File: sourceS3File, + sourceS3PresignedUrl: sourceS3PresignedUrl + ) + return try await self.postWhatsAppMessageMedia(input, logger: logger) + } + + /// Add an event destination to log event data from WhatsApp for a WhatsApp Business Account (WABA). A WABA can only have one event destination at a time. All resources associated with the WABA use the same event destination. + @Sendable + @inlinable + public func putWhatsAppBusinessAccountEventDestinations(_ input: PutWhatsAppBusinessAccountEventDestinationsInput, logger: Logger = AWSClient.loggingDisabled) async throws -> PutWhatsAppBusinessAccountEventDestinationsOutput { + try await self.client.execute( + operation: "PutWhatsAppBusinessAccountEventDestinations", + path: "/v1/whatsapp/waba/eventdestinations", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Add an event destination to log event data from WhatsApp for a WhatsApp Business Account (WABA). A WABA can only have one event destination at a time. All resources associated with the WABA use the same event destination. + /// + /// Parameters: + /// - eventDestinations: An array of WhatsAppBusinessAccountEventDestination event destinations. + /// - id: The unique identifier of your WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details. + /// - logger: Logger use during operation + @inlinable + public func putWhatsAppBusinessAccountEventDestinations( + eventDestinations: [WhatsAppBusinessAccountEventDestination], + id: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> PutWhatsAppBusinessAccountEventDestinationsOutput { + let input = PutWhatsAppBusinessAccountEventDestinationsInput( + eventDestinations: eventDestinations, + id: id + ) + return try await self.putWhatsAppBusinessAccountEventDestinations(input, logger: logger) + } + + /// Send a WhatsApp message. For examples of sending a message using the Amazon Web Services CLI, see Sending messages in the Amazon Web Services End User Messaging Social User Guide . + @Sendable + @inlinable + public func sendWhatsAppMessage(_ input: SendWhatsAppMessageInput, logger: Logger = AWSClient.loggingDisabled) async throws -> SendWhatsAppMessageOutput { + try await self.client.execute( + operation: "SendWhatsAppMessage", + path: "/v1/whatsapp/send", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Send a WhatsApp message. For examples of sending a message using the Amazon Web Services CLI, see Sending messages in the Amazon Web Services End User Messaging Social User Guide . + /// + /// Parameters: + /// - message: The message to send through WhatsApp. The length is in KB. The message field passes through a WhatsApp Message object, see Messages in the WhatsApp Business Platform Cloud API Reference. + /// - metaApiVersion: The API version for the request formatted as v{VersionNumber}. For a list of supported API versions and Amazon Web Services Regions, see Amazon Web Services End User Messaging Social API Service Endpoints in the Amazon Web Services General Reference. + /// - originationPhoneNumberId: The ID of the phone number used to send the WhatsApp message. 
If you are sending a media file only the originationPhoneNumberId used to upload the file can be used. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + /// - logger: Logger use during operation + @inlinable + public func sendWhatsAppMessage( + message: AWSBase64Data, + metaApiVersion: String, + originationPhoneNumberId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> SendWhatsAppMessageOutput { + let input = SendWhatsAppMessageInput( + message: message, + metaApiVersion: metaApiVersion, + originationPhoneNumberId: originationPhoneNumberId + ) + return try await self.sendWhatsAppMessage(input, logger: logger) + } + + /// Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is overwritten with the new value. + @Sendable + @inlinable + public func tagResource(_ input: TagResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceOutput { + try await self.client.execute( + operation: "TagResource", + path: "/v1/tags/tag-resource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is overwritten with the new value. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource to tag. + /// - tags: The tags to add to the resource. + /// - logger: Logger use during operation + @inlinable + public func tagResource( + resourceArn: String, + tags: [Tag], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TagResourceOutput { + let input = TagResourceInput( + resourceArn: resourceArn, + tags: tags + ) + return try await self.tagResource(input, logger: logger) + } + + /// Removes the specified tags from a resource. + @Sendable + @inlinable + public func untagResource(_ input: UntagResourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceOutput { + try await self.client.execute( + operation: "UntagResource", + path: "/v1/tags/untag-resource", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Removes the specified tags from a resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Resource Name (ARN) of the resource to remove tags from. + /// - tagKeys: The keys of the tags to remove from the resource. + /// - logger: Logger use during operation + @inlinable + public func untagResource( + resourceArn: String, + tagKeys: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UntagResourceOutput { + let input = UntagResourceInput( + resourceArn: resourceArn, + tagKeys: tagKeys + ) + return try await self.untagResource(input, logger: logger) + } +} + +extension SocialMessaging { + /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. You are not able to use this initializer directly as there are not public + /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. 
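// Illustrative usage sketch (not part of the generated service code): sending a WhatsApp
// text message. The payload is a Meta Cloud API "messages" object passed as base64 data;
// the recipient number, metaApiVersion, and phone number id are placeholders, and the
// AWSClient is assumed to be configured elsewhere.
import SotoSocialMessaging

func sendTextMessage(using client: AWSClient) async throws {
    // Per-call settings such as the request timeout can be adjusted with `with(...)`.
    let social = SocialMessaging(client: client, region: .useast1).with(timeout: .seconds(30))

    let payload = #"{"messaging_product":"whatsapp","to":"+14255550123","type":"text","text":{"body":"Hello from Soto"}}"#
    let output = try await social.sendWhatsAppMessage(
        message: .data(Array(payload.utf8)),
        metaApiVersion: "v20.0",                                                      // placeholder Meta API version
        originationPhoneNumberId: "phone-number-id-01234567890123456789012345678901"  // placeholder phone number id
    )
    print("Message id:", output.messageId ?? "none")
}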
+ public init(from: SocialMessaging, patch: AWSServiceConfig.Patch) { + self.client = from.client + self.config = from.config.with(patch: patch) + } +} diff --git a/Sources/Soto/Services/SocialMessaging/SocialMessaging_shapes.swift b/Sources/Soto/Services/SocialMessaging/SocialMessaging_shapes.swift new file mode 100644 index 0000000000..f65685db9b --- /dev/null +++ b/Sources/Soto/Services/SocialMessaging/SocialMessaging_shapes.swift @@ -0,0 +1,1013 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Soto for AWS open source project +// +// Copyright (c) 2017-2024 the Soto project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.txt for the list of Soto project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. +// DO NOT EDIT. + +#if os(Linux) && compiler(<5.10) +// swift-corelibs-foundation hasn't been updated with Sendable conformances +@preconcurrency import Foundation +#else +import Foundation +#endif +@_spi(SotoInternal) import SotoCore + +extension SocialMessaging { + // MARK: Enums + + public enum RegistrationStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case complete = "COMPLETE" + case incomplete = "INCOMPLETE" + public var description: String { return self.rawValue } + } + + // MARK: Shapes + + public struct AssociateWhatsAppBusinessAccountInput: AWSEncodableShape { + /// A JSON object that contains the phone numbers and WhatsApp Business Account to link to your account. + public let setupFinalization: WhatsAppSetupFinalization? + /// Contains the callback access token. + public let signupCallback: WhatsAppSignupCallback? + + @inlinable + public init(setupFinalization: WhatsAppSetupFinalization? = nil, signupCallback: WhatsAppSignupCallback? = nil) { + self.setupFinalization = setupFinalization + self.signupCallback = signupCallback + } + + public func validate(name: String) throws { + try self.setupFinalization?.validate(name: "\(name).setupFinalization") + } + + private enum CodingKeys: String, CodingKey { + case setupFinalization = "setupFinalization" + case signupCallback = "signupCallback" + } + } + + public struct AssociateWhatsAppBusinessAccountOutput: AWSDecodableShape { + /// Contains your WhatsApp registration status. + public let signupCallbackResult: WhatsAppSignupCallbackResult? + /// The status code for the response. + public let statusCode: Int? + + @inlinable + public init(signupCallbackResult: WhatsAppSignupCallbackResult? = nil, statusCode: Int? = nil) { + self.signupCallbackResult = signupCallbackResult + self.statusCode = statusCode + } + + private enum CodingKeys: String, CodingKey { + case signupCallbackResult = "signupCallbackResult" + case statusCode = "statusCode" + } + } + + public struct DeleteWhatsAppMessageMediaInput: AWSEncodableShape { + /// The unique identifier of the media file to delete. Use the mediaId returned from PostWhatsAppMessageMedia. + public let mediaId: String + /// The unique identifier of the originating phone number associated with the media. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. 
+ public let originationPhoneNumberId: String + + @inlinable + public init(mediaId: String, originationPhoneNumberId: String) { + self.mediaId = mediaId + self.originationPhoneNumberId = originationPhoneNumberId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.mediaId, key: "mediaId") + request.encodeQuery(self.originationPhoneNumberId, key: "originationPhoneNumberId") + } + + public func validate(name: String) throws { + try self.validate(self.mediaId, name: "mediaId", parent: name, max: 100) + try self.validate(self.mediaId, name: "mediaId", parent: name, min: 1) + try self.validate(self.mediaId, name: "mediaId", parent: name, pattern: "^[A-Za-z0-9]+$") + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, max: 100) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, min: 1) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, pattern: "(^phone-number-id-.*$)|(^arn:.*:phone-number-id/[0-9a-zA-Z]+$)") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteWhatsAppMessageMediaOutput: AWSDecodableShape { + /// Success indicator for deleting the media file. + public let success: Bool? + + @inlinable + public init(success: Bool? = nil) { + self.success = success + } + + private enum CodingKeys: String, CodingKey { + case success = "success" + } + } + + public struct DisassociateWhatsAppBusinessAccountInput: AWSEncodableShape { + /// The unique identifier of your WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details. + public let id: String + + @inlinable + public init(id: String) { + self.id = id + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.id, key: "id") + } + + public func validate(name: String) throws { + try self.validate(self.id, name: "id", parent: name, max: 100) + try self.validate(self.id, name: "id", parent: name, min: 1) + try self.validate(self.id, name: "id", parent: name, pattern: "(^waba-.*$)|(^arn:.*:waba/[0-9a-zA-Z]+$)") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DisassociateWhatsAppBusinessAccountOutput: AWSDecodableShape { + public init() {} + } + + public struct GetLinkedWhatsAppBusinessAccountInput: AWSEncodableShape { + /// The unique identifier, from Amazon Web Services, of the linked WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details. + public let id: String + + @inlinable + public init(id: String) { + self.id = id + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.id, key: "id") + } + + public func validate(name: String) throws { + try self.validate(self.id, name: "id", parent: name, max: 100) + try self.validate(self.id, name: "id", parent: name, min: 1) + try self.validate(self.id, name: "id", parent: name, pattern: "(^waba-.*$)|(^arn:.*:waba/[0-9a-zA-Z]+$)") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetLinkedWhatsAppBusinessAccountOutput: AWSDecodableShape { + /// The details of the linked WhatsApp Business Account. + public let account: LinkedWhatsAppBusinessAccount? + + @inlinable + public init(account: LinkedWhatsAppBusinessAccount? = nil) { + self.account = account + } + + private enum CodingKeys: String, CodingKey { + case account = "account" + } + } + + public struct GetLinkedWhatsAppBusinessAccountPhoneNumberInput: AWSEncodableShape { + /// The unique identifier of the phone number. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + public let id: String + + @inlinable + public init(id: String) { + self.id = id + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.id, key: "id") + } + + public func validate(name: String) throws { + try self.validate(self.id, name: "id", parent: name, max: 100) + try self.validate(self.id, name: "id", parent: name, min: 1) + try self.validate(self.id, name: "id", parent: name, pattern: "(^phone-number-id-.*$)|(^arn:.*:phone-number-id/[0-9a-zA-Z]+$)") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetLinkedWhatsAppBusinessAccountPhoneNumberOutput: AWSDecodableShape { + /// The WABA identifier linked to the phone number, formatted as waba-01234567890123456789012345678901. + public let linkedWhatsAppBusinessAccountId: String? + public let phoneNumber: WhatsAppPhoneNumberDetail? + + @inlinable + public init(linkedWhatsAppBusinessAccountId: String? = nil, phoneNumber: WhatsAppPhoneNumberDetail? = nil) { + self.linkedWhatsAppBusinessAccountId = linkedWhatsAppBusinessAccountId + self.phoneNumber = phoneNumber + } + + private enum CodingKeys: String, CodingKey { + case linkedWhatsAppBusinessAccountId = "linkedWhatsAppBusinessAccountId" + case phoneNumber = "phoneNumber" + } + } + + public struct GetWhatsAppMessageMediaInput: AWSEncodableShape { + /// The bucketName and key of the S3 media file. + public let destinationS3File: S3File? + /// The presign url of the media file. + public let destinationS3PresignedUrl: S3PresignedUrl? + /// The unique identifier for the media file. + public let mediaId: String + /// Set to True to get only the metadata for the file. + public let metadataOnly: Bool? + /// The unique identifier of the originating phone number for the WhatsApp message media. The phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + public let originationPhoneNumberId: String + + @inlinable + public init(destinationS3File: S3File? = nil, destinationS3PresignedUrl: S3PresignedUrl? = nil, mediaId: String, metadataOnly: Bool? 
= nil, originationPhoneNumberId: String) { + self.destinationS3File = destinationS3File + self.destinationS3PresignedUrl = destinationS3PresignedUrl + self.mediaId = mediaId + self.metadataOnly = metadataOnly + self.originationPhoneNumberId = originationPhoneNumberId + } + + public func validate(name: String) throws { + try self.validate(self.mediaId, name: "mediaId", parent: name, max: 100) + try self.validate(self.mediaId, name: "mediaId", parent: name, min: 1) + try self.validate(self.mediaId, name: "mediaId", parent: name, pattern: "^[A-Za-z0-9]+$") + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, max: 100) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, min: 1) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, pattern: "(^phone-number-id-.*$)|(^arn:.*:phone-number-id/[0-9a-zA-Z]+$)") + } + + private enum CodingKeys: String, CodingKey { + case destinationS3File = "destinationS3File" + case destinationS3PresignedUrl = "destinationS3PresignedUrl" + case mediaId = "mediaId" + case metadataOnly = "metadataOnly" + case originationPhoneNumberId = "originationPhoneNumberId" + } + } + + public struct GetWhatsAppMessageMediaOutput: AWSDecodableShape { + /// The file size of the media, in KB. + public let fileSize: Int64? + /// The MIME type of the media. + public let mimeType: String? + + @inlinable + public init(fileSize: Int64? = nil, mimeType: String? = nil) { + self.fileSize = fileSize + self.mimeType = mimeType + } + + private enum CodingKeys: String, CodingKey { + case fileSize = "fileSize" + case mimeType = "mimeType" + } + } + + public struct LinkedWhatsAppBusinessAccount: AWSDecodableShape { + /// The ARN of the linked WhatsApp Business Account. + public let arn: String + /// The event destinations for the linked WhatsApp Business Account. + public let eventDestinations: [WhatsAppBusinessAccountEventDestination] + /// The ID of the linked WhatsApp Business Account, formatted as waba-01234567890123456789012345678901. + public let id: String + /// The date the WhatsApp Business Account was linked. + public let linkDate: Date + /// The phone numbers associated with the Linked WhatsApp Business Account. + public let phoneNumbers: [WhatsAppPhoneNumberSummary] + /// The registration status of the linked WhatsApp Business Account. + public let registrationStatus: RegistrationStatus + /// The WhatsApp Business Account ID from meta. + public let wabaId: String + /// The name of the linked WhatsApp Business Account. + public let wabaName: String + + @inlinable + public init(arn: String, eventDestinations: [WhatsAppBusinessAccountEventDestination], id: String, linkDate: Date, phoneNumbers: [WhatsAppPhoneNumberSummary], registrationStatus: RegistrationStatus, wabaId: String, wabaName: String) { + self.arn = arn + self.eventDestinations = eventDestinations + self.id = id + self.linkDate = linkDate + self.phoneNumbers = phoneNumbers + self.registrationStatus = registrationStatus + self.wabaId = wabaId + self.wabaName = wabaName + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case eventDestinations = "eventDestinations" + case id = "id" + case linkDate = "linkDate" + case phoneNumbers = "phoneNumbers" + case registrationStatus = "registrationStatus" + case wabaId = "wabaId" + case wabaName = "wabaName" + } + } + + public struct LinkedWhatsAppBusinessAccountIdMetaData: AWSDecodableShape { + /// The name of your account. 
+ public let accountName: String? + /// The registration status of the linked WhatsApp Business Account. + public let registrationStatus: RegistrationStatus? + /// The details for unregistered WhatsApp phone numbers. + public let unregisteredWhatsAppPhoneNumbers: [WhatsAppPhoneNumberDetail]? + + @inlinable + public init(accountName: String? = nil, registrationStatus: RegistrationStatus? = nil, unregisteredWhatsAppPhoneNumbers: [WhatsAppPhoneNumberDetail]? = nil) { + self.accountName = accountName + self.registrationStatus = registrationStatus + self.unregisteredWhatsAppPhoneNumbers = unregisteredWhatsAppPhoneNumbers + } + + private enum CodingKeys: String, CodingKey { + case accountName = "accountName" + case registrationStatus = "registrationStatus" + case unregisteredWhatsAppPhoneNumbers = "unregisteredWhatsAppPhoneNumbers" + } + } + + public struct LinkedWhatsAppBusinessAccountSummary: AWSDecodableShape { + /// The ARN of the linked WhatsApp Business Account. + public let arn: String + /// The event destinations for the linked WhatsApp Business Account. + public let eventDestinations: [WhatsAppBusinessAccountEventDestination] + /// The ID of the linked WhatsApp Business Account, formatted as waba-01234567890123456789012345678901. + public let id: String + /// The date the WhatsApp Business Account was linked. + public let linkDate: Date + /// The registration status of the linked WhatsApp Business Account. + public let registrationStatus: RegistrationStatus + /// The WhatsApp Business Account ID provided by Meta. + public let wabaId: String + /// The name of the linked WhatsApp Business Account. + public let wabaName: String + + @inlinable + public init(arn: String, eventDestinations: [WhatsAppBusinessAccountEventDestination], id: String, linkDate: Date, registrationStatus: RegistrationStatus, wabaId: String, wabaName: String) { + self.arn = arn + self.eventDestinations = eventDestinations + self.id = id + self.linkDate = linkDate + self.registrationStatus = registrationStatus + self.wabaId = wabaId + self.wabaName = wabaName + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case eventDestinations = "eventDestinations" + case id = "id" + case linkDate = "linkDate" + case registrationStatus = "registrationStatus" + case wabaId = "wabaId" + case wabaName = "wabaName" + } + } + + public struct ListLinkedWhatsAppBusinessAccountsInput: AWSEncodableShape { + /// The maximum number of results to return. + public let maxResults: Int? + /// The next token for pagination. + public let nextToken: String? + + @inlinable + public init(maxResults: Int? = nil, nextToken: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 100) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 600) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListLinkedWhatsAppBusinessAccountsOutput: AWSDecodableShape { + /// A list of WhatsApp Business Accounts linked to your Amazon Web Services account. + public let linkedAccounts: [LinkedWhatsAppBusinessAccountSummary]? + /// The next token for pagination. + public let nextToken: String? + + @inlinable + public init(linkedAccounts: [LinkedWhatsAppBusinessAccountSummary]? = nil, nextToken: String? = nil) { + self.linkedAccounts = linkedAccounts + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case linkedAccounts = "linkedAccounts" + case nextToken = "nextToken" + } + } + + public struct ListTagsForResourceInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource to retrieve the tags from. + public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.resourceArn, key: "resourceArn") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:.*$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceOutput: AWSDecodableShape { + /// The status code of the response. + public let statusCode: Int? + /// The tags for the resource. + public let tags: [Tag]? + + @inlinable + public init(statusCode: Int? = nil, tags: [Tag]? = nil) { + self.statusCode = statusCode + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case statusCode = "statusCode" + case tags = "tags" + } + } + + public struct PostWhatsAppMessageMediaInput: AWSEncodableShape { + /// The ID of the phone number to associate with the WhatsApp media file. The phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + public let originationPhoneNumberId: String + /// The source S3 url for the media file. + public let sourceS3File: S3File? + /// The source presign url of the media file. + public let sourceS3PresignedUrl: S3PresignedUrl? + + @inlinable + public init(originationPhoneNumberId: String, sourceS3File: S3File? = nil, sourceS3PresignedUrl: S3PresignedUrl? 
= nil) { + self.originationPhoneNumberId = originationPhoneNumberId + self.sourceS3File = sourceS3File + self.sourceS3PresignedUrl = sourceS3PresignedUrl + } + + public func validate(name: String) throws { + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, max: 100) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, min: 1) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, pattern: "(^phone-number-id-.*$)|(^arn:.*:phone-number-id/[0-9a-zA-Z]+$)") + } + + private enum CodingKeys: String, CodingKey { + case originationPhoneNumberId = "originationPhoneNumberId" + case sourceS3File = "sourceS3File" + case sourceS3PresignedUrl = "sourceS3PresignedUrl" + } + } + + public struct PostWhatsAppMessageMediaOutput: AWSDecodableShape { + /// The unique identifier of the posted WhatsApp message. + public let mediaId: String? + + @inlinable + public init(mediaId: String? = nil) { + self.mediaId = mediaId + } + + private enum CodingKeys: String, CodingKey { + case mediaId = "mediaId" + } + } + + public struct PutWhatsAppBusinessAccountEventDestinationsInput: AWSEncodableShape { + /// An array of WhatsAppBusinessAccountEventDestination event destinations. + public let eventDestinations: [WhatsAppBusinessAccountEventDestination] + /// The unique identifier of your WhatsApp Business Account. WABA identifiers are formatted as waba-01234567890123456789012345678901. Use ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details. + public let id: String + + @inlinable + public init(eventDestinations: [WhatsAppBusinessAccountEventDestination], id: String) { + self.eventDestinations = eventDestinations + self.id = id + } + + public func validate(name: String) throws { + try self.eventDestinations.forEach { + try $0.validate(name: "\(name).eventDestinations[]") + } + try self.validate(self.eventDestinations, name: "eventDestinations", parent: name, max: 1) + try self.validate(self.id, name: "id", parent: name, max: 100) + try self.validate(self.id, name: "id", parent: name, min: 1) + try self.validate(self.id, name: "id", parent: name, pattern: "(^waba-.*$)|(^arn:.*:waba/[0-9a-zA-Z]+$)") + } + + private enum CodingKeys: String, CodingKey { + case eventDestinations = "eventDestinations" + case id = "id" + } + } + + public struct PutWhatsAppBusinessAccountEventDestinationsOutput: AWSDecodableShape { + public init() {} + } + + public struct S3File: AWSEncodableShape { + /// The bucket name. + public let bucketName: String + /// The object key of the media file. + public let key: String + + @inlinable + public init(bucketName: String, key: String) { + self.bucketName = bucketName + self.key = key + } + + private enum CodingKeys: String, CodingKey { + case bucketName = "bucketName" + case key = "key" + } + } + + public struct S3PresignedUrl: AWSEncodableShape { + /// A map of headers and their values. You must specify the Content-Type header when using PostWhatsAppMessageMedia. For a list of common headers, see Common Request Headers in the Amazon S3 API Reference + public let headers: [String: String] + /// The presign url to the object. 
+ public let url: String + + @inlinable + public init(headers: [String: String], url: String) { + self.headers = headers + self.url = url + } + + private enum CodingKeys: String, CodingKey { + case headers = "headers" + case url = "url" + } + } + + public struct SendWhatsAppMessageInput: AWSEncodableShape { + /// The message to send through WhatsApp. The length is in KB. The message field passes through a WhatsApp Message object, see Messages in the WhatsApp Business Platform Cloud API Reference. + public let message: AWSBase64Data + /// The API version for the request formatted as v{VersionNumber}. For a list of supported API versions and Amazon Web Services Regions, see Amazon Web Services End User Messaging Social API Service Endpoints in the Amazon Web Services General Reference. + public let metaApiVersion: String + /// The ID of the phone number used to send the WhatsApp message. If you are sending a media file only the originationPhoneNumberId used to upload the file can be used. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + public let originationPhoneNumberId: String + + @inlinable + public init(message: AWSBase64Data, metaApiVersion: String, originationPhoneNumberId: String) { + self.message = message + self.metaApiVersion = metaApiVersion + self.originationPhoneNumberId = originationPhoneNumberId + } + + public func validate(name: String) throws { + try self.validate(self.message, name: "message", parent: name, max: 2048000) + try self.validate(self.message, name: "message", parent: name, min: 1) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, max: 100) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, min: 1) + try self.validate(self.originationPhoneNumberId, name: "originationPhoneNumberId", parent: name, pattern: "(^phone-number-id-.*$)|(^arn:.*:phone-number-id/[0-9a-zA-Z]+$)") + } + + private enum CodingKeys: String, CodingKey { + case message = "message" + case metaApiVersion = "metaApiVersion" + case originationPhoneNumberId = "originationPhoneNumberId" + } + } + + public struct SendWhatsAppMessageOutput: AWSDecodableShape { + /// The unique identifier of the message. + public let messageId: String? + + @inlinable + public init(messageId: String? = nil) { + self.messageId = messageId + } + + private enum CodingKeys: String, CodingKey { + case messageId = "messageId" + } + } + + public struct Tag: AWSEncodableShape & AWSDecodableShape { + /// The tag key. + public let key: String + /// The tag value. + public let value: String? + + @inlinable + public init(key: String, value: String? = nil) { + self.key = key + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case key = "key" + case value = "value" + } + } + + public struct TagResourceInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource to tag. + public let resourceArn: String + /// The tags to add to the resource. 
+ public let tags: [Tag] + + @inlinable + public init(resourceArn: String, tags: [Tag]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:.*$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "resourceArn" + case tags = "tags" + } + } + + public struct TagResourceOutput: AWSDecodableShape { + /// The status code of the tag resource operation. + public let statusCode: Int? + + @inlinable + public init(statusCode: Int? = nil) { + self.statusCode = statusCode + } + + private enum CodingKeys: String, CodingKey { + case statusCode = "statusCode" + } + } + + public struct UntagResourceInput: AWSEncodableShape { + /// The Amazon Resource Name (ARN) of the resource to remove tags from. + public let resourceArn: String + /// The keys of the tags to remove from the resource. + public let tagKeys: [String] + + @inlinable + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:.*$") + } + + private enum CodingKeys: String, CodingKey { + case resourceArn = "resourceArn" + case tagKeys = "tagKeys" + } + } + + public struct UntagResourceOutput: AWSDecodableShape { + /// The status code of the untag resource operation. + public let statusCode: Int? + + @inlinable + public init(statusCode: Int? = nil) { + self.statusCode = statusCode + } + + private enum CodingKeys: String, CodingKey { + case statusCode = "statusCode" + } + } + + public struct WabaPhoneNumberSetupFinalization: AWSEncodableShape { + /// The two letter ISO region for the location of where Meta will store data. Asia–Pacific (APAC) Australia AU Indonesia ID India IN Japan JP Singapore SG South Korea KR Europe Germany DE Switzerland CH United Kingdom GB Latin America (LATAM) Brazil BR Middle East and Africa (MEA) Bahrain BH South Africa ZA United Arab Emirates AE North America (NORAM) Canada CA + public let dataLocalizationRegion: String? + /// The unique identifier of the originating phone number associated with the media. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. Use GetLinkedWhatsAppBusinessAccount to find a phone number's id. + public let id: String + /// An array of key and value pair tags. + public let tags: [Tag]? + /// The PIN to use for two-step verification. To reset your PIN follow the directions in Updating PIN in the WhatsApp Business Platform Cloud API Reference. + public let twoFactorPin: String + + @inlinable + public init(dataLocalizationRegion: String? = nil, id: String, tags: [Tag]? 
= nil, twoFactorPin: String) { + self.dataLocalizationRegion = dataLocalizationRegion + self.id = id + self.tags = tags + self.twoFactorPin = twoFactorPin + } + + public func validate(name: String) throws { + try self.validate(self.dataLocalizationRegion, name: "dataLocalizationRegion", parent: name, pattern: "^[A-Z]{2}$") + try self.validate(self.id, name: "id", parent: name, max: 100) + try self.validate(self.id, name: "id", parent: name, min: 1) + try self.validate(self.twoFactorPin, name: "twoFactorPin", parent: name, max: 6) + try self.validate(self.twoFactorPin, name: "twoFactorPin", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case dataLocalizationRegion = "dataLocalizationRegion" + case id = "id" + case tags = "tags" + case twoFactorPin = "twoFactorPin" + } + } + + public struct WabaSetupFinalization: AWSEncodableShape { + /// The event destinations for the linked WhatsApp Business Account. + public let eventDestinations: [WhatsAppBusinessAccountEventDestination]? + /// The ID of the linked WhatsApp Business Account, formatted as waba-01234567890123456789012345678901. + public let id: String? + /// An array of key and value pair tags. + public let tags: [Tag]? + + @inlinable + public init(eventDestinations: [WhatsAppBusinessAccountEventDestination]? = nil, id: String? = nil, tags: [Tag]? = nil) { + self.eventDestinations = eventDestinations + self.id = id + self.tags = tags + } + + public func validate(name: String) throws { + try self.eventDestinations?.forEach { + try $0.validate(name: "\(name).eventDestinations[]") + } + try self.validate(self.eventDestinations, name: "eventDestinations", parent: name, max: 1) + try self.validate(self.id, name: "id", parent: name, max: 100) + try self.validate(self.id, name: "id", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case eventDestinations = "eventDestinations" + case id = "id" + case tags = "tags" + } + } + + public struct WhatsAppBusinessAccountEventDestination: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the event destination. + public let eventDestinationArn: String + + @inlinable + public init(eventDestinationArn: String) { + self.eventDestinationArn = eventDestinationArn + } + + public func validate(name: String) throws { + try self.validate(self.eventDestinationArn, name: "eventDestinationArn", parent: name, max: 2048) + try self.validate(self.eventDestinationArn, name: "eventDestinationArn", parent: name, pattern: "^arn:.*:[a-z-]+([/:](.*))?$") + } + + private enum CodingKeys: String, CodingKey { + case eventDestinationArn = "eventDestinationArn" + } + } + + public struct WhatsAppPhoneNumberDetail: AWSDecodableShape { + /// The ARN of the WhatsApp phone number. + public let arn: String + /// The phone number that appears in the recipients display. + public let displayPhoneNumber: String + /// The display name for this phone number. + public let displayPhoneNumberName: String + /// The phone number ID from Meta. + public let metaPhoneNumberId: String + /// The phone number for sending WhatsApp. + public let phoneNumber: String + /// The phone number ID. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. + public let phoneNumberId: String + /// The quality rating of the phone number. 
+ public let qualityRating: String + + @inlinable + public init(arn: String, displayPhoneNumber: String, displayPhoneNumberName: String, metaPhoneNumberId: String, phoneNumber: String, phoneNumberId: String, qualityRating: String) { + self.arn = arn + self.displayPhoneNumber = displayPhoneNumber + self.displayPhoneNumberName = displayPhoneNumberName + self.metaPhoneNumberId = metaPhoneNumberId + self.phoneNumber = phoneNumber + self.phoneNumberId = phoneNumberId + self.qualityRating = qualityRating + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case displayPhoneNumber = "displayPhoneNumber" + case displayPhoneNumberName = "displayPhoneNumberName" + case metaPhoneNumberId = "metaPhoneNumberId" + case phoneNumber = "phoneNumber" + case phoneNumberId = "phoneNumberId" + case qualityRating = "qualityRating" + } + } + + public struct WhatsAppPhoneNumberSummary: AWSDecodableShape { + /// The full Amazon Resource Name (ARN) for the phone number. + public let arn: String + /// The phone number that appears in the recipients display. + public let displayPhoneNumber: String + /// The display name for this phone number. + public let displayPhoneNumberName: String + /// The phone number ID from Meta. + public let metaPhoneNumberId: String + /// The phone number associated with the Linked WhatsApp Business Account. + public let phoneNumber: String + /// The phone number ID. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901. + public let phoneNumberId: String + /// The quality rating of the phone number. This is from Meta. + public let qualityRating: String + + @inlinable + public init(arn: String, displayPhoneNumber: String, displayPhoneNumberName: String, metaPhoneNumberId: String, phoneNumber: String, phoneNumberId: String, qualityRating: String) { + self.arn = arn + self.displayPhoneNumber = displayPhoneNumber + self.displayPhoneNumberName = displayPhoneNumberName + self.metaPhoneNumberId = metaPhoneNumberId + self.phoneNumber = phoneNumber + self.phoneNumberId = phoneNumberId + self.qualityRating = qualityRating + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case displayPhoneNumber = "displayPhoneNumber" + case displayPhoneNumberName = "displayPhoneNumberName" + case metaPhoneNumberId = "metaPhoneNumberId" + case phoneNumber = "phoneNumber" + case phoneNumberId = "phoneNumberId" + case qualityRating = "qualityRating" + } + } + + public struct WhatsAppSetupFinalization: AWSEncodableShape { + /// An Amazon Web Services access token generated by WhatsAppSignupCallback and used by WhatsAppSetupFinalization. + public let associateInProgressToken: String + /// Used to add a new phone number to an existing WhatsApp Business Account. This field can't be used when the waba field is present. + public let phoneNumberParent: String? + /// An array of WabaPhoneNumberSetupFinalization objects containing the details of each phone number associated with the WhatsApp Business Account. + public let phoneNumbers: [WabaPhoneNumberSetupFinalization] + /// Used to create a new WhatsApp Business Account and add a phone number. This field can't be used when the phoneNumberParent field is present. + public let waba: WabaSetupFinalization? + + @inlinable + public init(associateInProgressToken: String, phoneNumberParent: String? = nil, phoneNumbers: [WabaPhoneNumberSetupFinalization], waba: WabaSetupFinalization? 
= nil) { + self.associateInProgressToken = associateInProgressToken + self.phoneNumberParent = phoneNumberParent + self.phoneNumbers = phoneNumbers + self.waba = waba + } + + public func validate(name: String) throws { + try self.validate(self.associateInProgressToken, name: "associateInProgressToken", parent: name, max: 50) + try self.validate(self.phoneNumberParent, name: "phoneNumberParent", parent: name, max: 100) + try self.validate(self.phoneNumberParent, name: "phoneNumberParent", parent: name, min: 1) + try self.validate(self.phoneNumberParent, name: "phoneNumberParent", parent: name, pattern: "(^waba-.*$)|(^arn:.*:waba/[0-9a-zA-Z]+$)") + try self.phoneNumbers.forEach { + try $0.validate(name: "\(name).phoneNumbers[]") + } + try self.waba?.validate(name: "\(name).waba") + } + + private enum CodingKeys: String, CodingKey { + case associateInProgressToken = "associateInProgressToken" + case phoneNumberParent = "phoneNumberParent" + case phoneNumbers = "phoneNumbers" + case waba = "waba" + } + } + + public struct WhatsAppSignupCallback: AWSEncodableShape { + /// The access token for your WhatsApp Business Account. The accessToken value is provided by Meta. + public let accessToken: String + + @inlinable + public init(accessToken: String) { + self.accessToken = accessToken + } + + private enum CodingKeys: String, CodingKey { + case accessToken = "accessToken" + } + } + + public struct WhatsAppSignupCallbackResult: AWSDecodableShape { + /// An Amazon Web Services access token generated by WhatsAppSignupCallback and used by WhatsAppSetupFinalization. + public let associateInProgressToken: String? + /// A LinkedWhatsAppBusinessAccountIdMetaData object map containing the details of any WhatsAppBusiness accounts that have incomplete setup. + public let linkedAccountsWithIncompleteSetup: [String: LinkedWhatsAppBusinessAccountIdMetaData]? + + @inlinable + public init(associateInProgressToken: String? = nil, linkedAccountsWithIncompleteSetup: [String: LinkedWhatsAppBusinessAccountIdMetaData]? = nil) { + self.associateInProgressToken = associateInProgressToken + self.linkedAccountsWithIncompleteSetup = linkedAccountsWithIncompleteSetup + } + + private enum CodingKeys: String, CodingKey { + case associateInProgressToken = "associateInProgressToken" + case linkedAccountsWithIncompleteSetup = "linkedAccountsWithIncompleteSetup" + } + } +} + +// MARK: - Errors + +/// Error enum for SocialMessaging +public struct SocialMessagingErrorType: AWSErrorType { + enum Code: String { + case accessDeniedByMetaException = "AccessDeniedByMetaException" + case accessDeniedException = "AccessDeniedException" + case dependencyException = "DependencyException" + case internalServiceException = "InternalServiceException" + case invalidParametersException = "InvalidParametersException" + case resourceNotFoundException = "ResourceNotFoundException" + case throttledRequestException = "ThrottledRequestException" + case validationException = "ValidationException" + } + + private let error: Code + public let context: AWSErrorContext? + + /// initialize SocialMessaging + public init?(errorCode: String, context: AWSErrorContext) { + guard let error = Code(rawValue: errorCode) else { return nil } + self.error = error + self.context = context + } + + internal init(_ error: Code) { + self.error = error + self.context = nil + } + + /// return error code string + public var errorCode: String { self.error.rawValue } + + /// You do not have sufficient access to perform this action. 
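// Illustrative usage sketch (not part of the generated service code): mapping a
// SocialMessagingErrorType error to an optional result. The WABA id is supplied by the caller.
func linkedAccount(_ social: SocialMessaging, wabaId: String) async throws -> LinkedWhatsAppBusinessAccount? {
    do {
        let output = try await social.getLinkedWhatsAppBusinessAccount(id: wabaId)
        return output.account
    } catch let error as SocialMessagingErrorType where error == .resourceNotFoundException {
        return nil  // the WABA id is not linked to this Amazon Web Services account
    }
}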
+ public static var accessDeniedByMetaException: Self { .init(.accessDeniedByMetaException) } + /// You do not have sufficient access to perform this action. + public static var accessDeniedException: Self { .init(.accessDeniedException) } + /// Thrown when performing an action because a dependency would be broken. + public static var dependencyException: Self { .init(.dependencyException) } + /// The request processing has failed because of an unknown error, exception, or failure. + public static var internalServiceException: Self { .init(.internalServiceException) } + /// One or more parameters provided to the action are not valid. + public static var invalidParametersException: Self { .init(.invalidParametersException) } + /// The resource was not found. + public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } + /// The request was denied due to request throttling. + public static var throttledRequestException: Self { .init(.throttledRequestException) } + /// The request contains an invalid parameter value. + public static var validationException: Self { .init(.validationException) } +} + +extension SocialMessagingErrorType: Equatable { + public static func == (lhs: SocialMessagingErrorType, rhs: SocialMessagingErrorType) -> Bool { + lhs.error == rhs.error + } +} + +extension SocialMessagingErrorType: CustomStringConvertible { + public var description: String { + return "\(self.error.rawValue): \(self.message ?? "")" + } +} diff --git a/Sources/Soto/Services/SupplyChain/SupplyChain_api.swift b/Sources/Soto/Services/SupplyChain/SupplyChain_api.swift index b705e0ff0b..2e659fe056 100644 --- a/Sources/Soto/Services/SupplyChain/SupplyChain_api.swift +++ b/Sources/Soto/Services/SupplyChain/SupplyChain_api.swift @@ -95,7 +95,7 @@ public struct SupplyChain: AWSService { /// CreateBillOfMaterialsImportJob creates an import job for the Product Bill Of Materials (BOM) entity. For information on the product_bom entity, see the AWS Supply Chain User Guide. The CSV file must be located in an Amazon S3 location accessible to AWS Supply Chain. It is recommended to use the same Amazon S3 bucket created during your AWS Supply Chain instance creation. /// /// Parameters: - /// - clientToken: An idempotency token. + /// - clientToken: An idempotency token ensures the API request is only completed no more than once. This way, retrying the request will not trigger the operation multiple times. A client token is a unique, case-sensitive string of 33 to 128 ASCII characters. To make an idempotent API request, specify a client token in the request. You should not reuse the same client token for other requests. If you retry a successful request with the same client token, the request will succeed with no further actions being taken, and you will receive the same API response as the original successful request. /// - instanceId: The AWS Supply Chain instance identifier. /// - s3uri: The S3 URI of the CSV file to be imported. The bucket must grant permissions for AWS Supply Chain to read the file. /// - logger: Logger use during operation @@ -114,6 +114,231 @@ public struct SupplyChain: AWSService { return try await self.createBillOfMaterialsImportJob(input, logger: logger) } + /// Create DataIntegrationFlow to map one or more different sources to one target using the SQL transformation query. 
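// Illustrative usage sketch (not part of the generated service code): starting a product
// bill of materials import with the existing createBillOfMaterialsImportJob operation.
// Assumes an AWSClient configured elsewhere; the instance id and S3 URI are placeholders.
import SotoSupplyChain

func importBillOfMaterials(using client: AWSClient) async throws {
    let supplyChain = SupplyChain(client: client, region: .useast1)
    let job = try await supplyChain.createBillOfMaterialsImportJob(
        instanceId: "11111111-2222-3333-4444-555555555555",  // placeholder AWS Supply Chain instance id
        s3uri: "s3://amzn-s3-demo-bucket/product_bom.csv"    // placeholder CSV location readable by AWS Supply Chain
    )
    print(job)  // the response carries the identifier of the created import job
}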
+ @Sendable + @inlinable + public func createDataIntegrationFlow(_ input: CreateDataIntegrationFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataIntegrationFlowResponse { + try await self.client.execute( + operation: "CreateDataIntegrationFlow", + path: "/api/data-integration/instance/{instanceId}/data-integration-flows/{name}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Create DataIntegrationFlow to map one or more different sources to one target using the SQL transformation query. + /// + /// Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - name: Name of the DataIntegrationFlow. + /// - sources: The source configurations for DataIntegrationFlow. + /// - tags: The tags of the DataIntegrationFlow to be created + /// - target: The target configurations for DataIntegrationFlow. + /// - transformation: The transformation configurations for DataIntegrationFlow. + /// - logger: Logger use during operation + @inlinable + public func createDataIntegrationFlow( + instanceId: String, + name: String, + sources: [DataIntegrationFlowSource], + tags: [String: String]? = nil, + target: DataIntegrationFlowTarget, + transformation: DataIntegrationFlowTransformation, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateDataIntegrationFlowResponse { + let input = CreateDataIntegrationFlowRequest( + instanceId: instanceId, + name: name, + sources: sources, + tags: tags, + target: target, + transformation: transformation + ) + return try await self.createDataIntegrationFlow(input, logger: logger) + } + + /// Create a data lake dataset. + @Sendable + @inlinable + public func createDataLakeDataset(_ input: CreateDataLakeDatasetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateDataLakeDatasetResponse { + try await self.client.execute( + operation: "CreateDataLakeDataset", + path: "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}", + httpMethod: .PUT, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Create a data lake dataset. + /// + /// Parameters: + /// - description: The description of the dataset. + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - name: The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. + /// - namespace: The name space of the dataset. asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. + /// - schema: The custom schema of the data lake dataset and is only required when the name space is default. + /// - tags: The tags of the dataset. + /// - logger: Logger use during operation + @inlinable + public func createDataLakeDataset( + description: String? = nil, + instanceId: String, + name: String, + namespace: String, + schema: DataLakeDatasetSchema? = nil, + tags: [String: String]? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateDataLakeDatasetResponse { + let input = CreateDataLakeDatasetRequest( + description: description, + instanceId: instanceId, + name: name, + namespace: namespace, + schema: schema, + tags: tags + ) + return try await self.createDataLakeDataset(input, logger: logger) + } + + /// Create a new instance for AWS Supply Chain. This is an asynchronous operation. Upon receiving a CreateInstance request, AWS Supply Chain immediately returns the instance resource, with instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for an instance creation. You can use GetInstance to check the status of the instance. + @Sendable + @inlinable + public func createInstance(_ input: CreateInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateInstanceResponse { + try await self.client.execute( + operation: "CreateInstance", + path: "/api/instance", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Create a new instance for AWS Supply Chain. This is an asynchronous operation. Upon receiving a CreateInstance request, AWS Supply Chain immediately returns the instance resource, with instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for an instance creation. You can use GetInstance to check the status of the instance. + /// + /// Parameters: + /// - clientToken: The client token for idempotency. + /// - instanceDescription: The AWS Supply Chain instance description. + /// - instanceName: The AWS Supply Chain instance name. + /// - kmsKeyArn: The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon Web Services owned KMS key. If you don't provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key. + /// - tags: The Amazon Web Services tags of an instance to be created. + /// - logger: Logger use during operation + @inlinable + public func createInstance( + clientToken: String? = CreateInstanceRequest.idempotencyToken(), + instanceDescription: String? = nil, + instanceName: String? = nil, + kmsKeyArn: String? = nil, + tags: [String: String]? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> CreateInstanceResponse { + let input = CreateInstanceRequest( + clientToken: clientToken, + instanceDescription: instanceDescription, + instanceName: instanceName, + kmsKeyArn: kmsKeyArn, + tags: tags + ) + return try await self.createInstance(input, logger: logger) + } + + /// Delete the DataIntegrationFlow. + @Sendable + @inlinable + public func deleteDataIntegrationFlow(_ input: DeleteDataIntegrationFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteDataIntegrationFlowResponse { + try await self.client.execute( + operation: "DeleteDataIntegrationFlow", + path: "/api/data-integration/instance/{instanceId}/data-integration-flows/{name}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Delete the DataIntegrationFlow. + /// + /// Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - name: The name of the DataIntegrationFlow to be deleted. 
+ /// - logger: Logger use during operation + @inlinable + public func deleteDataIntegrationFlow( + instanceId: String, + name: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteDataIntegrationFlowResponse { + let input = DeleteDataIntegrationFlowRequest( + instanceId: instanceId, + name: name + ) + return try await self.deleteDataIntegrationFlow(input, logger: logger) + } + + /// Delete a data lake dataset. + @Sendable + @inlinable + public func deleteDataLakeDataset(_ input: DeleteDataLakeDatasetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteDataLakeDatasetResponse { + try await self.client.execute( + operation: "DeleteDataLakeDataset", + path: "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Delete a data lake dataset. + /// + /// Parameters: + /// - instanceId: The AWS Supply Chain instance identifier. + /// - name: The name of the dataset. If the namespace is asc, the name must be one of the supported data entities . + /// - namespace: The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + /// - logger: Logger use during operation + @inlinable + public func deleteDataLakeDataset( + instanceId: String, + name: String, + namespace: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteDataLakeDatasetResponse { + let input = DeleteDataLakeDatasetRequest( + instanceId: instanceId, + name: name, + namespace: namespace + ) + return try await self.deleteDataLakeDataset(input, logger: logger) + } + + /// Delete the instance. This is an asynchronous operation. Upon receiving a DeleteInstance request, AWS Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status. + @Sendable + @inlinable + public func deleteInstance(_ input: DeleteInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteInstanceResponse { + try await self.client.execute( + operation: "DeleteInstance", + path: "/api/instance/{instanceId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Delete the instance. This is an asynchronous operation. Upon receiving a DeleteInstance request, AWS Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status. + /// + /// Parameters: + /// - instanceId: The AWS Supply Chain instance identifier. + /// - logger: Logger use during operation + @inlinable + public func deleteInstance( + instanceId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> DeleteInstanceResponse { + let input = DeleteInstanceRequest( + instanceId: instanceId + ) + return try await self.deleteInstance(input, logger: logger) + } + /// Get status and details of a BillOfMaterialsImportJob. @Sendable @inlinable @@ -146,6 +371,242 @@ public struct SupplyChain: AWSService { return try await self.getBillOfMaterialsImportJob(input, logger: logger) } + /// View the DataIntegrationFlow details. 
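// A minimal, editorial sketch (not part of the generated code) of the delete operations
// above: remove a custom data lake dataset, then the instance itself. Assumes `supplyChain`
// is an already-configured SupplyChain client; the identifiers are hypothetical.
import SotoSupplyChain

func tearDown(supplyChain: SupplyChain) async throws {
    let instanceId = "00000000-0000-0000-0000-000000000000" // hypothetical instance ID

    // Delete a user-defined dataset from the "default" namespace.
    _ = try await supplyChain.deleteDataLakeDataset(
        instanceId: instanceId,
        name: "my_custom_dataset", // hypothetical dataset name
        namespace: "default"
    )

    // DeleteInstance is asynchronous; the instance transitions through the Deleting state,
    // which can be observed with getInstance.
    _ = try await supplyChain.deleteInstance(instanceId: instanceId)
}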
+ @Sendable + @inlinable + public func getDataIntegrationFlow(_ input: GetDataIntegrationFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataIntegrationFlowResponse { + try await self.client.execute( + operation: "GetDataIntegrationFlow", + path: "/api/data-integration/instance/{instanceId}/data-integration-flows/{name}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// View the DataIntegrationFlow details. + /// + /// Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - name: The name of the DataIntegrationFlow created. + /// - logger: Logger use during operation + @inlinable + public func getDataIntegrationFlow( + instanceId: String, + name: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetDataIntegrationFlowResponse { + let input = GetDataIntegrationFlowRequest( + instanceId: instanceId, + name: name + ) + return try await self.getDataIntegrationFlow(input, logger: logger) + } + + /// Get a data lake dataset. + @Sendable + @inlinable + public func getDataLakeDataset(_ input: GetDataLakeDatasetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetDataLakeDatasetResponse { + try await self.client.execute( + operation: "GetDataLakeDataset", + path: "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Get a data lake dataset. + /// + /// Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - name: The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. + /// - namespace: The name space of the dataset. The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. + /// - logger: Logger use during operation + @inlinable + public func getDataLakeDataset( + instanceId: String, + name: String, + namespace: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetDataLakeDatasetResponse { + let input = GetDataLakeDatasetRequest( + instanceId: instanceId, + name: name, + namespace: namespace + ) + return try await self.getDataLakeDataset(input, logger: logger) + } + + /// Get the AWS Supply Chain instance details. + @Sendable + @inlinable + public func getInstance(_ input: GetInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetInstanceResponse { + try await self.client.execute( + operation: "GetInstance", + path: "/api/instance/{instanceId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Get the AWS Supply Chain instance details. + /// + /// Parameters: + /// - instanceId: The AWS Supply Chain instance identifier + /// - logger: Logger use during operation + @inlinable + public func getInstance( + instanceId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetInstanceResponse { + let input = GetInstanceRequest( + instanceId: instanceId + ) + return try await self.getInstance(input, logger: logger) + } + + /// Lists all the DataIntegrationFlows in a paginated way. 
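// A minimal, editorial sketch (not part of the generated code) of the read operations
// above. Assumes `supplyChain` is a configured SupplyChain client; the instance ID and
// dataset name are hypothetical.
import SotoSupplyChain

func describeResources(supplyChain: SupplyChain) async throws {
    let instanceId = "00000000-0000-0000-0000-000000000000" // hypothetical

    let instance = try await supplyChain.getInstance(instanceId: instanceId)
    print(instance)

    // For the "asc" namespace the dataset name must be one of the supported data entities;
    // "product" is used here purely as an illustration.
    let dataset = try await supplyChain.getDataLakeDataset(
        instanceId: instanceId,
        name: "product",
        namespace: "asc"
    )
    print(dataset)
}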
+ @Sendable + @inlinable + public func listDataIntegrationFlows(_ input: ListDataIntegrationFlowsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDataIntegrationFlowsResponse { + try await self.client.execute( + operation: "ListDataIntegrationFlows", + path: "/api/data-integration/instance/{instanceId}/data-integration-flows", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists all the DataIntegrationFlows in a paginated way. + /// + /// Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - maxResults: Specify the maximum number of DataIntegrationFlows to fetch in one paginated request. + /// - nextToken: The pagination token to fetch the next page of the DataIntegrationFlows. + /// - logger: Logger use during operation + @inlinable + public func listDataIntegrationFlows( + instanceId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListDataIntegrationFlowsResponse { + let input = ListDataIntegrationFlowsRequest( + instanceId: instanceId, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listDataIntegrationFlows(input, logger: logger) + } + + /// List the data lake datasets for a specific instance and name space. + @Sendable + @inlinable + public func listDataLakeDatasets(_ input: ListDataLakeDatasetsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDataLakeDatasetsResponse { + try await self.client.execute( + operation: "ListDataLakeDatasets", + path: "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List the data lake datasets for a specific instance and name space. + /// + /// Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - maxResults: The max number of datasets to fetch in this paginated request. + /// - namespace: The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + /// - nextToken: The pagination token to fetch next page of datasets. + /// - logger: Logger use during operation + @inlinable + public func listDataLakeDatasets( + instanceId: String, + maxResults: Int? = nil, + namespace: String, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListDataLakeDatasetsResponse { + let input = ListDataLakeDatasetsRequest( + instanceId: instanceId, + maxResults: maxResults, + namespace: namespace, + nextToken: nextToken + ) + return try await self.listDataLakeDatasets(input, logger: logger) + } + + /// List all the AWS Supply Chain instances in a paginated way. + @Sendable + @inlinable + public func listInstances(_ input: ListInstancesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListInstancesResponse { + try await self.client.execute( + operation: "ListInstances", + path: "/api/instance", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List all the AWS Supply Chain instances in a paginated way. + /// + /// Parameters: + /// - instanceNameFilter: The filter to ListInstances based on their names. + /// - instanceStateFilter: The filter to ListInstances based on their state. 
+ /// - maxResults: Specify the maximum number of instances to fetch in this paginated request. + /// - nextToken: The pagination token to fetch the next page of instances. + /// - logger: Logger use during operation + @inlinable + public func listInstances( + instanceNameFilter: [String]? = nil, + instanceStateFilter: [InstanceState]? = nil, + maxResults: Int? = nil, + nextToken: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListInstancesResponse { + let input = ListInstancesRequest( + instanceNameFilter: instanceNameFilter, + instanceStateFilter: instanceStateFilter, + maxResults: maxResults, + nextToken: nextToken + ) + return try await self.listInstances(input, logger: logger) + } + + /// List all the tags for an Amazon Web ServicesSupply Chain resource. + @Sendable + @inlinable + public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { + try await self.client.execute( + operation: "ListTagsForResource", + path: "/api/tags/{resourceArn}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// List all the tags for an Amazon Web ServicesSupply Chain resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Web Services Supply chain resource ARN that needs tags to be listed. + /// - logger: Logger use during operation + @inlinable + public func listTagsForResource( + resourceArn: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListTagsForResourceResponse { + let input = ListTagsForResourceRequest( + resourceArn: resourceArn + ) + return try await self.listTagsForResource(input, logger: logger) + } + /// Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion. @Sendable @inlinable @@ -189,6 +650,184 @@ public struct SupplyChain: AWSService { ) return try await self.sendDataIntegrationEvent(input, logger: logger) } + + /// Create tags for an Amazon Web Services Supply chain resource. + @Sendable + @inlinable + public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { + try await self.client.execute( + operation: "TagResource", + path: "/api/tags/{resourceArn}", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Create tags for an Amazon Web Services Supply chain resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Web Services Supply chain resource ARN that needs to be tagged. + /// - tags: The tags of the Amazon Web Services Supply chain resource to be created. + /// - logger: Logger use during operation + @inlinable + public func tagResource( + resourceArn: String, + tags: [String: String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> TagResourceResponse { + let input = TagResourceRequest( + resourceArn: resourceArn, + tags: tags + ) + return try await self.tagResource(input, logger: logger) + } + + /// Delete tags for an Amazon Web Services Supply chain resource. 
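// A minimal, editorial sketch (not part of the generated code) of tagging a Supply Chain
// resource and reading its tags back. Assumes `supplyChain` is a configured SupplyChain
// client; the ARN below is purely illustrative.
import SotoSupplyChain

func tagInstance(supplyChain: SupplyChain) async throws {
    // Hypothetical resource ARN for an AWS Supply Chain instance.
    let resourceArn = "arn:aws:scn:us-east-1:123456789012:instance/00000000-0000-0000-0000-000000000000"

    _ = try await supplyChain.tagResource(
        resourceArn: resourceArn,
        tags: ["team": "planning", "env": "dev"]
    )

    let tags = try await supplyChain.listTagsForResource(resourceArn: resourceArn)
    print(tags)
}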
+ @Sendable + @inlinable + public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { + try await self.client.execute( + operation: "UntagResource", + path: "/api/tags/{resourceArn}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Delete tags for an Amazon Web Services Supply chain resource. + /// + /// Parameters: + /// - resourceArn: The Amazon Web Services Supply chain resource ARN that needs to be untagged. + /// - tagKeys: The list of tag keys to be deleted for an Amazon Web Services Supply Chain resource. + /// - logger: Logger use during operation + @inlinable + public func untagResource( + resourceArn: String, + tagKeys: [String], + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UntagResourceResponse { + let input = UntagResourceRequest( + resourceArn: resourceArn, + tagKeys: tagKeys + ) + return try await self.untagResource(input, logger: logger) + } + + /// Update the DataIntegrationFlow. + @Sendable + @inlinable + public func updateDataIntegrationFlow(_ input: UpdateDataIntegrationFlowRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDataIntegrationFlowResponse { + try await self.client.execute( + operation: "UpdateDataIntegrationFlow", + path: "/api/data-integration/instance/{instanceId}/data-integration-flows/{name}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Update the DataIntegrationFlow. + /// + /// Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - name: The name of the DataIntegrationFlow to be updated. + /// - sources: The new source configurations for the DataIntegrationFlow. + /// - target: The new target configurations for the DataIntegrationFlow. + /// - transformation: The new transformation configurations for the DataIntegrationFlow. + /// - logger: Logger use during operation + @inlinable + public func updateDataIntegrationFlow( + instanceId: String, + name: String, + sources: [DataIntegrationFlowSource]? = nil, + target: DataIntegrationFlowTarget? = nil, + transformation: DataIntegrationFlowTransformation? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateDataIntegrationFlowResponse { + let input = UpdateDataIntegrationFlowRequest( + instanceId: instanceId, + name: name, + sources: sources, + target: target, + transformation: transformation + ) + return try await self.updateDataIntegrationFlow(input, logger: logger) + } + + /// Update a data lake dataset. + @Sendable + @inlinable + public func updateDataLakeDataset(_ input: UpdateDataLakeDatasetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDataLakeDatasetResponse { + try await self.client.execute( + operation: "UpdateDataLakeDataset", + path: "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Update a data lake dataset. + /// + /// Parameters: + /// - description: The updated description of the data lake dataset. + /// - instanceId: The Amazon Web Services Chain instance identifier. + /// - name: The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. + /// - namespace: The name space of the dataset. 
The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. + /// - logger: Logger use during operation + @inlinable + public func updateDataLakeDataset( + description: String? = nil, + instanceId: String, + name: String, + namespace: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateDataLakeDatasetResponse { + let input = UpdateDataLakeDatasetRequest( + description: description, + instanceId: instanceId, + name: name, + namespace: namespace + ) + return try await self.updateDataLakeDataset(input, logger: logger) + } + + /// Update the instance. + @Sendable + @inlinable + public func updateInstance(_ input: UpdateInstanceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateInstanceResponse { + try await self.client.execute( + operation: "UpdateInstance", + path: "/api/instance/{instanceId}", + httpMethod: .PATCH, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Update the instance. + /// + /// Parameters: + /// - instanceDescription: The AWS Supply Chain instance description. + /// - instanceId: The AWS Supply Chain instance identifier. + /// - instanceName: The AWS Supply Chain instance name. + /// - logger: Logger use during operation + @inlinable + public func updateInstance( + instanceDescription: String? = nil, + instanceId: String, + instanceName: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> UpdateInstanceResponse { + let input = UpdateInstanceRequest( + instanceDescription: instanceDescription, + instanceId: instanceId, + instanceName: instanceName + ) + return try await self.updateInstance(input, logger: logger) + } } extension SupplyChain { @@ -199,3 +838,160 @@ extension SupplyChain { self.config = from.config.with(patch: patch) } } + +// MARK: Paginators + +@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) +extension SupplyChain { + /// Return PaginatorSequence for operation ``listDataIntegrationFlows(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listDataIntegrationFlowsPaginator( + _ input: ListDataIntegrationFlowsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDataIntegrationFlows, + inputKey: \ListDataIntegrationFlowsRequest.nextToken, + outputKey: \ListDataIntegrationFlowsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listDataIntegrationFlows(_:logger:)``. + /// + /// - Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - maxResults: Specify the maximum number of DataIntegrationFlows to fetch in one paginated request. + /// - logger: Logger used for logging + @inlinable + public func listDataIntegrationFlowsPaginator( + instanceId: String, + maxResults: Int? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListDataIntegrationFlowsRequest( + instanceId: instanceId, + maxResults: maxResults + ) + return self.listDataIntegrationFlowsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listDataLakeDatasets(_:logger:)``. 
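// A minimal, editorial sketch (not part of the generated code) of driving the paginator
// defined above. Each element of the sequence is one ListDataIntegrationFlowsResponse page.
// Assumes `supplyChain` is a configured SupplyChain client; the instance ID is hypothetical.
import SotoSupplyChain

func printAllFlows(supplyChain: SupplyChain) async throws {
    let pages = supplyChain.listDataIntegrationFlowsPaginator(
        instanceId: "00000000-0000-0000-0000-000000000000", // hypothetical
        maxResults: 10
    )
    for try await page in pages {
        print(page)
    }
}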
+ /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listDataLakeDatasetsPaginator( + _ input: ListDataLakeDatasetsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listDataLakeDatasets, + inputKey: \ListDataLakeDatasetsRequest.nextToken, + outputKey: \ListDataLakeDatasetsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listDataLakeDatasets(_:logger:)``. + /// + /// - Parameters: + /// - instanceId: The Amazon Web Services Supply Chain instance identifier. + /// - maxResults: The max number of datasets to fetch in this paginated request. + /// - namespace: The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + /// - logger: Logger used for logging + @inlinable + public func listDataLakeDatasetsPaginator( + instanceId: String, + maxResults: Int? = nil, + namespace: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListDataLakeDatasetsRequest( + instanceId: instanceId, + maxResults: maxResults, + namespace: namespace + ) + return self.listDataLakeDatasetsPaginator(input, logger: logger) + } + + /// Return PaginatorSequence for operation ``listInstances(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listInstancesPaginator( + _ input: ListInstancesRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listInstances, + inputKey: \ListInstancesRequest.nextToken, + outputKey: \ListInstancesResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listInstances(_:logger:)``. + /// + /// - Parameters: + /// - instanceNameFilter: The filter to ListInstances based on their names. + /// - instanceStateFilter: The filter to ListInstances based on their state. + /// - maxResults: Specify the maximum number of instances to fetch in this paginated request. + /// - logger: Logger used for logging + @inlinable + public func listInstancesPaginator( + instanceNameFilter: [String]? = nil, + instanceStateFilter: [InstanceState]? = nil, + maxResults: Int? 
= nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListInstancesRequest( + instanceNameFilter: instanceNameFilter, + instanceStateFilter: instanceStateFilter, + maxResults: maxResults + ) + return self.listInstancesPaginator(input, logger: logger) + } +} + +extension SupplyChain.ListDataIntegrationFlowsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> SupplyChain.ListDataIntegrationFlowsRequest { + return .init( + instanceId: self.instanceId, + maxResults: self.maxResults, + nextToken: token + ) + } +} + +extension SupplyChain.ListDataLakeDatasetsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> SupplyChain.ListDataLakeDatasetsRequest { + return .init( + instanceId: self.instanceId, + maxResults: self.maxResults, + namespace: self.namespace, + nextToken: token + ) + } +} + +extension SupplyChain.ListInstancesRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> SupplyChain.ListInstancesRequest { + return .init( + instanceNameFilter: self.instanceNameFilter, + instanceStateFilter: self.instanceStateFilter, + maxResults: self.maxResults, + nextToken: token + ) + } +} diff --git a/Sources/Soto/Services/SupplyChain/SupplyChain_shapes.swift b/Sources/Soto/Services/SupplyChain/SupplyChain_shapes.swift index f8b5dc9410..b47798c7cb 100644 --- a/Sources/Soto/Services/SupplyChain/SupplyChain_shapes.swift +++ b/Sources/Soto/Services/SupplyChain/SupplyChain_shapes.swift @@ -54,6 +54,55 @@ extension SupplyChain { public var description: String { return self.rawValue } } + public enum DataIntegrationFlowFileType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case csv = "CSV" + case json = "JSON" + case parquet = "PARQUET" + public var description: String { return self.rawValue } + } + + public enum DataIntegrationFlowLoadType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case incremental = "INCREMENTAL" + case replace = "REPLACE" + public var description: String { return self.rawValue } + } + + public enum DataIntegrationFlowSourceType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dataset = "DATASET" + case s3 = "S3" + public var description: String { return self.rawValue } + } + + public enum DataIntegrationFlowTargetType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case dataset = "DATASET" + case s3 = "S3" + public var description: String { return self.rawValue } + } + + public enum DataIntegrationFlowTransformationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case none = "NONE" + case sql = "SQL" + public var description: String { return self.rawValue } + } + + public enum DataLakeDatasetSchemaFieldType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case double = "DOUBLE" + case int = "INT" + case string = "STRING" + case timestamp = "TIMESTAMP" + public var description: String { return self.rawValue } + } + + public enum InstanceState: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "Active" + case createFailed = "CreateFailed" + case deleteFailed = "DeleteFailed" + case deleted = "Deleted" + case deleting = "Deleting" + case initializing = "Initializing" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct 
BillOfMaterialsImportJob: AWSDecodableShape { @@ -87,7 +136,7 @@ extension SupplyChain { } public struct CreateBillOfMaterialsImportJobRequest: AWSEncodableShape { - /// An idempotency token. + /// An idempotency token ensures the API request is only completed no more than once. This way, retrying the request will not trigger the operation multiple times. A client token is a unique, case-sensitive string of 33 to 128 ASCII characters. To make an idempotent API request, specify a client token in the request. You should not reuse the same client token for other requests. If you retry a successful request with the same client token, the request will succeed with no further actions being taken, and you will receive the same API response as the original successful request. public let clientToken: String? /// The AWS Supply Chain instance identifier. public let instanceId: String @@ -139,118 +188,1522 @@ extension SupplyChain { } } - public struct GetBillOfMaterialsImportJobRequest: AWSEncodableShape { - /// The AWS Supply Chain instance identifier. + public struct CreateDataIntegrationFlowRequest: AWSEncodableShape { + /// The Amazon Web Services Supply Chain instance identifier. public let instanceId: String - /// The BillOfMaterialsImportJob identifier. - public let jobId: String + /// Name of the DataIntegrationFlow. + public let name: String + /// The source configurations for DataIntegrationFlow. + public let sources: [DataIntegrationFlowSource] + /// The tags of the DataIntegrationFlow to be created + public let tags: [String: String]? + /// The target configurations for DataIntegrationFlow. + public let target: DataIntegrationFlowTarget + /// The transformation configurations for DataIntegrationFlow. + public let transformation: DataIntegrationFlowTransformation @inlinable - public init(instanceId: String, jobId: String) { + public init(instanceId: String, name: String, sources: [DataIntegrationFlowSource], tags: [String: String]? = nil, target: DataIntegrationFlowTarget, transformation: DataIntegrationFlowTransformation) { self.instanceId = instanceId - self.jobId = jobId + self.name = name + self.sources = sources + self.tags = tags + self.target = target + self.transformation = transformation } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) + var container = encoder.container(keyedBy: CodingKeys.self) request.encodePath(self.instanceId, key: "instanceId") - request.encodePath(self.jobId, key: "jobId") + request.encodePath(self.name, key: "name") + try container.encode(self.sources, forKey: .sources) + try container.encodeIfPresent(self.tags, forKey: .tags) + try container.encode(self.target, forKey: .target) + try container.encode(self.transformation, forKey: .transformation) } public func validate(name: String) throws { try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") - try self.validate(self.jobId, name: "jobId", parent: name, max: 36) - try self.validate(self.jobId, name: "jobId", parent: name, min: 36) - try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9-]+$") + try self.sources.forEach { + try $0.validate(name: "\(name).sources[]") + } + try self.validate(self.sources, name: "sources", parent: name, max: 40) + try self.validate(self.sources, name: "sources", parent: name, min: 1) + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + try self.target.validate(name: "\(name).target") + try self.transformation.validate(name: "\(name).transformation") } - private enum CodingKeys: CodingKey {} + private enum CodingKeys: String, CodingKey { + case sources = "sources" + case tags = "tags" + case target = "target" + case transformation = "transformation" + } } - public struct GetBillOfMaterialsImportJobResponse: AWSDecodableShape { - /// The BillOfMaterialsImportJob. - public let job: BillOfMaterialsImportJob + public struct CreateDataIntegrationFlowResponse: AWSDecodableShape { + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The name of the DataIntegrationFlow created. + public let name: String @inlinable - public init(job: BillOfMaterialsImportJob) { - self.job = job + public init(instanceId: String, name: String) { + self.instanceId = instanceId + self.name = name } private enum CodingKeys: String, CodingKey { - case job = "job" + case instanceId = "instanceId" + case name = "name" } } - public struct SendDataIntegrationEventRequest: AWSEncodableShape { - /// The idempotent client token. - public let clientToken: String? - /// The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain . - public let data: String - /// Event identifier (for example, orderId for InboundOrder) used for data sharing or partitioning. - public let eventGroupId: String - /// The event timestamp (in epoch seconds). - public let eventTimestamp: Date? - /// The data event type. 
- public let eventType: DataIntegrationEventType - /// The AWS Supply Chain instance identifier. + public struct CreateDataLakeDatasetRequest: AWSEncodableShape { + /// The description of the dataset. + public let description: String? + /// The Amazon Web Services Supply Chain instance identifier. public let instanceId: String + /// The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. + public let name: String + /// The name space of the dataset. asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. + public let namespace: String + /// The custom schema of the data lake dataset and is only required when the name space is default. + public let schema: DataLakeDatasetSchema? + /// The tags of the dataset. + public let tags: [String: String]? @inlinable - public init(clientToken: String? = SendDataIntegrationEventRequest.idempotencyToken(), data: String, eventGroupId: String, eventTimestamp: Date? = nil, eventType: DataIntegrationEventType, instanceId: String) { - self.clientToken = clientToken - self.data = data - self.eventGroupId = eventGroupId - self.eventTimestamp = eventTimestamp - self.eventType = eventType + public init(description: String? = nil, instanceId: String, name: String, namespace: String, schema: DataLakeDatasetSchema? = nil, tags: [String: String]? = nil) { + self.description = description self.instanceId = instanceId + self.name = name + self.namespace = namespace + self.schema = schema + self.tags = tags } public func encode(to encoder: Encoder) throws { let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer var container = encoder.container(keyedBy: CodingKeys.self) - try container.encodeIfPresent(self.clientToken, forKey: .clientToken) - try container.encode(self.data, forKey: .data) - try container.encode(self.eventGroupId, forKey: .eventGroupId) - try container.encodeIfPresent(self.eventTimestamp, forKey: .eventTimestamp) - try container.encode(self.eventType, forKey: .eventType) + try container.encodeIfPresent(self.description, forKey: .description) request.encodePath(self.instanceId, key: "instanceId") + request.encodePath(self.name, key: "name") + request.encodePath(self.namespace, key: "namespace") + try container.encodeIfPresent(self.schema, forKey: .schema) + try container.encodeIfPresent(self.tags, forKey: .tags) } public func validate(name: String) throws { - try self.validate(self.clientToken, name: "clientToken", parent: name, max: 126) - try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) - try self.validate(self.data, name: "data", parent: name, max: 1048576) - try self.validate(self.data, name: "data", parent: name, min: 1) - try self.validate(self.eventGroupId, name: "eventGroupId", parent: name, max: 255) - try self.validate(self.eventGroupId, name: "eventGroupId", parent: name, min: 1) + try self.validate(self.description, name: "description", parent: name, max: 500) + try self.validate(self.description, name: "description", parent: name, min: 1) try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 75) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-z0-9_]+$") + try self.validate(self.namespace, name: "namespace", parent: name, max: 50) + try self.validate(self.namespace, name: "namespace", parent: name, min: 1) + try self.validate(self.namespace, name: "namespace", parent: name, pattern: "^[a-z]+$") + try self.schema?.validate(name: "\(name).schema") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + case schema = "schema" + case tags = "tags" + } + } + + public struct CreateDataLakeDatasetResponse: AWSDecodableShape { + /// The detail of created dataset. + public let dataset: DataLakeDataset + + @inlinable + public init(dataset: DataLakeDataset) { + self.dataset = dataset + } + + private enum CodingKeys: String, CodingKey { + case dataset = "dataset" + } + } + + public struct CreateInstanceRequest: AWSEncodableShape { + /// The client token for idempotency. + public let clientToken: String? + /// The AWS Supply Chain instance description. + public let instanceDescription: String? + /// The AWS Supply Chain instance name. + public let instanceName: String? + /// The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon Web Services owned KMS key. 
If you don't provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key. + public let kmsKeyArn: String? + /// The Amazon Web Services tags of an instance to be created. + public let tags: [String: String]? + + @inlinable + public init(clientToken: String? = CreateInstanceRequest.idempotencyToken(), instanceDescription: String? = nil, instanceName: String? = nil, kmsKeyArn: String? = nil, tags: [String: String]? = nil) { + self.clientToken = clientToken + self.instanceDescription = instanceDescription + self.instanceName = instanceName + self.kmsKeyArn = kmsKeyArn + self.tags = tags + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 126) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) + try self.validate(self.instanceDescription, name: "instanceDescription", parent: name, max: 501) + try self.validate(self.instanceDescription, name: "instanceDescription", parent: name, pattern: "^([a-zA-Z0-9., _ʼ'%-]){0,500}$") + try self.validate(self.instanceName, name: "instanceName", parent: name, max: 63) + try self.validate(self.instanceName, name: "instanceName", parent: name, pattern: "^(?![ _ʼ'%-])[a-zA-Z0-9 _ʼ'%-]{0,62}[a-zA-Z0-9]$") + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, max: 2048) + try self.validate(self.kmsKeyArn, name: "kmsKeyArn", parent: name, pattern: "^arn:[a-z0-9][-.a-z0-9]{0,62}:kms:([a-z0-9][-.a-z0-9]{0,62})?:([a-z0-9][-.a-z0-9]{0,62})?:key/.{0,1019}$") + try self.tags?.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) } private enum CodingKeys: String, CodingKey { case clientToken = "clientToken" - case data = "data" - case eventGroupId = "eventGroupId" - case eventTimestamp = "eventTimestamp" - case eventType = "eventType" + case instanceDescription = "instanceDescription" + case instanceName = "instanceName" + case kmsKeyArn = "kmsKeyArn" + case tags = "tags" } } - public struct SendDataIntegrationEventResponse: AWSDecodableShape { - /// The unique event identifier. - public let eventId: String + public struct CreateInstanceResponse: AWSDecodableShape { + /// The AWS Supply Chain instance resource data details. + public let instance: Instance @inlinable - public init(eventId: String) { - self.eventId = eventId + public init(instance: Instance) { + self.instance = instance } private enum CodingKeys: String, CodingKey { - case eventId = "eventId" + case instance = "instance" + } + } + + public struct DataIntegrationFlow: AWSDecodableShape { + /// The DataIntegrationFlow creation timestamp. + public let createdTime: Date + /// The DataIntegrationFlow instance ID. + public let instanceId: String + /// The DataIntegrationFlow last modified timestamp. + public let lastModifiedTime: Date + /// The DataIntegrationFlow name. + public let name: String + /// The DataIntegrationFlow source configurations. + public let sources: [DataIntegrationFlowSource] + /// The DataIntegrationFlow target configuration. + public let target: DataIntegrationFlowTarget + /// The DataIntegrationFlow transformation configurations. 
+ public let transformation: DataIntegrationFlowTransformation + + @inlinable + public init(createdTime: Date, instanceId: String, lastModifiedTime: Date, name: String, sources: [DataIntegrationFlowSource], target: DataIntegrationFlowTarget, transformation: DataIntegrationFlowTransformation) { + self.createdTime = createdTime + self.instanceId = instanceId + self.lastModifiedTime = lastModifiedTime + self.name = name + self.sources = sources + self.target = target + self.transformation = transformation + } + + private enum CodingKeys: String, CodingKey { + case createdTime = "createdTime" + case instanceId = "instanceId" + case lastModifiedTime = "lastModifiedTime" + case name = "name" + case sources = "sources" + case target = "target" + case transformation = "transformation" + } + } + + public struct DataIntegrationFlowDatasetOptions: AWSEncodableShape & AWSDecodableShape { + /// The dataset load option to remove duplicates. + public let dedupeRecords: Bool? + /// The dataset data load type in dataset options. + public let loadType: DataIntegrationFlowLoadType? + + @inlinable + public init(dedupeRecords: Bool? = nil, loadType: DataIntegrationFlowLoadType? = nil) { + self.dedupeRecords = dedupeRecords + self.loadType = loadType + } + + private enum CodingKeys: String, CodingKey { + case dedupeRecords = "dedupeRecords" + case loadType = "loadType" + } + } + + public struct DataIntegrationFlowDatasetSourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The ARN of the dataset. + public let datasetIdentifier: String + /// The dataset DataIntegrationFlow source options. + public let options: DataIntegrationFlowDatasetOptions? + + @inlinable + public init(datasetIdentifier: String, options: DataIntegrationFlowDatasetOptions? = nil) { + self.datasetIdentifier = datasetIdentifier + self.options = options + } + + public func validate(name: String) throws { + try self.validate(self.datasetIdentifier, name: "datasetIdentifier", parent: name, max: 1011) + try self.validate(self.datasetIdentifier, name: "datasetIdentifier", parent: name, min: 1) + try self.validate(self.datasetIdentifier, name: "datasetIdentifier", parent: name, pattern: "^[-_/A-Za-z0-9:]+$") + } + + private enum CodingKeys: String, CodingKey { + case datasetIdentifier = "datasetIdentifier" + case options = "options" + } + } + + public struct DataIntegrationFlowDatasetTargetConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The dataset ARN. + public let datasetIdentifier: String + /// The dataset DataIntegrationFlow target options. + public let options: DataIntegrationFlowDatasetOptions? + + @inlinable + public init(datasetIdentifier: String, options: DataIntegrationFlowDatasetOptions? = nil) { + self.datasetIdentifier = datasetIdentifier + self.options = options + } + + public func validate(name: String) throws { + try self.validate(self.datasetIdentifier, name: "datasetIdentifier", parent: name, max: 1011) + try self.validate(self.datasetIdentifier, name: "datasetIdentifier", parent: name, min: 1) + try self.validate(self.datasetIdentifier, name: "datasetIdentifier", parent: name, pattern: "^[-_/A-Za-z0-9:]+$") + } + + private enum CodingKeys: String, CodingKey { + case datasetIdentifier = "datasetIdentifier" + case options = "options" + } + } + + public struct DataIntegrationFlowS3Options: AWSEncodableShape & AWSDecodableShape { + /// The Amazon S3 file type in S3 options. + public let fileType: DataIntegrationFlowFileType? + + @inlinable + public init(fileType: DataIntegrationFlowFileType? 
= nil) { + self.fileType = fileType + } + + private enum CodingKeys: String, CodingKey { + case fileType = "fileType" + } + } + + public struct DataIntegrationFlowS3SourceConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The bucketName of the S3 source objects. + public let bucketName: String + /// The other options of the S3 DataIntegrationFlow source. + public let options: DataIntegrationFlowS3Options? + /// The prefix of the S3 source objects. + public let prefix: String + + @inlinable + public init(bucketName: String, options: DataIntegrationFlowS3Options? = nil, prefix: String) { + self.bucketName = bucketName + self.options = options + self.prefix = prefix + } + + public func validate(name: String) throws { + try self.validate(self.bucketName, name: "bucketName", parent: name, max: 63) + try self.validate(self.bucketName, name: "bucketName", parent: name, min: 3) + try self.validate(self.bucketName, name: "bucketName", parent: name, pattern: "^[a-z0-9][a-z0-9.-]*[a-z0-9]$") + try self.validate(self.prefix, name: "prefix", parent: name, max: 700) + try self.validate(self.prefix, name: "prefix", parent: name, pattern: "^[/A-Za-z0-9._-]+$") + } + + private enum CodingKeys: String, CodingKey { + case bucketName = "bucketName" + case options = "options" + case prefix = "prefix" + } + } + + public struct DataIntegrationFlowS3TargetConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The bucketName of the S3 target objects. + public let bucketName: String + /// The S3 DataIntegrationFlow target options. + public let options: DataIntegrationFlowS3Options? + /// The prefix of the S3 target objects. + public let prefix: String + + @inlinable + public init(bucketName: String, options: DataIntegrationFlowS3Options? = nil, prefix: String) { + self.bucketName = bucketName + self.options = options + self.prefix = prefix + } + + public func validate(name: String) throws { + try self.validate(self.bucketName, name: "bucketName", parent: name, max: 63) + try self.validate(self.bucketName, name: "bucketName", parent: name, min: 3) + try self.validate(self.bucketName, name: "bucketName", parent: name, pattern: "^[a-z0-9][a-z0-9.-]*[a-z0-9]$") + try self.validate(self.prefix, name: "prefix", parent: name, max: 700) + try self.validate(self.prefix, name: "prefix", parent: name, pattern: "^[/A-Za-z0-9._-]+$") + } + + private enum CodingKeys: String, CodingKey { + case bucketName = "bucketName" + case options = "options" + case prefix = "prefix" + } + } + + public struct DataIntegrationFlowSQLTransformationConfiguration: AWSEncodableShape & AWSDecodableShape { + /// The transformation SQL query body based on SparkSQL. + public let query: String + + @inlinable + public init(query: String) { + self.query = query + } + + public func validate(name: String) throws { + try self.validate(self.query, name: "query", parent: name, max: 65535) + try self.validate(self.query, name: "query", parent: name, min: 1) + } + + private enum CodingKeys: String, CodingKey { + case query = "query" + } + } + + public struct DataIntegrationFlowSource: AWSEncodableShape & AWSDecodableShape { + /// The dataset DataIntegrationFlow source. + public let datasetSource: DataIntegrationFlowDatasetSourceConfiguration? + /// The S3 DataIntegrationFlow source. + public let s3Source: DataIntegrationFlowS3SourceConfiguration? + /// The DataIntegrationFlow source name that can be used as table alias in SQL transformation query. + public let sourceName: String + /// The DataIntegrationFlow source type. 
+ public let sourceType: DataIntegrationFlowSourceType + + @inlinable + public init(datasetSource: DataIntegrationFlowDatasetSourceConfiguration? = nil, s3Source: DataIntegrationFlowS3SourceConfiguration? = nil, sourceName: String, sourceType: DataIntegrationFlowSourceType) { + self.datasetSource = datasetSource + self.s3Source = s3Source + self.sourceName = sourceName + self.sourceType = sourceType + } + + public func validate(name: String) throws { + try self.datasetSource?.validate(name: "\(name).datasetSource") + try self.s3Source?.validate(name: "\(name).s3Source") + try self.validate(self.sourceName, name: "sourceName", parent: name, max: 256) + try self.validate(self.sourceName, name: "sourceName", parent: name, min: 1) + try self.validate(self.sourceName, name: "sourceName", parent: name, pattern: "^[A-Za-z0-9_]+$") + } + + private enum CodingKeys: String, CodingKey { + case datasetSource = "datasetSource" + case s3Source = "s3Source" + case sourceName = "sourceName" + case sourceType = "sourceType" + } + } + + public struct DataIntegrationFlowTarget: AWSEncodableShape & AWSDecodableShape { + /// The dataset DataIntegrationFlow target. + public let datasetTarget: DataIntegrationFlowDatasetTargetConfiguration? + /// The S3 DataIntegrationFlow target. + public let s3Target: DataIntegrationFlowS3TargetConfiguration? + /// The DataIntegrationFlow target type. + public let targetType: DataIntegrationFlowTargetType + + @inlinable + public init(datasetTarget: DataIntegrationFlowDatasetTargetConfiguration? = nil, s3Target: DataIntegrationFlowS3TargetConfiguration? = nil, targetType: DataIntegrationFlowTargetType) { + self.datasetTarget = datasetTarget + self.s3Target = s3Target + self.targetType = targetType + } + + public func validate(name: String) throws { + try self.datasetTarget?.validate(name: "\(name).datasetTarget") + try self.s3Target?.validate(name: "\(name).s3Target") + } + + private enum CodingKeys: String, CodingKey { + case datasetTarget = "datasetTarget" + case s3Target = "s3Target" + case targetType = "targetType" + } + } + + public struct DataIntegrationFlowTransformation: AWSEncodableShape & AWSDecodableShape { + /// The SQL DataIntegrationFlow transformation configuration. + public let sqlTransformation: DataIntegrationFlowSQLTransformationConfiguration? + /// The DataIntegrationFlow transformation type. + public let transformationType: DataIntegrationFlowTransformationType + + @inlinable + public init(sqlTransformation: DataIntegrationFlowSQLTransformationConfiguration? = nil, transformationType: DataIntegrationFlowTransformationType) { + self.sqlTransformation = sqlTransformation + self.transformationType = transformationType + } + + public func validate(name: String) throws { + try self.sqlTransformation?.validate(name: "\(name).sqlTransformation") + } + + private enum CodingKeys: String, CodingKey { + case sqlTransformation = "sqlTransformation" + case transformationType = "transformationType" + } + } + + public struct DataLakeDataset: AWSDecodableShape { + /// The arn of the dataset. + public let arn: String + /// The creation time of the dataset. + public let createdTime: Date + /// The description of the dataset. + public let description: String? + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The last modified time of the dataset. + public let lastModifiedTime: Date + /// The name of the dataset. 
For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. + public let name: String + /// The name space of the dataset. The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. + public let namespace: String + /// The schema of the dataset. + public let schema: DataLakeDatasetSchema + + @inlinable + public init(arn: String, createdTime: Date, description: String? = nil, instanceId: String, lastModifiedTime: Date, name: String, namespace: String, schema: DataLakeDatasetSchema) { + self.arn = arn + self.createdTime = createdTime + self.description = description + self.instanceId = instanceId + self.lastModifiedTime = lastModifiedTime + self.name = name + self.namespace = namespace + self.schema = schema + } + + private enum CodingKeys: String, CodingKey { + case arn = "arn" + case createdTime = "createdTime" + case description = "description" + case instanceId = "instanceId" + case lastModifiedTime = "lastModifiedTime" + case name = "name" + case namespace = "namespace" + case schema = "schema" + } + } + + public struct DataLakeDatasetSchema: AWSEncodableShape & AWSDecodableShape { + /// The list of field details of the dataset schema. + public let fields: [DataLakeDatasetSchemaField] + /// The name of the dataset schema. + public let name: String + + @inlinable + public init(fields: [DataLakeDatasetSchemaField], name: String) { + self.fields = fields + self.name = name + } + + public func validate(name: String) throws { + try self.fields.forEach { + try $0.validate(name: "\(name).fields[]") + } + try self.validate(self.fields, name: "fields", parent: name, max: 500) + try self.validate(self.fields, name: "fields", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, max: 100) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9]+$") + } + + private enum CodingKeys: String, CodingKey { + case fields = "fields" + case name = "name" + } + } + + public struct DataLakeDatasetSchemaField: AWSEncodableShape & AWSDecodableShape { + /// Indicate if the field is required or not. + public let isRequired: Bool + /// The dataset field name. + public let name: String + /// The dataset field type. + public let type: DataLakeDatasetSchemaFieldType + + @inlinable + public init(isRequired: Bool, name: String, type: DataLakeDatasetSchemaFieldType) { + self.isRequired = isRequired + self.name = name + self.type = type + } + + public func validate(name: String) throws { + try self.validate(self.name, name: "name", parent: name, max: 100) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-z0-9_]+$") + } + + private enum CodingKeys: String, CodingKey { + case isRequired = "isRequired" + case name = "name" + case type = "type" + } + } + + public struct DeleteDataIntegrationFlowRequest: AWSEncodableShape { + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The name of the DataIntegrationFlow to be deleted. 
+ public let name: String + + @inlinable + public init(instanceId: String, name: String) { + self.instanceId = instanceId + self.name = name + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + request.encodePath(self.name, key: "name") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9-]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteDataIntegrationFlowResponse: AWSDecodableShape { + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The name of the DataIntegrationFlow deleted. + public let name: String + + @inlinable + public init(instanceId: String, name: String) { + self.instanceId = instanceId + self.name = name + } + + private enum CodingKeys: String, CodingKey { + case instanceId = "instanceId" + case name = "name" + } + } + + public struct DeleteDataLakeDatasetRequest: AWSEncodableShape { + /// The AWS Supply Chain instance identifier. + public let instanceId: String + /// The name of the dataset. If the namespace is asc, the name must be one of the supported data entities . + public let name: String + /// The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + public let namespace: String + + @inlinable + public init(instanceId: String, name: String, namespace: String) { + self.instanceId = instanceId + self.name = name + self.namespace = namespace + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + request.encodePath(self.name, key: "name") + request.encodePath(self.namespace, key: "namespace") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 75) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-z0-9_]+$") + try self.validate(self.namespace, name: "namespace", parent: name, max: 50) + try self.validate(self.namespace, name: "namespace", parent: name, min: 1) + try self.validate(self.namespace, name: "namespace", parent: name, pattern: "^[a-z]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteDataLakeDatasetResponse: AWSDecodableShape { + /// The AWS Supply Chain instance identifier. 
+ public let instanceId: String + /// The name of deleted dataset. + public let name: String + /// The namespace of deleted dataset. + public let namespace: String + + @inlinable + public init(instanceId: String, name: String, namespace: String) { + self.instanceId = instanceId + self.name = name + self.namespace = namespace + } + + private enum CodingKeys: String, CodingKey { + case instanceId = "instanceId" + case name = "name" + case namespace = "namespace" + } + } + + public struct DeleteInstanceRequest: AWSEncodableShape { + /// The AWS Supply Chain instance identifier. + public let instanceId: String + + @inlinable + public init(instanceId: String) { + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct DeleteInstanceResponse: AWSDecodableShape { + /// The AWS Supply Chain instance resource data details. + public let instance: Instance + + @inlinable + public init(instance: Instance) { + self.instance = instance + } + + private enum CodingKeys: String, CodingKey { + case instance = "instance" + } + } + + public struct GetBillOfMaterialsImportJobRequest: AWSEncodableShape { + /// The AWS Supply Chain instance identifier. + public let instanceId: String + /// The BillOfMaterialsImportJob identifier. + public let jobId: String + + @inlinable + public init(instanceId: String, jobId: String) { + self.instanceId = instanceId + self.jobId = jobId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + request.encodePath(self.jobId, key: "jobId") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.jobId, name: "jobId", parent: name, max: 36) + try self.validate(self.jobId, name: "jobId", parent: name, min: 36) + try self.validate(self.jobId, name: "jobId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetBillOfMaterialsImportJobResponse: AWSDecodableShape { + /// The BillOfMaterialsImportJob. + public let job: BillOfMaterialsImportJob + + @inlinable + public init(job: BillOfMaterialsImportJob) { + self.job = job + } + + private enum CodingKeys: String, CodingKey { + case job = "job" + } + } + + public struct GetDataIntegrationFlowRequest: AWSEncodableShape { + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The name of the DataIntegrationFlow created. 
+ public let name: String + + @inlinable + public init(instanceId: String, name: String) { + self.instanceId = instanceId + self.name = name + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + request.encodePath(self.name, key: "name") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9-]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetDataIntegrationFlowResponse: AWSDecodableShape { + /// The details of the DataIntegrationFlow returned. + public let flow: DataIntegrationFlow + + @inlinable + public init(flow: DataIntegrationFlow) { + self.flow = flow + } + + private enum CodingKeys: String, CodingKey { + case flow = "flow" + } + } + + public struct GetDataLakeDatasetRequest: AWSEncodableShape { + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. + public let name: String + /// The name space of the dataset. The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. + public let namespace: String + + @inlinable + public init(instanceId: String, name: String, namespace: String) { + self.instanceId = instanceId + self.name = name + self.namespace = namespace + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + request.encodePath(self.name, key: "name") + request.encodePath(self.namespace, key: "namespace") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 75) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-z0-9_]+$") + try self.validate(self.namespace, name: "namespace", parent: name, max: 50) + try self.validate(self.namespace, name: "namespace", parent: name, min: 1) + try self.validate(self.namespace, name: "namespace", parent: name, pattern: "^[a-z]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetDataLakeDatasetResponse: AWSDecodableShape { + /// The fetched dataset details. 
+ public let dataset: DataLakeDataset + + @inlinable + public init(dataset: DataLakeDataset) { + self.dataset = dataset + } + + private enum CodingKeys: String, CodingKey { + case dataset = "dataset" + } + } + + public struct GetInstanceRequest: AWSEncodableShape { + /// The AWS Supply Chain instance identifier + public let instanceId: String + + @inlinable + public init(instanceId: String) { + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetInstanceResponse: AWSDecodableShape { + /// The instance resource data details. + public let instance: Instance + + @inlinable + public init(instance: Instance) { + self.instance = instance + } + + private enum CodingKeys: String, CodingKey { + case instance = "instance" + } + } + + public struct Instance: AWSDecodableShape { + /// The Amazon Web Services account ID that owns the instance. + public let awsAccountId: String + /// The instance creation timestamp. + public let createdTime: Date? + /// The Amazon Web Services Supply Chain instance description. + public let instanceDescription: String? + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The Amazon Web Services Supply Chain instance name. + public let instanceName: String? + /// The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you optionally provided for encryption. If you did not provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key and nothing is returned. + public let kmsKeyArn: String? + /// The instance last modified timestamp. + public let lastModifiedTime: Date? + /// The state of the instance. + public let state: InstanceState + /// The version number of the instance. + public let versionNumber: Double? + /// The WebApp DNS domain name of the instance. + public let webAppDnsDomain: String? + + @inlinable + public init(awsAccountId: String, createdTime: Date? = nil, instanceDescription: String? = nil, instanceId: String, instanceName: String? = nil, kmsKeyArn: String? = nil, lastModifiedTime: Date? = nil, state: InstanceState, versionNumber: Double? = nil, webAppDnsDomain: String? 
= nil) { + self.awsAccountId = awsAccountId + self.createdTime = createdTime + self.instanceDescription = instanceDescription + self.instanceId = instanceId + self.instanceName = instanceName + self.kmsKeyArn = kmsKeyArn + self.lastModifiedTime = lastModifiedTime + self.state = state + self.versionNumber = versionNumber + self.webAppDnsDomain = webAppDnsDomain + } + + private enum CodingKeys: String, CodingKey { + case awsAccountId = "awsAccountId" + case createdTime = "createdTime" + case instanceDescription = "instanceDescription" + case instanceId = "instanceId" + case instanceName = "instanceName" + case kmsKeyArn = "kmsKeyArn" + case lastModifiedTime = "lastModifiedTime" + case state = "state" + case versionNumber = "versionNumber" + case webAppDnsDomain = "webAppDnsDomain" + } + } + + public struct ListDataIntegrationFlowsRequest: AWSEncodableShape { + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// Specify the maximum number of DataIntegrationFlows to fetch in one paginated request. + public let maxResults: Int? + /// The pagination token to fetch the next page of the DataIntegrationFlows. + public let nextToken: String? + + @inlinable + public init(instanceId: String, maxResults: Int? = nil, nextToken: String? = nil) { + self.instanceId = instanceId + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 20) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 0) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 65535) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListDataIntegrationFlowsResponse: AWSDecodableShape { + /// The response parameters for ListDataIntegrationFlows. + public let flows: [DataIntegrationFlow] + /// The pagination token to fetch the next page of the DataIntegrationFlows. + public let nextToken: String? + + @inlinable + public init(flows: [DataIntegrationFlow], nextToken: String? = nil) { + self.flows = flows + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case flows = "flows" + case nextToken = "nextToken" + } + } + + public struct ListDataLakeDatasetsRequest: AWSEncodableShape { + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The max number of datasets to fetch in this paginated request. + public let maxResults: Int? + /// The namespace of the dataset. The available values are: asc: for AWS Supply Chain supported datasets . default: for datasets with custom user-defined schemas. + public let namespace: String + /// The pagination token to fetch next page of datasets. + public let nextToken: String? 
+ + @inlinable + public init(instanceId: String, maxResults: Int? = nil, namespace: String, nextToken: String? = nil) { + self.instanceId = instanceId + self.maxResults = maxResults + self.namespace = namespace + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodePath(self.namespace, key: "namespace") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 20) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 0) + try self.validate(self.namespace, name: "namespace", parent: name, max: 50) + try self.validate(self.namespace, name: "namespace", parent: name, min: 1) + try self.validate(self.namespace, name: "namespace", parent: name, pattern: "^[a-z]+$") + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 65535) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListDataLakeDatasetsResponse: AWSDecodableShape { + /// The list of fetched dataset details. + public let datasets: [DataLakeDataset] + /// The pagination token to fetch next page of datasets. + public let nextToken: String? + + @inlinable + public init(datasets: [DataLakeDataset], nextToken: String? = nil) { + self.datasets = datasets + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case datasets = "datasets" + case nextToken = "nextToken" + } + } + + public struct ListInstancesRequest: AWSEncodableShape { + /// The filter to ListInstances based on their names. + public let instanceNameFilter: [String]? + /// The filter to ListInstances based on their state. + public let instanceStateFilter: [InstanceState]? + /// Specify the maximum number of instances to fetch in this paginated request. + public let maxResults: Int? + /// The pagination token to fetch the next page of instances. + public let nextToken: String? + + @inlinable + public init(instanceNameFilter: [String]? = nil, instanceStateFilter: [InstanceState]? = nil, maxResults: Int? = nil, nextToken: String? = nil) { + self.instanceNameFilter = instanceNameFilter + self.instanceStateFilter = instanceStateFilter + self.maxResults = maxResults + self.nextToken = nextToken + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.instanceNameFilter, key: "instanceNameFilter") + request.encodeQuery(self.instanceStateFilter, key: "instanceStateFilter") + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + } + + public func validate(name: String) throws { + try self.instanceNameFilter?.forEach { + try validate($0, name: "instanceNameFilter[]", parent: name, max: 63) + try validate($0, name: "instanceNameFilter[]", parent: name, pattern: "^(?![ _ʼ'%-])[a-zA-Z0-9 _ʼ'%-]{0,62}[a-zA-Z0-9]$") + } + try self.validate(self.instanceNameFilter, name: "instanceNameFilter", parent: name, max: 10) + try self.validate(self.instanceStateFilter, name: "instanceStateFilter", parent: name, max: 6) + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 20) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 0) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 1024) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListInstancesResponse: AWSDecodableShape { + /// The list of instances resource data details. + public let instances: [Instance] + /// The pagination token to fetch the next page of instances. + public let nextToken: String? + + @inlinable + public init(instances: [Instance], nextToken: String? = nil) { + self.instances = instances + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case instances = "instances" + case nextToken = "nextToken" + } + } + + public struct ListTagsForResourceRequest: AWSEncodableShape { + /// The Amazon Web Services Supply chain resource ARN that needs tags to be listed. + public let resourceArn: String + + @inlinable + public init(resourceArn: String) { + self.resourceArn = resourceArn + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:scn(?::([a-z0-9-]+):([0-9]+):instance)?/([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})[-_./A-Za-z0-9]*$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListTagsForResourceResponse: AWSDecodableShape { + /// The tags added to an Amazon Web Services Supply Chain resource. + public let tags: [String: String] + + @inlinable + public init(tags: [String: String]) { + self.tags = tags + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct SendDataIntegrationEventRequest: AWSEncodableShape { + /// The idempotent client token. + public let clientToken: String? + /// The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain . + public let data: String + /// Event identifier (for example, orderId for InboundOrder) used for data sharing or partitioning. + public let eventGroupId: String + /// The event timestamp (in epoch seconds). + public let eventTimestamp: Date? 
+ /// The data event type. + public let eventType: DataIntegrationEventType + /// The AWS Supply Chain instance identifier. + public let instanceId: String + + @inlinable + public init(clientToken: String? = SendDataIntegrationEventRequest.idempotencyToken(), data: String, eventGroupId: String, eventTimestamp: Date? = nil, eventType: DataIntegrationEventType, instanceId: String) { + self.clientToken = clientToken + self.data = data + self.eventGroupId = eventGroupId + self.eventTimestamp = eventTimestamp + self.eventType = eventType + self.instanceId = instanceId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.clientToken, forKey: .clientToken) + try container.encode(self.data, forKey: .data) + try container.encode(self.eventGroupId, forKey: .eventGroupId) + try container.encodeIfPresent(self.eventTimestamp, forKey: .eventTimestamp) + try container.encode(self.eventType, forKey: .eventType) + request.encodePath(self.instanceId, key: "instanceId") + } + + public func validate(name: String) throws { + try self.validate(self.clientToken, name: "clientToken", parent: name, max: 126) + try self.validate(self.clientToken, name: "clientToken", parent: name, min: 33) + try self.validate(self.data, name: "data", parent: name, max: 1048576) + try self.validate(self.data, name: "data", parent: name, min: 1) + try self.validate(self.eventGroupId, name: "eventGroupId", parent: name, max: 255) + try self.validate(self.eventGroupId, name: "eventGroupId", parent: name, min: 1) + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + } + + private enum CodingKeys: String, CodingKey { + case clientToken = "clientToken" + case data = "data" + case eventGroupId = "eventGroupId" + case eventTimestamp = "eventTimestamp" + case eventType = "eventType" + } + } + + public struct SendDataIntegrationEventResponse: AWSDecodableShape { + /// The unique event identifier. + public let eventId: String + + @inlinable + public init(eventId: String) { + self.eventId = eventId + } + + private enum CodingKeys: String, CodingKey { + case eventId = "eventId" + } + } + + public struct TagResourceRequest: AWSEncodableShape { + /// The Amazon Web Services Supply chain resource ARN that needs to be tagged. + public let resourceArn: String + /// The tags of the Amazon Web Services Supply chain resource to be created. + public let tags: [String: String] + + @inlinable + public init(resourceArn: String, tags: [String: String]) { + self.resourceArn = resourceArn + self.tags = tags + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + try container.encode(self.tags, forKey: .tags) + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:scn(?::([a-z0-9-]+):([0-9]+):instance)?/([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})[-_./A-Za-z0-9]*$") + try self.tags.forEach { + try validate($0.key, name: "tags.key", parent: name, max: 128) + try validate($0.key, name: "tags.key", parent: name, min: 1) + try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) + } + try self.validate(self.tags, name: "tags", parent: name, max: 200) + } + + private enum CodingKeys: String, CodingKey { + case tags = "tags" + } + } + + public struct TagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UntagResourceRequest: AWSEncodableShape { + /// The Amazon Web Services Supply chain resource ARN that needs to be untagged. + public let resourceArn: String + /// The list of tag keys to be deleted for an Amazon Web Services Supply Chain resource. + public let tagKeys: [String] + + @inlinable + public init(resourceArn: String, tagKeys: [String]) { + self.resourceArn = resourceArn + self.tagKeys = tagKeys + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.resourceArn, key: "resourceArn") + request.encodeQuery(self.tagKeys, key: "tagKeys") + } + + public func validate(name: String) throws { + try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 1011) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) + try self.validate(self.resourceArn, name: "resourceArn", parent: name, pattern: "^arn:aws:scn(?::([a-z0-9-]+):([0-9]+):instance)?/([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})[-_./A-Za-z0-9]*$") + try self.tagKeys.forEach { + try validate($0, name: "tagKeys[]", parent: name, max: 128) + try validate($0, name: "tagKeys[]", parent: name, min: 1) + } + try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 200) + } + + private enum CodingKeys: CodingKey {} + } + + public struct UntagResourceResponse: AWSDecodableShape { + public init() {} + } + + public struct UpdateDataIntegrationFlowRequest: AWSEncodableShape { + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The name of the DataIntegrationFlow to be updated. + public let name: String + /// The new source configurations for the DataIntegrationFlow. + public let sources: [DataIntegrationFlowSource]? + /// The new target configurations for the DataIntegrationFlow. + public let target: DataIntegrationFlowTarget? + /// The new transformation configurations for the DataIntegrationFlow. + public let transformation: DataIntegrationFlowTransformation? + + @inlinable + public init(instanceId: String, name: String, sources: [DataIntegrationFlowSource]? = nil, target: DataIntegrationFlowTarget? = nil, transformation: DataIntegrationFlowTransformation? 
= nil) { + self.instanceId = instanceId + self.name = name + self.sources = sources + self.target = target + self.transformation = transformation + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.instanceId, key: "instanceId") + request.encodePath(self.name, key: "name") + try container.encodeIfPresent(self.sources, forKey: .sources) + try container.encodeIfPresent(self.target, forKey: .target) + try container.encodeIfPresent(self.transformation, forKey: .transformation) + } + + public func validate(name: String) throws { + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 256) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[A-Za-z0-9-]+$") + try self.sources?.forEach { + try $0.validate(name: "\(name).sources[]") + } + try self.validate(self.sources, name: "sources", parent: name, max: 40) + try self.validate(self.sources, name: "sources", parent: name, min: 1) + try self.target?.validate(name: "\(name).target") + try self.transformation?.validate(name: "\(name).transformation") + } + + private enum CodingKeys: String, CodingKey { + case sources = "sources" + case target = "target" + case transformation = "transformation" + } + } + + public struct UpdateDataIntegrationFlowResponse: AWSDecodableShape { + /// The details of the updated DataIntegrationFlow. + public let flow: DataIntegrationFlow + + @inlinable + public init(flow: DataIntegrationFlow) { + self.flow = flow + } + + private enum CodingKeys: String, CodingKey { + case flow = "flow" + } + } + + public struct UpdateDataLakeDatasetRequest: AWSEncodableShape { + /// The updated description of the data lake dataset. + public let description: String? + /// The Amazon Web Services Supply Chain instance identifier. + public let instanceId: String + /// The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. + public let name: String + /// The name space of the dataset. The available values are: asc - For information on the Amazon Web Services Supply Chain supported datasets see https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html. default - For datasets with custom user-defined schemas. + public let namespace: String + + @inlinable + public init(description: String? = nil, instanceId: String, name: String, namespace: String) { + self.description = description + self.instanceId = instanceId + self.name = name + self.namespace = namespace + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as!
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.description, forKey: .description) + request.encodePath(self.instanceId, key: "instanceId") + request.encodePath(self.name, key: "name") + request.encodePath(self.namespace, key: "namespace") + } + + public func validate(name: String) throws { + try self.validate(self.description, name: "description", parent: name, max: 500) + try self.validate(self.description, name: "description", parent: name, min: 1) + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.name, name: "name", parent: name, max: 75) + try self.validate(self.name, name: "name", parent: name, min: 1) + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-z0-9_]+$") + try self.validate(self.namespace, name: "namespace", parent: name, max: 50) + try self.validate(self.namespace, name: "namespace", parent: name, min: 1) + try self.validate(self.namespace, name: "namespace", parent: name, pattern: "^[a-z]+$") + } + + private enum CodingKeys: String, CodingKey { + case description = "description" + } + } + + public struct UpdateDataLakeDatasetResponse: AWSDecodableShape { + /// The updated dataset details. + public let dataset: DataLakeDataset + + @inlinable + public init(dataset: DataLakeDataset) { + self.dataset = dataset + } + + private enum CodingKeys: String, CodingKey { + case dataset = "dataset" + } + } + + public struct UpdateInstanceRequest: AWSEncodableShape { + /// The AWS Supply Chain instance description. + public let instanceDescription: String? + /// The AWS Supply Chain instance identifier. + public let instanceId: String + /// The AWS Supply Chain instance name. + public let instanceName: String? + + @inlinable + public init(instanceDescription: String? = nil, instanceId: String, instanceName: String? = nil) { + self.instanceDescription = instanceDescription + self.instanceId = instanceId + self.instanceName = instanceName + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encodeIfPresent(self.instanceDescription, forKey: .instanceDescription) + request.encodePath(self.instanceId, key: "instanceId") + try container.encodeIfPresent(self.instanceName, forKey: .instanceName) + } + + public func validate(name: String) throws { + try self.validate(self.instanceDescription, name: "instanceDescription", parent: name, max: 501) + try self.validate(self.instanceDescription, name: "instanceDescription", parent: name, pattern: "^([a-zA-Z0-9., _ʼ'%-]){0,500}$") + try self.validate(self.instanceId, name: "instanceId", parent: name, max: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, min: 36) + try self.validate(self.instanceId, name: "instanceId", parent: name, pattern: "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") + try self.validate(self.instanceName, name: "instanceName", parent: name, max: 63) + try self.validate(self.instanceName, name: "instanceName", parent: name, pattern: "^(?![ _ʼ'%-])[a-zA-Z0-9 _ʼ'%-]{0,62}[a-zA-Z0-9]$") + } + + private enum CodingKeys: String, CodingKey { + case instanceDescription = "instanceDescription" + case instanceName = "instanceName" + } + } + + public struct UpdateInstanceResponse: AWSDecodableShape { + /// The instance resource data details. + public let instance: Instance + + @inlinable + public init(instance: Instance) { + self.instance = instance + } + + private enum CodingKeys: String, CodingKey { + case instance = "instance" } } } diff --git a/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_api.swift b/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_api.swift index 1326cb8fde..2475d3aa5d 100644 --- a/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_api.swift +++ b/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_api.swift @@ -106,6 +106,7 @@ public struct TimestreamInfluxDB: AWSService { /// - name: The name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. DB instance names must be unique per customer and per region. /// - organization: The name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users. /// - password: The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in AWS SecretManager in your account. + /// - port: The port number on which InfluxDB accepts connections. Valid Values: 1024-65535 Default: 8086 Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 /// - publiclyAccessible: Configures the DB instance with a public IP to facilitate access. /// - tags: A list of key-value pairs to associate with the DB instance. /// - username: The username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. For example, my-user1. This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in Amazon Secrets Manager in your account. 
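For illustration only (not part of the patch): the port parameter documented above and threaded through the hunks below flows from the createDbInstance convenience method into CreateDbInstanceInput. A minimal caller-side sketch, assuming an existing AWSClient and using placeholder names, credentials, security group and subnet IDs (the DbInstanceType case name and region are likewise assumptions), might look like:

import SotoTimestreamInfluxDB

// Hypothetical example: create a Timestream for InfluxDB instance that listens on a
// non-default port. All identifiers below are placeholders, not values from this patch.
func createInstanceOnCustomPort(client: AWSClient) async throws {
    let influxDB = TimestreamInfluxDB(client: client, region: .useast1)
    let output = try await influxDB.createDbInstance(
        allocatedStorage: 100,                          // storage in GiB
        dbInstanceType: .dbInfluxMedium,                // assumed enum case name
        name: "example-influxdb",
        password: "examplePassword123",
        port: 8087,                                     // valid range 1024-65535; default is 8086
        vpcSecurityGroupIds: ["sg-0123456789abcdef0"],
        vpcSubnetIds: ["subnet-0123456789abcdef0"]
    )
    print("Created DB instance:", output.id)
}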
@@ -124,6 +125,7 @@ public struct TimestreamInfluxDB: AWSService { name: String, organization: String? = nil, password: String, + port: Int? = nil, publiclyAccessible: Bool? = nil, tags: [String: String]? = nil, username: String? = nil, @@ -142,6 +144,7 @@ public struct TimestreamInfluxDB: AWSService { name: name, organization: organization, password: password, + port: port, publiclyAccessible: publiclyAccessible, tags: tags, username: username, @@ -454,6 +457,7 @@ public struct TimestreamInfluxDB: AWSService { /// - deploymentType: Specifies whether the DB instance will be deployed as a standalone instance or with a Multi-AZ standby for high availability. /// - identifier: The id of the DB instance. /// - logDeliveryConfiguration: Configuration for sending InfluxDB engine logs to send to specified S3 bucket. + /// - port: The port number on which InfluxDB accepts connections. If you change the Port value, your database restarts immediately. Valid Values: 1024-65535 Default: 8086 Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 /// - logger: Logger use during operation @inlinable public func updateDbInstance( @@ -462,6 +466,7 @@ public struct TimestreamInfluxDB: AWSService { deploymentType: DeploymentType? = nil, identifier: String, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, + port: Int? = nil, logger: Logger = AWSClient.loggingDisabled ) async throws -> UpdateDbInstanceOutput { let input = UpdateDbInstanceInput( @@ -469,7 +474,8 @@ public struct TimestreamInfluxDB: AWSService { dbParameterGroupIdentifier: dbParameterGroupIdentifier, deploymentType: deploymentType, identifier: identifier, - logDeliveryConfiguration: logDeliveryConfiguration + logDeliveryConfiguration: logDeliveryConfiguration, + port: port ) return try await self.updateDbInstance(input, logger: logger) } diff --git a/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_shapes.swift b/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_shapes.swift index b86b74446b..3f98e1b880 100644 --- a/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_shapes.swift +++ b/Sources/Soto/Services/TimestreamInfluxDB/TimestreamInfluxDB_shapes.swift @@ -51,6 +51,14 @@ extension TimestreamInfluxDB { public var description: String { return self.rawValue } } + public enum DurationType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case hours = "hours" + case milliseconds = "milliseconds" + case minutes = "minutes" + case seconds = "seconds" + public var description: String { return self.rawValue } + } + public enum LogLevel: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case debug = "debug" case error = "error" @@ -100,6 +108,8 @@ extension TimestreamInfluxDB { public let organization: String? /// The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in AWS SecretManager in your account. public let password: String + /// The port number on which InfluxDB accepts connections. Valid Values: 1024-65535 Default: 8086 Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 + public let port: Int? /// Configures the DB instance with a public IP to facilitate access. public let publiclyAccessible: Bool? /// A list of key-value pairs to associate with the DB instance. 
@@ -112,7 +122,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int, bucket: String? = nil, dbInstanceType: DbInstanceType, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, organization: String? = nil, password: String, publiclyAccessible: Bool? = nil, tags: [String: String]? = nil, username: String? = nil, vpcSecurityGroupIds: [String], vpcSubnetIds: [String]) { + public init(allocatedStorage: Int, bucket: String? = nil, dbInstanceType: DbInstanceType, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, organization: String? = nil, password: String, port: Int? = nil, publiclyAccessible: Bool? = nil, tags: [String: String]? = nil, username: String? = nil, vpcSecurityGroupIds: [String], vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.bucket = bucket self.dbInstanceType = dbInstanceType @@ -123,6 +133,7 @@ extension TimestreamInfluxDB { self.name = name self.organization = organization self.password = password + self.port = port self.publiclyAccessible = publiclyAccessible self.tags = tags self.username = username @@ -141,12 +152,14 @@ extension TimestreamInfluxDB { try self.validate(self.dbParameterGroupIdentifier, name: "dbParameterGroupIdentifier", parent: name, pattern: "^[a-zA-Z0-9]+$") try self.validate(self.name, name: "name", parent: name, max: 40) try self.validate(self.name, name: "name", parent: name, min: 3) - try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$") try self.validate(self.organization, name: "organization", parent: name, max: 64) try self.validate(self.organization, name: "organization", parent: name, min: 1) try self.validate(self.password, name: "password", parent: name, max: 64) try self.validate(self.password, name: "password", parent: name, min: 8) try self.validate(self.password, name: "password", parent: name, pattern: "^[a-zA-Z0-9]+$") + try self.validate(self.port, name: "port", parent: name, max: 65535) + try self.validate(self.port, name: "port", parent: name, min: 1024) try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -181,6 +194,7 @@ extension TimestreamInfluxDB { case name = "name" case organization = "organization" case password = "password" + case port = "port" case publiclyAccessible = "publiclyAccessible" case tags = "tags" case username = "username" @@ -214,6 +228,8 @@ extension TimestreamInfluxDB { public let logDeliveryConfiguration: LogDeliveryConfiguration? /// The customer-supplied name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. public let name: String + /// The port number on which InfluxDB accepts connections. The default value is 8086. + public let port: Int? /// Indicates if the DB instance has a public IP to facilitate access. public let publiclyAccessible: Bool? /// The Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance. 
@@ -226,7 +242,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { + public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.arn = arn self.availabilityZone = availabilityZone @@ -239,6 +255,7 @@ extension TimestreamInfluxDB { self.influxAuthParametersSecretArn = influxAuthParametersSecretArn self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.port = port self.publiclyAccessible = publiclyAccessible self.secondaryAvailabilityZone = secondaryAvailabilityZone self.status = status @@ -259,6 +276,7 @@ extension TimestreamInfluxDB { case influxAuthParametersSecretArn = "influxAuthParametersSecretArn" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case port = "port" case publiclyAccessible = "publiclyAccessible" case secondaryAvailabilityZone = "secondaryAvailabilityZone" case status = "status" @@ -288,7 +306,7 @@ extension TimestreamInfluxDB { public func validate(name: String) throws { try self.validate(self.name, name: "name", parent: name, max: 64) try self.validate(self.name, name: "name", parent: name, min: 3) - try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$") + try self.validate(self.name, name: "name", parent: name, pattern: "^[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$") try self.tags?.forEach { try validate($0.key, name: "tags.key", parent: name, max: 128) try validate($0.key, name: "tags.key", parent: name, min: 1) @@ -353,11 +371,13 @@ extension TimestreamInfluxDB { public let id: String /// This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and AWS CLI commands. public let name: String + /// The port number on which InfluxDB accepts connections. + public let port: Int? /// The status of the DB instance. public let status: Status? @inlinable - public init(allocatedStorage: Int? = nil, arn: String, dbInstanceType: DbInstanceType? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, name: String, status: Status? = nil) { + public init(allocatedStorage: Int? = nil, arn: String, dbInstanceType: DbInstanceType? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, name: String, port: Int? = nil, status: Status? 
= nil) { self.allocatedStorage = allocatedStorage self.arn = arn self.dbInstanceType = dbInstanceType @@ -366,6 +386,7 @@ extension TimestreamInfluxDB { self.endpoint = endpoint self.id = id self.name = name + self.port = port self.status = status } @@ -378,6 +399,7 @@ extension TimestreamInfluxDB { case endpoint = "endpoint" case id = "id" case name = "name" + case port = "port" case status = "status" } } @@ -453,6 +475,8 @@ extension TimestreamInfluxDB { public let logDeliveryConfiguration: LogDeliveryConfiguration? /// The customer-supplied name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. public let name: String + /// The port number on which InfluxDB accepts connections. + public let port: Int? /// Indicates if the DB instance has a public IP to facilitate access. public let publiclyAccessible: Bool? /// The Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance. @@ -465,7 +489,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { + public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.arn = arn self.availabilityZone = availabilityZone @@ -478,6 +502,7 @@ extension TimestreamInfluxDB { self.influxAuthParametersSecretArn = influxAuthParametersSecretArn self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.port = port self.publiclyAccessible = publiclyAccessible self.secondaryAvailabilityZone = secondaryAvailabilityZone self.status = status @@ -498,6 +523,7 @@ extension TimestreamInfluxDB { case influxAuthParametersSecretArn = "influxAuthParametersSecretArn" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case port = "port" case publiclyAccessible = "publiclyAccessible" case secondaryAvailabilityZone = "secondaryAvailabilityZone" case status = "status" @@ -506,6 +532,24 @@ extension TimestreamInfluxDB { } } + public struct Duration: AWSEncodableShape & AWSDecodableShape { + /// The type of duration for InfluxDB parameters. + public let durationType: DurationType + /// The value of duration for InfluxDB parameters. 
+ public let value: Int64 + + @inlinable + public init(durationType: DurationType, value: Int64) { + self.durationType = durationType + self.value = value + } + + private enum CodingKeys: String, CodingKey { + case durationType = "durationType" + case value = "value" + } + } + public struct GetDbInstanceInput: AWSEncodableShape { /// The id of the DB instance. public let identifier: String @@ -551,6 +595,8 @@ extension TimestreamInfluxDB { public let logDeliveryConfiguration: LogDeliveryConfiguration? /// The customer-supplied name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. public let name: String + /// The port number on which InfluxDB accepts connections. + public let port: Int? /// Indicates if the DB instance has a public IP to facilitate access. public let publiclyAccessible: Bool? /// The Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance. @@ -563,7 +609,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { + public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.arn = arn self.availabilityZone = availabilityZone @@ -576,6 +622,7 @@ extension TimestreamInfluxDB { self.influxAuthParametersSecretArn = influxAuthParametersSecretArn self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.port = port self.publiclyAccessible = publiclyAccessible self.secondaryAvailabilityZone = secondaryAvailabilityZone self.status = status @@ -596,6 +643,7 @@ extension TimestreamInfluxDB { case influxAuthParametersSecretArn = "influxAuthParametersSecretArn" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case port = "port" case publiclyAccessible = "publiclyAccessible" case secondaryAvailabilityZone = "secondaryAvailabilityZone" case status = "status" @@ -657,38 +705,146 @@ extension TimestreamInfluxDB { public struct InfluxDBv2Parameters: AWSEncodableShape & AWSDecodableShape { /// Include option to show detailed logs for Flux queries. Default: false public let fluxLogEnabled: Bool? + /// Maximum duration the server should keep established connections alive while waiting for new requests. Set to 0 for no timeout. Default: 3 minutes + public let httpIdleTimeout: Duration? + /// Maximum duration the server should try to read HTTP headers for new requests. 
Set to 0 for no timeout. Default: 10 seconds + public let httpReadHeaderTimeout: Duration? + /// Maximum duration the server should try to read the entirety of new requests. Set to 0 for no timeout. Default: 0 + public let httpReadTimeout: Duration? + /// Maximum duration the server should spend processing and responding to write requests. Set to 0 for no timeout. Default: 0 + public let httpWriteTimeout: Duration? + /// Maximum number of group by time buckets a SELECT statement can create. 0 allows an unlimited number of buckets. Default: 0 + public let influxqlMaxSelectBuckets: Int64? + /// Maximum number of points a SELECT statement can process. 0 allows an unlimited number of points. InfluxDB checks the point count every second (so queries exceeding the maximum aren’t immediately aborted). Default: 0 + public let influxqlMaxSelectPoint: Int64? + /// Maximum number of series a SELECT statement can return. 0 allows an unlimited number of series. Default: 0 + public let influxqlMaxSelectSeries: Int64? /// Log output level. InfluxDB outputs log entries with severity levels greater than or equal to the level specified. Default: info public let logLevel: LogLevel? /// Disable the HTTP /metrics endpoint which exposes internal InfluxDB metrics. Default: false public let metricsDisabled: Bool? /// Disable the task scheduler. If problematic tasks prevent InfluxDB from starting, use this option to start InfluxDB without scheduling or executing tasks. Default: false public let noTasks: Bool? + /// Disable the /debug/pprof HTTP endpoint. This endpoint provides runtime profiling data and can be helpful when debugging. Default: false + public let pprofDisabled: Bool? /// Number of queries allowed to execute concurrently. Setting to 0 allows an unlimited number of concurrent queries. Default: 0 public let queryConcurrency: Int? + /// Initial bytes of memory allocated for a query. Default: 0 + public let queryInitialMemoryBytes: Int64? + /// Maximum number of queries allowed in execution queue. When queue limit is reached, new queries are rejected. Setting to 0 allows an unlimited number of queries in the queue. Default: 0 + public let queryMaxMemoryBytes: Int64? + /// Maximum bytes of memory allowed for a single query. Must be greater or equal to queryInitialMemoryBytes. Default: 0 + public let queryMemoryBytes: Int64? /// Maximum number of queries allowed in execution queue. When queue limit is reached, new queries are rejected. Setting to 0 allows an unlimited number of queries in the queue. Default: 0 public let queryQueueSize: Int? + /// Specifies the Time to Live (TTL) in minutes for newly created user sessions. Default: 60 + public let sessionLength: Int? + /// Disables automatically extending a user’s session TTL on each request. By default, every request sets the session’s expiration time to five minutes from now. When disabled, sessions expire after the specified session length and the user is redirected to the login page, even if recently active. Default: false + public let sessionRenewDisabled: Bool? + /// Maximum size (in bytes) a shard’s cache can reach before it starts rejecting writes. Must be greater than storageCacheSnapShotMemorySize and lower than instance’s total memory capacity. We recommend setting it to below 15% of the total memory capacity. Default: 1073741824 + public let storageCacheMaxMemorySize: Int64? + /// Size (in bytes) at which the storage engine will snapshot the cache and write it to a TSM file to make more memory available. 
Must not be greater than storageCacheMaxMemorySize. Default: 26214400 + public let storageCacheSnapshotMemorySize: Int64? + /// Duration at which the storage engine will snapshot the cache and write it to a new TSM file if the shard hasn’t received writes or deletes. Default: 10 minutes + public let storageCacheSnapshotWriteColdDuration: Duration? + /// Duration at which the storage engine will compact all TSM files in a shard if it hasn't received writes or deletes. Default: 4 hours + public let storageCompactFullWriteColdDuration: Duration? + /// Rate limit (in bytes per second) that TSM compactions can write to disk. Default: 50331648 + public let storageCompactThroughputBurst: Int64? + /// Maximum number of full and level compactions that can run concurrently. A value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater than zero limits compactions to that value. This setting does not apply to cache snapshotting. Default: 0 + public let storageMaxConcurrentCompactions: Int? + /// Size (in bytes) at which an index write-ahead log (WAL) file will compact into an index file. Lower sizes will cause log files to be compacted more quickly and result in lower heap usage at the expense of write throughput. Default: 1048576 + public let storageMaxIndexLogFileSize: Int64? + /// Skip field size validation on incoming write requests. Default: false + public let storageNoValidateFieldSize: Bool? + /// Interval of retention policy enforcement checks. Must be greater than 0. Default: 30 minutes + public let storageRetentionCheckInterval: Duration? + /// Maximum number of snapshot compactions that can run concurrently across all series partitions in a database. Default: 0 + public let storageSeriesFileMaxConcurrentSnapshotCompactions: Int? + /// Size of the internal cache used in the TSI index to store previously calculated series results. Cached results are returned quickly rather than needing to be recalculated when a subsequent query with the same tag key/value predicate is executed. Setting this value to 0 will disable the cache and may decrease query performance. Default: 100 + public let storageSeriesIdSetCacheSize: Int64? + /// Maximum number writes to the WAL directory to attempt at the same time. Setting this value to 0 results in number of processing units available x2. Default: 0 + public let storageWalMaxConcurrentWrites: Int? + /// Maximum amount of time a write request to the WAL directory will wait when the maximum number of concurrent active writes to the WAL directory has been met. Set to 0 to disable the timeout. Default: 10 minutes + public let storageWalMaxWriteDelay: Duration? /// Enable tracing in InfluxDB and specifies the tracing type. Tracing is disabled by default. public let tracingType: TracingType? + /// Disable the InfluxDB user interface (UI). The UI is enabled by default. Default: false + public let uiDisabled: Bool? @inlinable - public init(fluxLogEnabled: Bool? = nil, logLevel: LogLevel? = nil, metricsDisabled: Bool? = nil, noTasks: Bool? = nil, queryConcurrency: Int? = nil, queryQueueSize: Int? = nil, tracingType: TracingType? = nil) { + public init(fluxLogEnabled: Bool? = nil, httpIdleTimeout: Duration? = nil, httpReadHeaderTimeout: Duration? = nil, httpReadTimeout: Duration? = nil, httpWriteTimeout: Duration? = nil, influxqlMaxSelectBuckets: Int64? = nil, influxqlMaxSelectPoint: Int64? = nil, influxqlMaxSelectSeries: Int64? = nil, logLevel: LogLevel? = nil, metricsDisabled: Bool? = nil, noTasks: Bool? = nil, pprofDisabled: Bool? 
= nil, queryConcurrency: Int? = nil, queryInitialMemoryBytes: Int64? = nil, queryMaxMemoryBytes: Int64? = nil, queryMemoryBytes: Int64? = nil, queryQueueSize: Int? = nil, sessionLength: Int? = nil, sessionRenewDisabled: Bool? = nil, storageCacheMaxMemorySize: Int64? = nil, storageCacheSnapshotMemorySize: Int64? = nil, storageCacheSnapshotWriteColdDuration: Duration? = nil, storageCompactFullWriteColdDuration: Duration? = nil, storageCompactThroughputBurst: Int64? = nil, storageMaxConcurrentCompactions: Int? = nil, storageMaxIndexLogFileSize: Int64? = nil, storageNoValidateFieldSize: Bool? = nil, storageRetentionCheckInterval: Duration? = nil, storageSeriesFileMaxConcurrentSnapshotCompactions: Int? = nil, storageSeriesIdSetCacheSize: Int64? = nil, storageWalMaxConcurrentWrites: Int? = nil, storageWalMaxWriteDelay: Duration? = nil, tracingType: TracingType? = nil, uiDisabled: Bool? = nil) { self.fluxLogEnabled = fluxLogEnabled + self.httpIdleTimeout = httpIdleTimeout + self.httpReadHeaderTimeout = httpReadHeaderTimeout + self.httpReadTimeout = httpReadTimeout + self.httpWriteTimeout = httpWriteTimeout + self.influxqlMaxSelectBuckets = influxqlMaxSelectBuckets + self.influxqlMaxSelectPoint = influxqlMaxSelectPoint + self.influxqlMaxSelectSeries = influxqlMaxSelectSeries self.logLevel = logLevel self.metricsDisabled = metricsDisabled self.noTasks = noTasks + self.pprofDisabled = pprofDisabled self.queryConcurrency = queryConcurrency + self.queryInitialMemoryBytes = queryInitialMemoryBytes + self.queryMaxMemoryBytes = queryMaxMemoryBytes + self.queryMemoryBytes = queryMemoryBytes self.queryQueueSize = queryQueueSize + self.sessionLength = sessionLength + self.sessionRenewDisabled = sessionRenewDisabled + self.storageCacheMaxMemorySize = storageCacheMaxMemorySize + self.storageCacheSnapshotMemorySize = storageCacheSnapshotMemorySize + self.storageCacheSnapshotWriteColdDuration = storageCacheSnapshotWriteColdDuration + self.storageCompactFullWriteColdDuration = storageCompactFullWriteColdDuration + self.storageCompactThroughputBurst = storageCompactThroughputBurst + self.storageMaxConcurrentCompactions = storageMaxConcurrentCompactions + self.storageMaxIndexLogFileSize = storageMaxIndexLogFileSize + self.storageNoValidateFieldSize = storageNoValidateFieldSize + self.storageRetentionCheckInterval = storageRetentionCheckInterval + self.storageSeriesFileMaxConcurrentSnapshotCompactions = storageSeriesFileMaxConcurrentSnapshotCompactions + self.storageSeriesIdSetCacheSize = storageSeriesIdSetCacheSize + self.storageWalMaxConcurrentWrites = storageWalMaxConcurrentWrites + self.storageWalMaxWriteDelay = storageWalMaxWriteDelay self.tracingType = tracingType + self.uiDisabled = uiDisabled } private enum CodingKeys: String, CodingKey { case fluxLogEnabled = "fluxLogEnabled" + case httpIdleTimeout = "httpIdleTimeout" + case httpReadHeaderTimeout = "httpReadHeaderTimeout" + case httpReadTimeout = "httpReadTimeout" + case httpWriteTimeout = "httpWriteTimeout" + case influxqlMaxSelectBuckets = "influxqlMaxSelectBuckets" + case influxqlMaxSelectPoint = "influxqlMaxSelectPoint" + case influxqlMaxSelectSeries = "influxqlMaxSelectSeries" case logLevel = "logLevel" case metricsDisabled = "metricsDisabled" case noTasks = "noTasks" + case pprofDisabled = "pprofDisabled" case queryConcurrency = "queryConcurrency" + case queryInitialMemoryBytes = "queryInitialMemoryBytes" + case queryMaxMemoryBytes = "queryMaxMemoryBytes" + case queryMemoryBytes = "queryMemoryBytes" case queryQueueSize = "queryQueueSize" + case 
sessionLength = "sessionLength" + case sessionRenewDisabled = "sessionRenewDisabled" + case storageCacheMaxMemorySize = "storageCacheMaxMemorySize" + case storageCacheSnapshotMemorySize = "storageCacheSnapshotMemorySize" + case storageCacheSnapshotWriteColdDuration = "storageCacheSnapshotWriteColdDuration" + case storageCompactFullWriteColdDuration = "storageCompactFullWriteColdDuration" + case storageCompactThroughputBurst = "storageCompactThroughputBurst" + case storageMaxConcurrentCompactions = "storageMaxConcurrentCompactions" + case storageMaxIndexLogFileSize = "storageMaxIndexLogFileSize" + case storageNoValidateFieldSize = "storageNoValidateFieldSize" + case storageRetentionCheckInterval = "storageRetentionCheckInterval" + case storageSeriesFileMaxConcurrentSnapshotCompactions = "storageSeriesFileMaxConcurrentSnapshotCompactions" + case storageSeriesIdSetCacheSize = "storageSeriesIdSetCacheSize" + case storageWalMaxConcurrentWrites = "storageWalMaxConcurrentWrites" + case storageWalMaxWriteDelay = "storageWalMaxWriteDelay" case tracingType = "tracingType" + case uiDisabled = "uiDisabled" } } @@ -920,14 +1076,17 @@ extension TimestreamInfluxDB { public let identifier: String /// Configuration for sending InfluxDB engine logs to send to specified S3 bucket. public let logDeliveryConfiguration: LogDeliveryConfiguration? + /// The port number on which InfluxDB accepts connections. If you change the Port value, your database restarts immediately. Valid Values: 1024-65535 Default: 8086 Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 + public let port: Int? @inlinable - public init(dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, deploymentType: DeploymentType? = nil, identifier: String, logDeliveryConfiguration: LogDeliveryConfiguration? = nil) { + public init(dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, deploymentType: DeploymentType? = nil, identifier: String, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, port: Int? = nil) { self.dbInstanceType = dbInstanceType self.dbParameterGroupIdentifier = dbParameterGroupIdentifier self.deploymentType = deploymentType self.identifier = identifier self.logDeliveryConfiguration = logDeliveryConfiguration + self.port = port } public func validate(name: String) throws { @@ -937,6 +1096,8 @@ extension TimestreamInfluxDB { try self.validate(self.identifier, name: "identifier", parent: name, max: 64) try self.validate(self.identifier, name: "identifier", parent: name, min: 3) try self.validate(self.identifier, name: "identifier", parent: name, pattern: "^[a-zA-Z0-9]+$") + try self.validate(self.port, name: "port", parent: name, max: 65535) + try self.validate(self.port, name: "port", parent: name, min: 1024) } private enum CodingKeys: String, CodingKey { @@ -945,6 +1106,7 @@ extension TimestreamInfluxDB { case deploymentType = "deploymentType" case identifier = "identifier" case logDeliveryConfiguration = "logDeliveryConfiguration" + case port = "port" } } @@ -973,6 +1135,8 @@ extension TimestreamInfluxDB { public let logDeliveryConfiguration: LogDeliveryConfiguration? /// This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and AWS CLI commands. public let name: String + /// The port number on which InfluxDB accepts connections. + public let port: Int? /// Indicates if the DB instance has a public IP to facilitate access. public let publiclyAccessible: Bool? 
/// The Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance. @@ -985,7 +1149,7 @@ extension TimestreamInfluxDB { public let vpcSubnetIds: [String] @inlinable - public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { + public init(allocatedStorage: Int? = nil, arn: String, availabilityZone: String? = nil, dbInstanceType: DbInstanceType? = nil, dbParameterGroupIdentifier: String? = nil, dbStorageType: DbStorageType? = nil, deploymentType: DeploymentType? = nil, endpoint: String? = nil, id: String, influxAuthParametersSecretArn: String? = nil, logDeliveryConfiguration: LogDeliveryConfiguration? = nil, name: String, port: Int? = nil, publiclyAccessible: Bool? = nil, secondaryAvailabilityZone: String? = nil, status: Status? = nil, vpcSecurityGroupIds: [String]? = nil, vpcSubnetIds: [String]) { self.allocatedStorage = allocatedStorage self.arn = arn self.availabilityZone = availabilityZone @@ -998,6 +1162,7 @@ extension TimestreamInfluxDB { self.influxAuthParametersSecretArn = influxAuthParametersSecretArn self.logDeliveryConfiguration = logDeliveryConfiguration self.name = name + self.port = port self.publiclyAccessible = publiclyAccessible self.secondaryAvailabilityZone = secondaryAvailabilityZone self.status = status @@ -1018,6 +1183,7 @@ extension TimestreamInfluxDB { case influxAuthParametersSecretArn = "influxAuthParametersSecretArn" case logDeliveryConfiguration = "logDeliveryConfiguration" case name = "name" + case port = "port" case publiclyAccessible = "publiclyAccessible" case secondaryAvailabilityZone = "secondaryAvailabilityZone" case status = "status" diff --git a/Sources/Soto/Services/TranscribeStreaming/TranscribeStreaming_api.swift b/Sources/Soto/Services/TranscribeStreaming/TranscribeStreaming_api.swift index 5d27fc242c..f670668348 100644 --- a/Sources/Soto/Services/TranscribeStreaming/TranscribeStreaming_api.swift +++ b/Sources/Soto/Services/TranscribeStreaming/TranscribeStreaming_api.swift @@ -66,6 +66,7 @@ public struct TranscribeStreaming: AWSService { serviceProtocol: .restjson, apiVersion: "2017-10-26", endpoint: endpoint, + variantEndpoints: Self.variantEndpoints, errorType: TranscribeStreamingErrorType.self, middleware: middleware, timeout: timeout, @@ -77,6 +78,17 @@ public struct TranscribeStreaming: AWSService { + /// FIPS and dualstack endpoints + static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ + [.fips]: .init(endpoints: [ + "ca-central-1": "transcribestreaming-fips.ca-central-1.amazonaws.com", + "us-east-1": "transcribestreaming-fips.us-east-1.amazonaws.com", + "us-east-2": "transcribestreaming-fips.us-east-2.amazonaws.com", + "us-gov-east-1": "transcribestreaming-fips.us-gov-east-1.amazonaws.com", + "us-gov-west-1": "transcribestreaming-fips.us-gov-west-1.amazonaws.com", + "us-west-2": "transcribestreaming-fips.us-west-2.amazonaws.com" + ]) + ]} // MARK: API Calls @@ -96,17 +108,17 @@ public struct TranscribeStreaming: AWSService { /// Starts a bidirectional HTTP/2 or 
WebSocket stream where audio is streamed to Amazon Transcribe and the transcription results are streamed to your application. Use this operation for Call Analytics transcriptions. The following parameters are required: language-code media-encoding sample-rate For more information on streaming with Amazon Transcribe, see Transcribing streaming audio. /// /// Parameters: - /// - audioStream: - /// - contentIdentificationType: Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. - /// - contentRedactionType: Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. + /// - audioStream: An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames. For more information, see Transcribing streaming audio. + /// - contentIdentificationType: Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is identified. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. + /// - contentRedactionType: Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is redacted. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. /// - enablePartialResultsStabilization: Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see Partial-result stabilization. - /// - languageCode: Specify the language code that represents the language spoken in your audio. If you're unsure of the language spoken in your audio, consider using IdentifyLanguage to enable automatic language identification. For a list of languages supported with streaming Call Analytics, refer to the Supported languages table. + /// - languageCode: Specify the language code that represents the language spoken in your audio. For a list of languages supported with real-time Call Analytics, refer to the Supported languages table. 
/// - languageModelName: Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch. For more information, see Custom language models. /// - mediaEncoding: Specify the encoding of your input audio. Supported formats are: FLAC OPUS-encoded audio in an Ogg container PCM (only signed 16-bit little-endian audio formats, which does not include WAV) For more information, see Media formats. /// - mediaSampleRateHertz: The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio. /// - partialResultsStability: Specify the level of stability to use when you enable partial results stabilization (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy. For more information, see Partial-result stabilization. - /// - piiEntityTypes: Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. To include PiiEntityTypes in your Call Analytics request, you must also include either ContentIdentificationType or ContentRedactionType. Values must be comma-separated and can include: BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, ADDRESS, NAME, PHONE, SSN, or ALL. - /// - sessionId: Specify a name for your Call Analytics transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response. You can use a session ID to retry a streaming session. + /// - piiEntityTypes: Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. Values must be comma-separated and can include: ADDRESS, BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL, NAME, PHONE, PIN, SSN, or ALL. Note that if you include PiiEntityTypes in your request, you must also include ContentIdentificationType or ContentRedactionType. If you include ContentRedactionType or ContentIdentificationType in your request, but do not include PiiEntityTypes, all PII is redacted or identified. + /// - sessionId: Specify a name for your Call Analytics transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response. /// - vocabularyFilterMethod: Specify how you want your vocabulary filter applied to your transcript. To replace words with ***, choose mask. To delete words, choose remove. To flag words without changing them, choose tag. /// - vocabularyFilterName: Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive. 
If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription. For more information, see Using vocabulary filtering with unwanted words. /// - vocabularyName: Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive. If the language of the specified custom vocabulary doesn't match the language identified in your media, the custom vocabulary is not applied to your transcription. For more information, see Custom vocabularies. @@ -166,12 +178,12 @@ public struct TranscribeStreaming: AWSService { /// Parameters: /// - audioStream: /// - contentIdentificationType: Labels all personal health information (PHI) identified in your transcript. Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment. For more information, see Identifying personal health information (PHI) in a transcription. - /// - enableChannelIdentification: Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel. For more information, see Transcribing multi-channel audio. + /// - enableChannelIdentification: Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel. If you include EnableChannelIdentification in your request, you must also include NumberOfChannels. For more information, see Transcribing multi-channel audio. /// - languageCode: Specify the language code that represents the language spoken in your audio. Amazon Transcribe Medical only supports US English (en-US). /// - mediaEncoding: Specify the encoding used for the input audio. Supported formats are: FLAC OPUS-encoded audio in an Ogg container PCM (only signed 16-bit little-endian audio formats, which does not include WAV) For more information, see Media formats. /// - mediaSampleRateHertz: The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio. - /// - numberOfChannels: Specify the number of channels in your audio stream. Up to two channels are supported. - /// - sessionId: Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response. You can use a session ID to retry a streaming session. + /// - numberOfChannels: Specify the number of channels in your audio stream. This value must be 2, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request. If you include NumberOfChannels in your request, you must also include EnableChannelIdentification. + /// - sessionId: Specify a name for your transcription session. 
If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response. /// - showSpeakerLabel: Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file. For more information, see Partitioning speakers (diarization). /// - specialty: Specify the medical specialty contained in your audio. /// - type: Specify the type of input audio. For example, choose DICTATION for a provider dictating patient notes and CONVERSATION for a dialogue between a patient and a medical professional. @@ -227,22 +239,22 @@ public struct TranscribeStreaming: AWSService { /// /// Parameters: /// - audioStream: An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames. For more information, see Transcribing streaming audio. - /// - contentIdentificationType: Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. - /// - contentRedactionType: Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. - /// - enableChannelIdentification: Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel. For more information, see Transcribing multi-channel audio. + /// - contentIdentificationType: Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is identified. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. + /// - contentRedactionType: Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is redacted. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. 
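To make the PiiEntityTypes / ContentRedactionType pairing described above concrete, a hedged sketch of a redacted streaming request follows; the audio event stream and service object are assumed to be supplied by the caller, and the entity-type list is illustrative.

```swift
import SotoTranscribeStreaming

// Sketch only: redaction requires pairing ContentRedactionType with PiiEntityTypes
// (or omitting PiiEntityTypes to redact all PII). Building the audio event stream
// depends on your audio source, so it is passed in here.
func startRedactedTranscription(
    transcribe: TranscribeStreaming,
    audio: AWSEventStream<TranscribeStreaming.AudioStream>
) async throws -> TranscribeStreaming.StartStreamTranscriptionResponse {
    let request = TranscribeStreaming.StartStreamTranscriptionRequest(
        audioStream: audio,
        contentRedactionType: .pii,           // required whenever piiEntityTypes is set
        languageCode: .enUs,
        mediaEncoding: .pcm,
        mediaSampleRateHertz: 16000,
        piiEntityTypes: "NAME,EMAIL,PHONE"    // comma-separated, or "ALL"
    )
    return try await transcribe.startStreamTranscription(request)
}
```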
+ /// - enableChannelIdentification: Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel. If you include EnableChannelIdentification in your request, you must also include NumberOfChannels. For more information, see Transcribing multi-channel audio. /// - enablePartialResultsStabilization: Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see Partial-result stabilization. - /// - identifyLanguage: Enables automatic language identification for your transcription. If you include IdentifyLanguage, you can optionally include a list of language codes, using LanguageOptions, that you think may be present in your audio stream. Including language options can improve transcription accuracy. You can also include a preferred language using PreferredLanguage. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter. If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel. Note that you must include either LanguageCode or IdentifyLanguage or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails. Streaming language identification can't be combined with custom language models or redaction. - /// - identifyMultipleLanguages: Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead. If you include IdentifyMultipleLanguages, you can optionally include a list of language codes, using LanguageOptions, that you think may be present in your stream. Including LanguageOptions restricts IdentifyMultipleLanguages to only the language options that you specify, which can improve transcription accuracy. If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include VocabularyNames or VocabularyFilterNames. Note that you must include one of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails. + /// - identifyLanguage: Enables automatic language identification for your transcription. If you include IdentifyLanguage, you must include a list of language codes, using LanguageOptions, that you think may be present in your audio stream. You can also include a preferred language using PreferredLanguage. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter. If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel. Note that you must include either LanguageCode or IdentifyLanguage or IdentifyMultipleLanguages in your request. 
If you include more than one of these parameters, your transcription job fails. Streaming language identification can't be combined with custom language models or redaction. + /// - identifyMultipleLanguages: Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead. If you include IdentifyMultipleLanguages, you must include a list of language codes, using LanguageOptions, that you think may be present in your stream. If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include VocabularyNames or VocabularyFilterNames. Note that you must include one of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails. /// - languageCode: Specify the language code that represents the language spoken in your audio. If you're unsure of the language spoken in your audio, consider using IdentifyLanguage to enable automatic language identification. For a list of languages supported with Amazon Transcribe streaming, refer to the Supported languages table. /// - languageModelName: Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch. For more information, see Custom language models. - /// - languageOptions: Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. If you're unsure what languages are present, do not include this parameter. Including language options can improve the accuracy of language identification. If you include LanguageOptions in your request, you must also include IdentifyLanguage. For a list of languages supported with Amazon Transcribe streaming, refer to the Supported languages table. You can only include one language dialect per language per stream. For example, you cannot include en-US and en-AU in the same request. + /// - languageOptions: Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. Including language options can improve the accuracy of language identification. If you include LanguageOptions in your request, you must also include IdentifyLanguage or IdentifyMultipleLanguages. For a list of languages supported with Amazon Transcribe streaming, refer to the Supported languages table. You can only include one language dialect per language per stream. For example, you cannot include en-US and en-AU in the same request. /// - mediaEncoding: Specify the encoding of your input audio. Supported formats are: FLAC OPUS-encoded audio in an Ogg container PCM (only signed 16-bit little-endian audio formats, which does not include WAV) For more information, see Media formats. /// - mediaSampleRateHertz: The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. 
Note that the sample rate you specify must match that of your audio. - /// - numberOfChannels: Specify the number of channels in your audio stream. Up to two channels are supported. + /// - numberOfChannels: Specify the number of channels in your audio stream. This value must be 2, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request. If you include NumberOfChannels in your request, you must also include EnableChannelIdentification. /// - partialResultsStability: Specify the level of stability to use when you enable partial results stabilization (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy. For more information, see Partial-result stabilization. - /// - piiEntityTypes: Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. To include PiiEntityTypes in your request, you must also include either ContentIdentificationType or ContentRedactionType. Values must be comma-separated and can include: BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, ADDRESS, NAME, PHONE, SSN, or ALL. + /// - piiEntityTypes: Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. Values must be comma-separated and can include: ADDRESS, BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL, NAME, PHONE, PIN, SSN, or ALL. Note that if you include PiiEntityTypes in your request, you must also include ContentIdentificationType or ContentRedactionType. If you include ContentRedactionType or ContentIdentificationType in your request, but do not include PiiEntityTypes, all PII is redacted or identified. /// - preferredLanguage: Specify a preferred language from the subset of languages codes you specified in LanguageOptions. You can only use this parameter if you've included IdentifyLanguage and LanguageOptions in your request. - /// - sessionId: Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response. You can use a session ID to retry a streaming session. + /// - sessionId: Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response. /// - showSpeakerLabel: Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file. For more information, see Partitioning speakers (diarization). /// - vocabularyFilterMethod: Specify how you want your vocabulary filter applied to your transcript. To replace words with ***, choose mask. To delete words, choose remove. To flag words without changing them, choose tag. /// - vocabularyFilterName: Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive. If the language of the specified custom vocabulary filter doesn't match the language identified in your media, the vocabulary filter is not applied to your transcription. 
This parameter is not intended for use with the IdentifyLanguage parameter. If you're including IdentifyLanguage in your request and want to use one or more vocabulary filters with your transcription, use the VocabularyFilterNames parameter instead. For more information, see Using vocabulary filtering with unwanted words. diff --git a/Sources/Soto/Services/TranscribeStreaming/TranscribeStreaming_shapes.swift b/Sources/Soto/Services/TranscribeStreaming/TranscribeStreaming_shapes.swift index c222531a59..8d1c5cb9ab 100644 --- a/Sources/Soto/Services/TranscribeStreaming/TranscribeStreaming_shapes.swift +++ b/Sources/Soto/Services/TranscribeStreaming/TranscribeStreaming_shapes.swift @@ -62,20 +62,60 @@ extension TranscribeStreaming { } public enum LanguageCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case afZa = "af-ZA" + case arAe = "ar-AE" + case arSa = "ar-SA" + case caEs = "ca-ES" + case csCz = "cs-CZ" + case daDk = "da-DK" + case deCh = "de-CH" case deDe = "de-DE" + case elGr = "el-GR" + case enAb = "en-AB" case enAu = "en-AU" case enGb = "en-GB" + case enIe = "en-IE" + case enIn = "en-IN" + case enNz = "en-NZ" case enUs = "en-US" + case enWl = "en-WL" + case enZa = "en-ZA" + case esEs = "es-ES" case esUs = "es-US" + case euEs = "eu-ES" + case faIr = "fa-IR" + case fiFi = "fi-FI" case frCa = "fr-CA" case frFr = "fr-FR" + case glEs = "gl-ES" + case heIl = "he-IL" case hiIn = "hi-IN" + case hrHr = "hr-HR" + case idId = "id-ID" case itIt = "it-IT" case jaJp = "ja-JP" case koKr = "ko-KR" + case lvLv = "lv-LV" + case msMy = "ms-MY" + case nlNl = "nl-NL" + case noNo = "no-NO" + case plPl = "pl-PL" case ptBr = "pt-BR" + case ptPt = "pt-PT" + case roRo = "ro-RO" + case ruRu = "ru-RU" + case skSk = "sk-SK" + case soSo = "so-SO" + case srRs = "sr-RS" + case svSe = "sv-SE" case thTh = "th-TH" + case tlPh = "tl-PH" + case ukUa = "uk-UA" + case viVn = "vi-VN" case zhCn = "zh-CN" + case zhHk = "zh-HK" + case zhTw = "zh-TW" + case zuZa = "zu-ZA" public var description: String { return self.rawValue } } @@ -510,7 +550,7 @@ extension TranscribeStreaming { public struct ConfigurationEvent: AWSEncodableShape { /// Indicates which speaker is on which audio channel. public let channelDefinitions: [ChannelDefinition]? - /// Provides additional optional settings for your Call Analytics post-call request, including encryption and output locations for your redacted and unredacted transcript. + /// Provides additional optional settings for your Call Analytics post-call request, including encryption and output locations for your redacted transcript. PostCallAnalyticsSettings provides you with the same insights as a Call Analytics post-call transcription. Refer to Post-call analytics for more information on this feature. public let postCallAnalyticsSettings: PostCallAnalyticsSettings? @inlinable @@ -847,7 +887,7 @@ extension TranscribeStreaming { public let contentRedactionOutput: ContentRedactionOutput? /// The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role that you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails. IAM role ARNs have the format arn:partition:iam::account:role/role-name-with-path. For example: arn:aws:iam::111122223333:role/Admin. For more information, see IAM ARNs. public let dataAccessRoleArn: String - /// The KMS key you want to use to encrypt your Call Analytics post-call output. 
If using a key located in the current Amazon Web Services account, you can specify your KMS key in one of four ways: Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Use an alias for the KMS key ID. For example, alias/ExampleAlias. Use the Amazon Resource Name (ARN) for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. If using a key located in a different Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways: Use the ARN for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. Note that the user making the request must have permission to use the specified KMS key. + /// The KMS key you want to use to encrypt your Call Analytics post-call output. If using a key located in the current Amazon Web Services account, you can specify your KMS key in one of four ways: Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. Use an alias for the KMS key ID. For example, alias/ExampleAlias. Use the Amazon Resource Name (ARN) for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. If using a key located in a different Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways: Use the ARN for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias. Note that the role making the request must have permission to use the specified KMS key. public let outputEncryptionKMSKeyId: String? /// The Amazon S3 location where you want your Call Analytics post-call transcription output stored. You can use any of the following formats to specify the output location: s3://DOC-EXAMPLE-BUCKET s3://DOC-EXAMPLE-BUCKET/my-output-folder/ s3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json public let outputLocation: String @@ -924,14 +964,15 @@ extension TranscribeStreaming { } public struct StartCallAnalyticsStreamTranscriptionRequest: AWSEncodableShape { + /// An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames. For more information, see Transcribing streaming audio. public let audioStream: AWSEventStream - /// Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. + /// Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is identified. 
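A small sketch of the post-call analytics configuration referenced above; the role ARN, KMS alias, and bucket path are the placeholder values from the documentation text, and the configuration event is expected to be sent as the first item on the Call Analytics audio stream.

```swift
import SotoTranscribeStreaming

// Sketch only: a ConfigurationEvent enabling post-call analytics for a
// Call Analytics stream. All identifiers below are placeholders.
let postCallSettings = TranscribeStreaming.PostCallAnalyticsSettings(
    dataAccessRoleArn: "arn:aws:iam::111122223333:role/Admin",
    outputEncryptionKMSKeyId: "alias/ExampleAlias",   // any of the four key formats above
    outputLocation: "s3://DOC-EXAMPLE-BUCKET/my-output-folder/"
)
let configurationEvent = TranscribeStreaming.ConfigurationEvent(
    postCallAnalyticsSettings: postCallSettings
)
```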
You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. public let contentIdentificationType: ContentIdentificationType? - /// Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. + /// Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is redacted. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. public let contentRedactionType: ContentRedactionType? /// Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see Partial-result stabilization. public let enablePartialResultsStabilization: Bool? - /// Specify the language code that represents the language spoken in your audio. If you're unsure of the language spoken in your audio, consider using IdentifyLanguage to enable automatic language identification. For a list of languages supported with streaming Call Analytics, refer to the Supported languages table. + /// Specify the language code that represents the language spoken in your audio. For a list of languages supported with real-time Call Analytics, refer to the Supported languages table. public let languageCode: CallAnalyticsLanguageCode /// Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch. For more information, see Custom language models. public let languageModelName: String? @@ -941,9 +982,9 @@ extension TranscribeStreaming { public let mediaSampleRateHertz: Int /// Specify the level of stability to use when you enable partial results stabilization (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy. For more information, see Partial-result stabilization. public let partialResultsStability: PartialResultsStability? - /// Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. To include PiiEntityTypes in your Call Analytics request, you must also include either ContentIdentificationType or ContentRedactionType. 
Values must be comma-separated and can include: BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, ADDRESS, NAME, PHONE, SSN, or ALL. + /// Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. Values must be comma-separated and can include: ADDRESS, BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL, NAME, PHONE, PIN, SSN, or ALL. Note that if you include PiiEntityTypes in your request, you must also include ContentIdentificationType or ContentRedactionType. If you include ContentRedactionType or ContentIdentificationType in your request, but do not include PiiEntityTypes, all PII is redacted or identified. public let piiEntityTypes: String? - /// Specify a name for your Call Analytics transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response. You can use a session ID to retry a streaming session. + /// Specify a name for your Call Analytics transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response. public let sessionId: String? /// Specify how you want your vocabulary filter applied to your transcript. To replace words with ***, choose mask. To delete words, choose remove. To flag words without changing them, choose tag. public let vocabularyFilterMethod: VocabularyFilterMethod? @@ -1014,7 +1055,7 @@ extension TranscribeStreaming { public struct StartCallAnalyticsStreamTranscriptionResponse: AWSDecodableShape { public static let _options: AWSShapeOptions = [.rawPayload] - /// Provides detailed information about your Call Analytics streaming session. + /// Provides detailed information about your real-time Call Analytics session. public let callAnalyticsTranscriptResultStream: AWSEventStream /// Shows whether content identification was enabled for your Call Analytics transcription. public let contentIdentificationType: ContentIdentificationType? @@ -1034,7 +1075,7 @@ extension TranscribeStreaming { public let partialResultsStability: PartialResultsStability? /// Lists the PII entity types you specified in your Call Analytics request. public let piiEntityTypes: String? - /// Provides the identifier for your Call Analytics streaming request. + /// Provides the identifier for your real-time Call Analytics request. public let requestId: String? /// Provides the identifier for your Call Analytics transcription session. public let sessionId: String? @@ -1091,7 +1132,7 @@ extension TranscribeStreaming { public let audioStream: AWSEventStream /// Labels all personal health information (PHI) identified in your transcript. Content identification is performed at the segment level; PHI is flagged upon complete transcription of an audio segment. For more information, see Identifying personal health information (PHI) in a transcription. public let contentIdentificationType: MedicalContentIdentificationType? - /// Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel. 
For more information, see Transcribing multi-channel audio. + /// Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel. If you include EnableChannelIdentification in your request, you must also include NumberOfChannels. For more information, see Transcribing multi-channel audio. public let enableChannelIdentification: Bool? /// Specify the language code that represents the language spoken in your audio. Amazon Transcribe Medical only supports US English (en-US). public let languageCode: LanguageCode @@ -1099,9 +1140,9 @@ extension TranscribeStreaming { public let mediaEncoding: MediaEncoding /// The sample rate of the input audio (in hertz). Amazon Transcribe Medical supports a range from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio. public let mediaSampleRateHertz: Int - /// Specify the number of channels in your audio stream. Up to two channels are supported. + /// Specify the number of channels in your audio stream. This value must be 2, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request. If you include NumberOfChannels in your request, you must also include EnableChannelIdentification. public let numberOfChannels: Int? - /// Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response. You can use a session ID to retry a streaming session. + /// Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe Medical generates an ID and returns it in the response. public let sessionId: String? /// Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file. For more information, see Partitioning speakers (diarization). public let showSpeakerLabel: Bool? @@ -1230,37 +1271,37 @@ extension TranscribeStreaming { public struct StartStreamTranscriptionRequest: AWSEncodableShape { /// An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket data frames. For more information, see Transcribing streaming audio. public let audioStream: AWSEventStream - /// Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. + /// Labels all personally identifiable information (PII) identified in your transcript. Content identification is performed at the segment level; PII specified in PiiEntityTypes is flagged upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is identified. You can’t set ContentIdentificationType and ContentRedactionType in the same request. If you set both, your request returns a BadRequestException. 
For more information, see Redacting or identifying personally identifiable information. public let contentIdentificationType: ContentIdentificationType? - /// Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. + /// Redacts all personally identifiable information (PII) identified in your transcript. Content redaction is performed at the segment level; PII specified in PiiEntityTypes is redacted upon complete transcription of an audio segment. If you don't include PiiEntityTypes in your request, all PII is redacted. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException. For more information, see Redacting or identifying personally identifiable information. public let contentRedactionType: ContentRedactionType? - /// Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel. For more information, see Transcribing multi-channel audio. + /// Enables channel identification in multi-channel audio. Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript. If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript is not separated by channel. If you include EnableChannelIdentification in your request, you must also include NumberOfChannels. For more information, see Transcribing multi-channel audio. public let enableChannelIdentification: Bool? /// Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy. For more information, see Partial-result stabilization. public let enablePartialResultsStabilization: Bool? - /// Enables automatic language identification for your transcription. If you include IdentifyLanguage, you can optionally include a list of language codes, using LanguageOptions, that you think may be present in your audio stream. Including language options can improve transcription accuracy. You can also include a preferred language using PreferredLanguage. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter. If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel. Note that you must include either LanguageCode or IdentifyLanguage or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails. Streaming language identification can't be combined with custom language models or redaction. + /// Enables automatic language identification for your transcription. 
If you include IdentifyLanguage, you must include a list of language codes, using LanguageOptions, that you think may be present in your audio stream. You can also include a preferred language using PreferredLanguage. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter. If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel. Note that you must include either LanguageCode or IdentifyLanguage or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails. Streaming language identification can't be combined with custom language models or redaction. public let identifyLanguage: Bool? - /// Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead. If you include IdentifyMultipleLanguages, you can optionally include a list of language codes, using LanguageOptions, that you think may be present in your stream. Including LanguageOptions restricts IdentifyMultipleLanguages to only the language options that you specify, which can improve transcription accuracy. If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include VocabularyNames or VocabularyFilterNames. Note that you must include one of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails. + /// Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead. If you include IdentifyMultipleLanguages, you must include a list of language codes, using LanguageOptions, that you think may be present in your stream. If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include VocabularyNames or VocabularyFilterNames. Note that you must include one of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails. public let identifyMultipleLanguages: Bool? /// Specify the language code that represents the language spoken in your audio. If you're unsure of the language spoken in your audio, consider using IdentifyLanguage to enable automatic language identification. For a list of languages supported with Amazon Transcribe streaming, refer to the Supported languages table. public let languageCode: LanguageCode? /// Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch. For more information, see Custom language models. public let languageModelName: String? 
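A minimal sketch (not part of this diff) of how the constraints documented above fit together when building a streaming request with Soto: content redaction paired with PiiEntityTypes, and channel identification paired with NumberOfChannels. Building the audio event stream is omitted, and the enum case spellings (.pii, .pcm) are assumed from Soto's usual code generation rather than taken from this change.

import SotoTranscribeStreaming

// Hedged sketch: a StartStreamTranscriptionRequest with PII redaction and channel identification.
func makeRedactedRequest(
    audio: AWSEventStream<TranscribeStreaming.AudioStream>,
    languageCode: TranscribeStreaming.LanguageCode
) -> TranscribeStreaming.StartStreamTranscriptionRequest {
    TranscribeStreaming.StartStreamTranscriptionRequest(
        audioStream: audio,
        contentRedactionType: .pii,          // mutually exclusive with contentIdentificationType
        enableChannelIdentification: true,   // must be paired with numberOfChannels
        languageCode: languageCode,
        mediaEncoding: .pcm,
        mediaSampleRateHertz: 16_000,
        numberOfChannels: 2,                 // only two channels are supported
        piiEntityTypes: "NAME,EMAIL,SSN"     // requires content redaction or identification
    )
}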
- /// Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. If you're unsure what languages are present, do not include this parameter. Including language options can improve the accuracy of language identification. If you include LanguageOptions in your request, you must also include IdentifyLanguage. For a list of languages supported with Amazon Transcribe streaming, refer to the Supported languages table. You can only include one language dialect per language per stream. For example, you cannot include en-US and en-AU in the same request. + /// Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. Including language options can improve the accuracy of language identification. If you include LanguageOptions in your request, you must also include IdentifyLanguage or IdentifyMultipleLanguages. For a list of languages supported with Amazon Transcribe streaming, refer to the Supported languages table. You can only include one language dialect per language per stream. For example, you cannot include en-US and en-AU in the same request. public let languageOptions: String? /// Specify the encoding of your input audio. Supported formats are: FLAC OPUS-encoded audio in an Ogg container PCM (only signed 16-bit little-endian audio formats, which does not include WAV) For more information, see Media formats. public let mediaEncoding: MediaEncoding /// The sample rate of the input audio (in hertz). Low-quality audio, such as telephone audio, is typically around 8,000 Hz. High-quality audio typically ranges from 16,000 Hz to 48,000 Hz. Note that the sample rate you specify must match that of your audio. public let mediaSampleRateHertz: Int - /// Specify the number of channels in your audio stream. Up to two channels are supported. + /// Specify the number of channels in your audio stream. This value must be 2, as only two channels are supported. If your audio doesn't contain multiple channels, do not include this parameter in your request. If you include NumberOfChannels in your request, you must also include EnableChannelIdentification. public let numberOfChannels: Int? /// Specify the level of stability to use when you enable partial results stabilization (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy. For more information, see Partial-result stabilization. public let partialResultsStability: PartialResultsStability? - /// Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. To include PiiEntityTypes in your request, you must also include either ContentIdentificationType or ContentRedactionType. Values must be comma-separated and can include: BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, ADDRESS, NAME, PHONE, SSN, or ALL. + /// Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. Values must be comma-separated and can include: ADDRESS, BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL, NAME, PHONE, PIN, SSN, or ALL. 
Note that if you include PiiEntityTypes in your request, you must also include ContentIdentificationType or ContentRedactionType. If you include ContentRedactionType or ContentIdentificationType in your request, but do not include PiiEntityTypes, all PII is redacted or identified. public let piiEntityTypes: String? /// Specify a preferred language from the subset of languages codes you specified in LanguageOptions. You can only use this parameter if you've included IdentifyLanguage and LanguageOptions in your request. public let preferredLanguage: LanguageCode? - /// Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response. You can use a session ID to retry a streaming session. + /// Specify a name for your transcription session. If you don't include this parameter in your request, Amazon Transcribe generates an ID and returns it in the response. public let sessionId: String? /// Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file. For more information, see Partitioning speakers (diarization). public let showSpeakerLabel: Bool? @@ -1369,7 +1410,7 @@ extension TranscribeStreaming { public let contentIdentificationType: ContentIdentificationType? /// Shows whether content redaction was enabled for your transcription. public let contentRedactionType: ContentRedactionType? - /// Shows whether channel identification was enabled for your transcription. + /// Shows whether channel identification was enabled for your transcription. public let enableChannelIdentification: Bool? /// Shows whether partial results stabilization was enabled for your transcription. public let enablePartialResultsStabilization: Bool? @@ -1601,7 +1642,7 @@ public struct TranscribeStreamingErrorType: AWSErrorType { /// return error code string public var errorCode: String { self.error.rawValue } - /// One or more arguments to the StartStreamTranscription, StartMedicalStreamTranscription, or StartCallAnalyticsStreamTranscription operation was not valid. For example, MediaEncoding or LanguageCode used not valid values. Check the specified parameters and try your request again. + /// One or more arguments to the StartStreamTranscription, StartMedicalStreamTranscription, or StartCallAnalyticsStreamTranscription operation was not valid. For example, MediaEncoding or LanguageCode used unsupported values. Check the specified parameters and try your request again. public static var badRequestException: Self { .init(.badRequestException) } /// A new stream started with the same session ID. The current stream has been terminated. public static var conflictException: Self { .init(.conflictException) } diff --git a/Sources/Soto/Services/Transfer/Transfer_api.swift b/Sources/Soto/Services/Transfer/Transfer_api.swift index d147dcc1a2..a6a5fdd8e5 100644 --- a/Sources/Soto/Services/Transfer/Transfer_api.swift +++ b/Sources/Soto/Services/Transfer/Transfer_api.swift @@ -25,7 +25,7 @@ import Foundation /// Service object for interacting with AWS Transfer service. /// -/// Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. 
Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up. +/// Transfer Family is a fully managed service that enables the transfer of files over the File Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS. Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3. Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating with existing authentication systems, and providing DNS routing with Amazon Route 53 so nothing changes for your customers and partners, or their applications. With your data in Amazon S3, you can use it with Amazon Web Services services for processing, analytics, machine learning, and archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and set up. public struct Transfer: AWSService { // MARK: Member variables @@ -161,7 +161,7 @@ public struct Transfer: AWSService { /// /// Parameters: /// - accessRole: Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use. For AS2 connectors With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer. If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager. - /// - baseDirectory: The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /DOC-EXAMPLE-BUCKET/home/mydirectory. + /// - baseDirectory: The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory. 
/// - description: A name or short description to identify the agreement. /// - localProfileId: A unique identifier for the AS2 local profile. /// - partnerProfileId: A unique identifier for the partner profile used in the agreement. @@ -1384,6 +1384,44 @@ public struct Transfer: AWSService { return try await self.listExecutions(input, logger: logger) } + /// Returns real-time updates and detailed information on the status of each individual file being transferred in a specific file transfer operation. You specify the file transfer by providing its ConnectorId and its TransferId. File transfer results are available up to 7 days after an operation has been requested. + @Sendable + @inlinable + public func listFileTransferResults(_ input: ListFileTransferResultsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFileTransferResultsResponse { + try await self.client.execute( + operation: "ListFileTransferResults", + path: "/listFileTransferResults", + httpMethod: .POST, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Returns real-time updates and detailed information on the status of each individual file being transferred in a specific file transfer operation. You specify the file transfer by providing its ConnectorId and its TransferId. File transfer results are available up to 7 days after an operation has been requested. + /// + /// Parameters: + /// - connectorId: A unique identifier for a connector. This value should match the value supplied to the corresponding StartFileTransfer call. + /// - maxResults: The maximum number of files to return in a single page. Note that currently you can specify a maximum of 10 file paths in a single StartFileTransfer operation. Thus, the maximum number of file transfer results that can be returned in a single page is 10. + /// - nextToken: If there are more file details than returned in this call, use this value for a subsequent call to ListFileTransferResults to retrieve them. + /// - transferId: A unique identifier for a file transfer. This value should match the value supplied to the corresponding StartFileTransfer call. + /// - logger: Logger use during operation + @inlinable + public func listFileTransferResults( + connectorId: String, + maxResults: Int? = nil, + nextToken: String? = nil, + transferId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListFileTransferResultsResponse { + let input = ListFileTransferResultsRequest( + connectorId: connectorId, + maxResults: maxResults, + nextToken: nextToken, + transferId: transferId + ) + return try await self.listFileTransferResults(input, logger: logger) + } + /// Returns a list of host keys for the server that's specified by the ServerId parameter. @Sendable @inlinable @@ -1716,7 +1754,7 @@ public struct Transfer: AWSService { /// - localDirectoryPath: For an inbound transfer, the LocaDirectoryPath specifies the destination for one or more files that are transferred from the partner's SFTP server. /// - remoteDirectoryPath: For an outbound transfer, the RemoteDirectoryPath specifies the destination for one or more files that are transferred to the partner's SFTP server. If you don't specify a RemoteDirectoryPath, the destination for transferred files is the SFTP user's home directory. /// - retrieveFilePaths: One or more source paths for the partner's SFTP server. Each string represents a source file path for one inbound file transfer. - /// - sendFilePaths: One or more source paths for the Amazon S3 storage. 
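A hedged sketch tying the new ListFileTransferResults operation to StartFileTransfer: send one file over an SFTP connector, then page through the per-file results, which are retained for up to 7 days. The connector ID and bucket name are placeholders, and the StartFileTransfer convenience signature is assumed to mirror the parameter docs above.

import SotoTransfer

// Hedged sketch: start a transfer and poll its per-file status.
func sendAndCheck(transfer: Transfer) async throws {
    let started = try await transfer.startFileTransfer(
        connectorId: "c-0123456789abcdef0",
        sendFilePaths: ["amzn-s3-demo-bucket/myfile.txt"]
    )
    var nextToken: String? = nil
    repeat {
        let page = try await transfer.listFileTransferResults(
            connectorId: "c-0123456789abcdef0",
            nextToken: nextToken,
            transferId: started.transferId
        )
        for result in page.fileTransferResults {
            print("\(result.filePath): \(result.statusCode)", result.failureMessage ?? "")
        }
        nextToken = page.nextToken
    } while nextToken != nil
}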
Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt . Replace DOC-EXAMPLE-BUCKET with one of your actual buckets. + /// - sendFilePaths: One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, amzn-s3-demo-bucket/myfile.txt . Replace amzn-s3-demo-bucket with one of your actual buckets. /// - logger: Logger use during operation @inlinable public func startFileTransfer( @@ -1997,7 +2035,7 @@ public struct Transfer: AWSService { /// Parameters: /// - accessRole: Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use. For AS2 connectors With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer. If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager. /// - agreementId: A unique identifier for the agreement. This identifier is returned when you create an agreement. - /// - baseDirectory: To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /DOC-EXAMPLE-BUCKET/home/mydirectory . + /// - baseDirectory: To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory . /// - description: To replace the existing description, provide a short description for the agreement. /// - localProfileId: A unique identifier for the AS2 local profile. To change the local profile identifier, provide a new value here. /// - partnerProfileId: A unique identifier for the partner profile. To change the partner profile identifier, provide a new value here. @@ -2199,7 +2237,7 @@ public struct Transfer: AWSService { /// Parameters: /// - certificate: The Amazon Resource Name (ARN) of the Amazon Web ServicesCertificate Manager (ACM) certificate. Required when Protocols is set to FTPS. To request a new public certificate, see Request a public certificate in the Amazon Web ServicesCertificate Manager User Guide. To import an existing certificate into ACM, see Importing certificates into ACM in the Amazon Web ServicesCertificate Manager User Guide. 
To request a private certificate to use FTPS through private IP addresses, see Request a private certificate in the Amazon Web ServicesCertificate Manager User Guide. Certificates with the following cryptographic algorithms and key sizes are supported: 2048-bit RSA (RSA_2048) 4096-bit RSA (RSA_4096) Elliptic Prime Curve 256 bit (EC_prime256v1) Elliptic Prime Curve 384 bit (EC_secp384r1) Elliptic Prime Curve 521 bit (EC_secp521r1) The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer. /// - endpointDetails: The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint. - /// - endpointType: The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it. After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC. For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint. It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT. + /// - endpointType: The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it. After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC. For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint. It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT. /// - hostKey: The RSA, ECDSA, or ED25519 private key to use for your SFTP-enabled server. 
You can add multiple host keys, in case you want to rotate keys, or have a set of active keys that use different algorithms. Use the following command to generate an RSA 2048 bit key with no passphrase: ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key. Use a minimum value of 2048 for the -b option. You can create a stronger key by using 3072 or 4096. Use the following command to generate an ECDSA 256 bit key with no passphrase: ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key. Valid values for the -b option for ECDSA are 256, 384, and 521. Use the following command to generate an ED25519 key with no passphrase: ssh-keygen -t ed25519 -N "" -f my-new-server-key. For all of these commands, you can replace my-new-server-key with a string of your choice. If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive. For more information, see Manage host keys for your SFTP-enabled server in the Transfer Family User Guide. /// - identityProviderDetails: An array containing all of the information required to call a customer's authentication API method. /// - loggingRole: The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs. @@ -2495,6 +2533,46 @@ extension Transfer { return self.listExecutionsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listFileTransferResults(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listFileTransferResultsPaginator( + _ input: ListFileTransferResultsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listFileTransferResults, + inputKey: \ListFileTransferResultsRequest.nextToken, + outputKey: \ListFileTransferResultsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listFileTransferResults(_:logger:)``. + /// + /// - Parameters: + /// - connectorId: A unique identifier for a connector. This value should match the value supplied to the corresponding StartFileTransfer call. + /// - maxResults: The maximum number of files to return in a single page. Note that currently you can specify a maximum of 10 file paths in a single StartFileTransfer operation. Thus, the maximum number of file transfer results that can be returned in a single page is 10. + /// - transferId: A unique identifier for a file transfer. This value should match the value supplied to the corresponding StartFileTransfer call. + /// - logger: Logger used for logging + @inlinable + public func listFileTransferResultsPaginator( + connectorId: String, + maxResults: Int? = nil, + transferId: String, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListFileTransferResultsRequest( + connectorId: connectorId, + maxResults: maxResults, + transferId: transferId + ) + return self.listFileTransferResultsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listProfiles(_:logger:)``. 
/// /// - Parameters: @@ -2762,6 +2840,18 @@ extension Transfer.ListExecutionsRequest: AWSPaginateToken { } } +extension Transfer.ListFileTransferResultsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> Transfer.ListFileTransferResultsRequest { + return .init( + connectorId: self.connectorId, + maxResults: self.maxResults, + nextToken: token, + transferId: self.transferId + ) + } +} + extension Transfer.ListProfilesRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> Transfer.ListProfilesRequest { diff --git a/Sources/Soto/Services/Transfer/Transfer_shapes.swift b/Sources/Soto/Services/Transfer/Transfer_shapes.swift index cbc196cfdb..3213de6ebf 100644 --- a/Sources/Soto/Services/Transfer/Transfer_shapes.swift +++ b/Sources/Soto/Services/Transfer/Transfer_shapes.swift @@ -222,6 +222,14 @@ extension Transfer { public var description: String { return self.rawValue } } + public enum TransferTableStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case completed = "COMPLETED" + case failed = "FAILED" + case inProgress = "IN_PROGRESS" + case queued = "QUEUED" + public var description: String { return self.rawValue } + } + public enum WorkflowStepType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case copy = "COPY" case custom = "CUSTOM" @@ -300,6 +308,32 @@ extension Transfer { } } + public struct ConnectorFileTransferResult: AWSDecodableShape { + /// For transfers that fail, this parameter contains a code indicating the reason. For example, RETRIEVE_FILE_NOT_FOUND + public let failureCode: String? + /// For transfers that fail, this parameter describes the reason for the failure. + public let failureMessage: String? + /// The filename and path to where the file was sent to or retrieved from. + public let filePath: String + /// The current status for the transfer. + public let statusCode: TransferTableStatus + + @inlinable + public init(failureCode: String? = nil, failureMessage: String? = nil, filePath: String, statusCode: TransferTableStatus) { + self.failureCode = failureCode + self.failureMessage = failureMessage + self.filePath = filePath + self.statusCode = statusCode + } + + private enum CodingKeys: String, CodingKey { + case failureCode = "FailureCode" + case failureMessage = "FailureMessage" + case filePath = "FilePath" + case statusCode = "StatusCode" + } + } + public struct CopyStepDetails: AWSEncodableShape & AWSDecodableShape { /// Specifies the location for the file being copied. Use ${Transfer:UserName} or ${Transfer:UploadDate} in this field to parametrize the destination prefix by username or uploaded date. Set the value of DestinationFileLocation to ${Transfer:UserName} to copy uploaded files to an Amazon S3 bucket that is prefixed with the name of the Transfer Family user that uploaded the file. Set the value of DestinationFileLocation to ${Transfer:UploadDate} to copy uploaded files to an Amazon S3 bucket that is prefixed with the date of the upload. The system resolves UploadDate to a date format of YYYY-MM-DD, based on the date the file is uploaded in UTC. public let destinationFileLocation: InputFileLocation? @@ -417,7 +451,7 @@ extension Transfer { public struct CreateAgreementRequest: AWSEncodableShape { /// Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use. 
For AS2 connectors With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer. If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key. For SFTP connectors Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager. public let accessRole: String - /// The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /DOC-EXAMPLE-BUCKET/home/mydirectory. + /// The landing directory (folder) for files transferred by using the AS2 protocol. A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory. public let baseDirectory: String /// A name or short description to identify the agreement. public let description: String? @@ -2875,6 +2909,63 @@ extension Transfer { } } + public struct ListFileTransferResultsRequest: AWSEncodableShape { + /// A unique identifier for a connector. This value should match the value supplied to the corresponding StartFileTransfer call. + public let connectorId: String + /// The maximum number of files to return in a single page. Note that currently you can specify a maximum of 10 file paths in a single StartFileTransfer operation. Thus, the maximum number of file transfer results that can be returned in a single page is 10. + public let maxResults: Int? + /// If there are more file details than returned in this call, use this value for a subsequent call to ListFileTransferResults to retrieve them. + public let nextToken: String? + /// A unique identifier for a file transfer. This value should match the value supplied to the corresponding StartFileTransfer call. + public let transferId: String + + @inlinable + public init(connectorId: String, maxResults: Int? = nil, nextToken: String? 
= nil, transferId: String) { + self.connectorId = connectorId + self.maxResults = maxResults + self.nextToken = nextToken + self.transferId = transferId + } + + public func validate(name: String) throws { + try self.validate(self.connectorId, name: "connectorId", parent: name, max: 19) + try self.validate(self.connectorId, name: "connectorId", parent: name, min: 19) + try self.validate(self.connectorId, name: "connectorId", parent: name, pattern: "^c-([0-9a-f]{17})$") + try self.validate(self.maxResults, name: "maxResults", parent: name, max: 1000) + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 6144) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.transferId, name: "transferId", parent: name, max: 512) + try self.validate(self.transferId, name: "transferId", parent: name, min: 1) + try self.validate(self.transferId, name: "transferId", parent: name, pattern: "^[0-9a-zA-Z./-]+$") + } + + private enum CodingKeys: String, CodingKey { + case connectorId = "ConnectorId" + case maxResults = "MaxResults" + case nextToken = "NextToken" + case transferId = "TransferId" + } + } + + public struct ListFileTransferResultsResponse: AWSDecodableShape { + /// Returns the details for the files transferred in the transfer identified by the TransferId and ConnectorId specified. FilePath: the filename and path to where the file was sent to or retrieved from. StatusCode: current status for the transfer. The status returned is one of the following values:QUEUED, IN_PROGRESS, COMPLETED, or FAILED FailureCode: for transfers that fail, this parameter contains a code indicating the reason. For example, RETRIEVE_FILE_NOT_FOUND FailureMessage: for transfers that fail, this parameter describes the reason for the failure. + public let fileTransferResults: [ConnectorFileTransferResult] + /// Returns a token that you can use to call ListFileTransferResults again and receive additional results, if there are any (against the same TransferId. + public let nextToken: String? + + @inlinable + public init(fileTransferResults: [ConnectorFileTransferResult], nextToken: String? = nil) { + self.fileTransferResults = fileTransferResults + self.nextToken = nextToken + } + + private enum CodingKeys: String, CodingKey { + case fileTransferResults = "FileTransferResults" + case nextToken = "NextToken" + } + } + public struct ListHostKeysRequest: AWSEncodableShape { /// The maximum number of host keys to return. public let maxResults: Int? @@ -3875,7 +3966,7 @@ extension Transfer { public let remoteDirectoryPath: String? /// One or more source paths for the partner's SFTP server. Each string represents a source file path for one inbound file transfer. public let retrieveFilePaths: [String]? - /// One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt . Replace DOC-EXAMPLE-BUCKET with one of your actual buckets. + /// One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, amzn-s3-demo-bucket/myfile.txt . Replace amzn-s3-demo-bucket with one of your actual buckets. public let sendFilePaths: [String]? @inlinable @@ -4286,7 +4377,7 @@ extension Transfer { public let accessRole: String? /// A unique identifier for the agreement. This identifier is returned when you create an agreement. 
public let agreementId: String - /// To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /DOC-EXAMPLE-BUCKET/home/mydirectory . + /// To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory . public let baseDirectory: String? /// To replace the existing description, provide a short description for the agreement. public let description: String? @@ -4577,7 +4668,7 @@ extension Transfer { public let certificate: String? /// The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint. public let endpointDetails: EndpointDetails? - /// The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it. After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC. For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint. It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT. + /// The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it. After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC. For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint. It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT. public let endpointType: EndpointType? 
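A hedged sketch of the recommendation above: move a server to EndpointType=VPC rather than the deprecated VPC_ENDPOINT. The .vpc case spelling, the EndpointDetails member names, and the UpdateServer convenience signature follow Soto's usual code generation and are assumptions; all IDs are placeholders.

import SotoTransfer

// Hedged sketch: switch a Transfer Family server to a VPC-hosted endpoint.
func moveServerIntoVPC(transfer: Transfer) async throws {
    _ = try await transfer.updateServer(
        endpointDetails: Transfer.EndpointDetails(
            subnetIds: ["subnet-0123456789abcdef0"],
            vpcId: "vpc-0123456789abcdef0"
        ),
        endpointType: .vpc,
        serverId: "s-01234567890abcdef"
    )
}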
/// The RSA, ECDSA, or ED25519 private key to use for your SFTP-enabled server. You can add multiple host keys, in case you want to rotate keys, or have a set of active keys that use different algorithms. Use the following command to generate an RSA 2048 bit key with no passphrase: ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key. Use a minimum value of 2048 for the -b option. You can create a stronger key by using 3072 or 4096. Use the following command to generate an ECDSA 256 bit key with no passphrase: ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key. Valid values for the -b option for ECDSA are 256, 384, and 521. Use the following command to generate an ED25519 key with no passphrase: ssh-keygen -t ed25519 -N "" -f my-new-server-key. For all of these commands, you can replace my-new-server-key with a string of your choice. If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive. For more information, see Manage host keys for your SFTP-enabled server in the Transfer Family User Guide. public let hostKey: String? @@ -4815,9 +4906,9 @@ extension Transfer { } public struct WorkflowDetails: AWSEncodableShape & AWSDecodableShape { - /// A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload. A partial upload occurs when a file is open when the session disconnects. + /// A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload. A partial upload occurs when a file is open when the session disconnects. OnPartialUpload can contain a maximum of one WorkflowDetail object. public let onPartialUpload: [WorkflowDetail]? - /// A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example. aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{"OnUpload":[]}' + /// A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example. aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{"OnUpload":[]}' OnUpload can contain a maximum of one WorkflowDetail object. public let onUpload: [WorkflowDetail]? @inlinable diff --git a/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_api.swift b/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_api.swift index 4ddf492b1f..ec5c8bdb42 100644 --- a/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_api.swift +++ b/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_api.swift @@ -170,7 +170,7 @@ public struct VerifiedPermissions: AWSService { return try await self.batchIsAuthorizedWithToken(input, logger: logger) } - /// Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect (OIDC) identity provider (IdP). After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken or BatchIsAuthorizedWithToken API operations. 
These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Identity sources provide identity (ID) tokens and access tokens. Verified Permissions derives information about your user and session from token claims. Access tokens provide action context to your policies, and ID tokens provide principal Attributes. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store To reference a user from this identity source in your Cedar policies, refer to the following syntax examples. Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user principal attribute], for example MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111. OpenID Connect (OIDC) provider: Namespace::[Entity type]::[principalIdClaim]|[user principal attribute], for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222. Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations. + /// Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect (OIDC) identity provider (IdP). After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken or BatchIsAuthorizedWithToken API operations. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Identity sources provide identity (ID) tokens and access tokens. Verified Permissions derives information about your user and session from token claims. Access tokens provide action context to your policies, and ID tokens provide principal Attributes. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store To reference a user from this identity source in your Cedar policies, refer to the following syntax examples. Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user principal attribute], for example MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111. OpenID Connect (OIDC) provider: Namespace::[Entity type]::[entityIdPrefix]|[user principal attribute], for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222. Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations. @Sendable @inlinable public func createIdentitySource(_ input: CreateIdentitySourceInput, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateIdentitySourceOutput { @@ -183,7 +183,7 @@ public struct VerifiedPermissions: AWSService { logger: logger ) } - /// Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect (OIDC) identity provider (IdP). After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken or BatchIsAuthorizedWithToken API operations. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. 
Identity sources provide identity (ID) tokens and access tokens. Verified Permissions derives information about your user and session from token claims. Access tokens provide action context to your policies, and ID tokens provide principal Attributes. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store To reference a user from this identity source in your Cedar policies, refer to the following syntax examples. Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user principal attribute], for example MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111. OpenID Connect (OIDC) provider: Namespace::[Entity type]::[principalIdClaim]|[user principal attribute], for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222. Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations. + /// Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect (OIDC) identity provider (IdP). After you create an identity source, you can use the identities provided by the IdP as proxies for the principal in authorization queries that use the IsAuthorizedWithToken or BatchIsAuthorizedWithToken API operations. These identities take the form of tokens that contain claims about the user, such as IDs, attributes and group memberships. Identity sources provide identity (ID) tokens and access tokens. Verified Permissions derives information about your user and session from token claims. Access tokens provide action context to your policies, and ID tokens provide principal Attributes. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store To reference a user from this identity source in your Cedar policies, refer to the following syntax examples. Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user principal attribute], for example MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111. OpenID Connect (OIDC) provider: Namespace::[Entity type]::[entityIdPrefix]|[user principal attribute], for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222. Verified Permissions is eventually consistent . It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations. /// /// Parameters: /// - clientToken: Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value.. If you don't provide this value, then Amazon Web Services generates a random one for you. If you retry the operation with the same ClientToken, but with different parameters, the retry fails with an ConflictException error. Verified Permissions recognizes a ClientToken for eight hours. After eight hours, the next request with the same parameters performs the operation again regardless of the value of ClientToken. 
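A small, hedged illustration of the principal syntax described above: once a Cognito user pool is attached as an identity source, a specific user can be referenced as an EntityIdentifier whose entityId combines the user pool ID and the user's principal attribute. The "MyCorp" namespace and all IDs are placeholder values from the documentation's own examples.

import SotoVerifiedPermissions

// Hedged sketch: an EntityIdentifier for a Cognito-backed principal.
let cognitoPrincipal = VerifiedPermissions.EntityIdentifier(
    entityId: "us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",
    entityType: "MyCorp::User"
)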
@@ -639,7 +639,7 @@ public struct VerifiedPermissions: AWSService { return try await self.isAuthorized(input, logger: logger) } - /// Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision. At this time, Verified Permissions accepts tokens from only Amazon Cognito. Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store + /// Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision. Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store @Sendable @inlinable public func isAuthorizedWithToken(_ input: IsAuthorizedWithTokenInput, logger: Logger = AWSClient.loggingDisabled) async throws -> IsAuthorizedWithTokenOutput { @@ -652,7 +652,7 @@ public struct VerifiedPermissions: AWSService { logger: logger ) } - /// Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision. At this time, Verified Permissions accepts tokens from only Amazon Cognito. Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store + /// Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. 
The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision. Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature. Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store /// /// Parameters: /// - accessToken: Specifies an access token for the principal to be authorized. This token is provided to you by the identity provider (IdP) associated with the specified identity source. You must specify either an accessToken, an identityToken, or both. Must be an access token. Verified Permissions returns an error if the token_use claim in the submitted token isn't access. @@ -880,7 +880,7 @@ public struct VerifiedPermissions: AWSService { /// - identitySourceId: Specifies the ID of the identity source that you want to update. /// - policyStoreId: Specifies the ID of the policy store that contains the identity source that you want to update. /// - principalEntityType: Specifies the data type of principals generated for identities authenticated by the identity source. - /// - updateConfiguration: Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. At this time, the only valid member of this structure is a Amazon Cognito user pool configuration. You must specify a userPoolArn, and optionally, a ClientId. + /// - updateConfiguration: Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. /// - logger: Logger use during operation @inlinable public func updateIdentitySource( diff --git a/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_shapes.swift b/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_shapes.swift index 5349b6d954..137f9eaa7f 100644 --- a/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_shapes.swift +++ b/Sources/Soto/Services/VerifiedPermissions/VerifiedPermissions_shapes.swift @@ -2541,7 +2541,7 @@ extension VerifiedPermissions { public let policyId: String /// The identifier of the PolicyStore where the policy you want information about is stored. public let policyStoreId: String - /// The type of the policy. This is one of the following values: static templateLinked + /// The type of the policy. This is one of the following values: STATIC TEMPLATE_LINKED public let policyType: PolicyType /// The principal associated with the policy. public let principal: EntityIdentifier? @@ -2885,7 +2885,7 @@ extension VerifiedPermissions { public let policyStoreId: String /// Specifies the data type of principals generated for identities authenticated by the identity source. public let principalEntityType: String? - /// Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. At this time, the only valid member of this structure is a Amazon Cognito user pool configuration. You must specify a userPoolArn, and optionally, a ClientId. + /// Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. 
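The IsAuthorizedWithToken documentation changes above drop the Cognito-only caveat; as a rough sketch, a call through Soto reads along the following lines. Only the isAuthorizedWithToken entry point, its accessToken parameter, and the service initializer pattern appear in this diff; the policy store ID, the action and resource values, the ActionIdentifier/EntityIdentifier initializers, and the output properties are assumptions about the generated shapes and may differ by Soto version.

```swift
import SotoVerifiedPermissions  // assumed module name, following the Soto library naming used elsewhere

// Minimal sketch: `awsClient` is assumed to be an existing AWSClient, and
// `jwtAccessToken` a token from your identity source whose token_use claim is "access".
// Labels other than accessToken are assumptions, not taken from this diff.
let verifiedPermissions = VerifiedPermissions(client: awsClient, region: .useast1)

let output = try await verifiedPermissions.isAuthorizedWithToken(
    accessToken: jwtAccessToken,
    action: VerifiedPermissions.ActionIdentifier(actionId: "viewReport", actionType: "MyCorp::Action"),
    policyStoreId: "ps-EXAMPLE",
    resource: VerifiedPermissions.EntityIdentifier(entityId: "report-1", entityType: "MyCorp::Report")
)

// Allow or Deny, together with the policies that determined the decision.
print(output.decision, output.determiningPolicies)
```

As the documentation above notes, tokens remain usable until they expire, so revoking a token or deleting a resource does not by itself change the outcome of this call.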
public let updateConfiguration: UpdateConfiguration @inlinable @@ -3347,7 +3347,7 @@ extension VerifiedPermissions { } public struct SchemaDefinition: AWSEncodableShape { - /// A JSON string representation of the schema supported by applications that use this policy store. For more information, see Policy store schema in the Amazon Verified Permissions User Guide. + /// A JSON string representation of the schema supported by applications that use this policy store. To delete the schema, run PutSchema with {} for this parameter. For more information, see Policy store schema in the Amazon Verified Permissions User Guide. public let cedarJson: String? @inlinable diff --git a/Sources/Soto/Services/Wisdom/Wisdom_api.swift b/Sources/Soto/Services/Wisdom/Wisdom_api.swift index 1ec66cdbf7..cb940edbd1 100644 --- a/Sources/Soto/Services/Wisdom/Wisdom_api.swift +++ b/Sources/Soto/Services/Wisdom/Wisdom_api.swift @@ -80,6 +80,7 @@ public struct Wisdom: AWSService { /// FIPS and dualstack endpoints static var variantEndpoints: [EndpointVariantType: AWSServiceConfig.EndpointVariant] {[ [.fips]: .init(endpoints: [ + "ca-central-1": "wisdom-fips.ca-central-1.amazonaws.com", "us-east-1": "wisdom-fips.us-east-1.amazonaws.com", "us-west-2": "wisdom-fips.us-west-2.amazonaws.com" ]) diff --git a/Sources/Soto/Services/WorkLink/WorkLink_api.swift b/Sources/Soto/Services/WorkLink/WorkLink_api.swift deleted file mode 100644 index cad8bd6b7c..0000000000 --- a/Sources/Soto/Services/WorkLink/WorkLink_api.swift +++ /dev/null @@ -1,1487 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2024 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. - -#if os(Linux) && compiler(<5.10) -// swift-corelibs-foundation hasn't been updated with Sendable conformances -@preconcurrency import Foundation -#else -import Foundation -#endif -@_exported import SotoCore - -/// Service object for interacting with AWS WorkLink service. -/// -/// Amazon WorkLink is a cloud-based service that provides secure access to internal websites and web apps from iOS and Android phones. In a single step, your users, such as employees, can access internal websites as efficiently as they access any other public website. They enter a URL in their web browser, or choose a link to an internal website in an email. Amazon WorkLink authenticates the user's access and securely renders authorized internal web content in a secure rendering service in the AWS cloud. Amazon WorkLink doesn't download or store any internal web content on mobile devices. -public struct WorkLink: AWSService { - // MARK: Member variables - - /// Client used for communication with AWS - public let client: AWSClient - /// Service configuration - public let config: AWSServiceConfig - - // MARK: Initialization - - /// Initialize the WorkLink client - /// - parameters: - /// - client: AWSClient used to process requests - /// - region: Region of server you want to communicate with. This will override the partition parameter. 
- /// - partition: AWS partition where service resides, standard (.aws), china (.awscn), government (.awsusgov). - /// - endpoint: Custom endpoint URL to use instead of standard AWS servers - /// - middleware: Middleware chain used to edit requests before they are sent and responses before they are decoded - /// - timeout: Timeout value for HTTP requests - /// - byteBufferAllocator: Allocator for ByteBuffers - /// - options: Service options - public init( - client: AWSClient, - region: SotoCore.Region? = nil, - partition: AWSPartition = .aws, - endpoint: String? = nil, - middleware: AWSMiddlewareProtocol? = nil, - timeout: TimeAmount? = nil, - byteBufferAllocator: ByteBufferAllocator = ByteBufferAllocator(), - options: AWSServiceConfig.Options = [] - ) { - self.client = client - self.config = AWSServiceConfig( - region: region, - partition: region?.partition ?? partition, - serviceName: "WorkLink", - serviceIdentifier: "worklink", - serviceProtocol: .restjson, - apiVersion: "2018-09-25", - endpoint: endpoint, - errorType: WorkLinkErrorType.self, - middleware: middleware, - timeout: timeout, - byteBufferAllocator: byteBufferAllocator, - options: options - ) - } - - - - - - // MARK: API Calls - - /// Specifies a domain to be associated to Amazon WorkLink. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func associateDomain(_ input: AssociateDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateDomainResponse { - try await self.client.execute( - operation: "AssociateDomain", - path: "/associateDomain", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Specifies a domain to be associated to Amazon WorkLink. - /// - /// Parameters: - /// - acmCertificateArn: The ARN of an issued ACM certificate that is valid for the domain being associated. - /// - displayName: The name to display. - /// - domainName: The fully qualified domain name (FQDN). - /// - fleetArn: The Amazon Resource Name (ARN) of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func associateDomain( - acmCertificateArn: String, - displayName: String? = nil, - domainName: String, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> AssociateDomainResponse { - let input = AssociateDomainRequest( - acmCertificateArn: acmCertificateArn, - displayName: displayName, - domainName: domainName, - fleetArn: fleetArn - ) - return try await self.associateDomain(input, logger: logger) - } - - /// Associates a website authorization provider with a specified fleet. This is used to authorize users against associated websites in the company network. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func associateWebsiteAuthorizationProvider(_ input: AssociateWebsiteAuthorizationProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateWebsiteAuthorizationProviderResponse { - try await self.client.execute( - operation: "AssociateWebsiteAuthorizationProvider", - path: "/associateWebsiteAuthorizationProvider", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Associates a website authorization provider with a specified fleet. This is used to authorize users against associated websites in the company network. - /// - /// Parameters: - /// - authorizationProviderType: The authorization provider type. - /// - domainName: The domain name of the authorization provider. This applies only to SAML-based authorization providers. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func associateWebsiteAuthorizationProvider( - authorizationProviderType: AuthorizationProviderType, - domainName: String? = nil, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> AssociateWebsiteAuthorizationProviderResponse { - let input = AssociateWebsiteAuthorizationProviderRequest( - authorizationProviderType: authorizationProviderType, - domainName: domainName, - fleetArn: fleetArn - ) - return try await self.associateWebsiteAuthorizationProvider(input, logger: logger) - } - - /// Imports the root certificate of a certificate authority (CA) used to obtain TLS certificates used by associated websites within the company network. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func associateWebsiteCertificateAuthority(_ input: AssociateWebsiteCertificateAuthorityRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> AssociateWebsiteCertificateAuthorityResponse { - try await self.client.execute( - operation: "AssociateWebsiteCertificateAuthority", - path: "/associateWebsiteCertificateAuthority", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Imports the root certificate of a certificate authority (CA) used to obtain TLS certificates used by associated websites within the company network. - /// - /// Parameters: - /// - certificate: The root certificate of the CA. - /// - displayName: The certificate name to display. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func associateWebsiteCertificateAuthority( - certificate: String, - displayName: String? = nil, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> AssociateWebsiteCertificateAuthorityResponse { - let input = AssociateWebsiteCertificateAuthorityRequest( - certificate: certificate, - displayName: displayName, - fleetArn: fleetArn - ) - return try await self.associateWebsiteCertificateAuthority(input, logger: logger) - } - - /// Creates a fleet. 
A fleet consists of resources and the configuration that delivers associated websites to authorized users who download and set up the Amazon WorkLink app. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func createFleet(_ input: CreateFleetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateFleetResponse { - try await self.client.execute( - operation: "CreateFleet", - path: "/createFleet", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Creates a fleet. A fleet consists of resources and the configuration that delivers associated websites to authorized users who download and set up the Amazon WorkLink app. - /// - /// Parameters: - /// - displayName: The fleet name to display. - /// - fleetName: A unique name for the fleet. - /// - optimizeForEndUserLocation: The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region. - /// - tags: The tags to add to the resource. A tag is a key-value pair. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func createFleet( - displayName: String? = nil, - fleetName: String, - optimizeForEndUserLocation: Bool? = nil, - tags: [String: String]? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> CreateFleetResponse { - let input = CreateFleetRequest( - displayName: displayName, - fleetName: fleetName, - optimizeForEndUserLocation: optimizeForEndUserLocation, - tags: tags - ) - return try await self.createFleet(input, logger: logger) - } - - /// Deletes a fleet. Prevents users from accessing previously associated websites. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func deleteFleet(_ input: DeleteFleetRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DeleteFleetResponse { - try await self.client.execute( - operation: "DeleteFleet", - path: "/deleteFleet", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Deletes a fleet. Prevents users from accessing previously associated websites. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func deleteFleet( - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DeleteFleetResponse { - let input = DeleteFleetRequest( - fleetArn: fleetArn - ) - return try await self.deleteFleet(input, logger: logger) - } - - /// Describes the configuration for delivering audit streams to the customer account. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func describeAuditStreamConfiguration(_ input: DescribeAuditStreamConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeAuditStreamConfigurationResponse { - try await self.client.execute( - operation: "DescribeAuditStreamConfiguration", - path: "/describeAuditStreamConfiguration", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Describes the configuration for delivering audit streams to the customer account. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func describeAuditStreamConfiguration( - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeAuditStreamConfigurationResponse { - let input = DescribeAuditStreamConfigurationRequest( - fleetArn: fleetArn - ) - return try await self.describeAuditStreamConfiguration(input, logger: logger) - } - - /// Describes the networking configuration to access the internal websites associated with the specified fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func describeCompanyNetworkConfiguration(_ input: DescribeCompanyNetworkConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeCompanyNetworkConfigurationResponse { - try await self.client.execute( - operation: "DescribeCompanyNetworkConfiguration", - path: "/describeCompanyNetworkConfiguration", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Describes the networking configuration to access the internal websites associated with the specified fleet. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func describeCompanyNetworkConfiguration( - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeCompanyNetworkConfigurationResponse { - let input = DescribeCompanyNetworkConfigurationRequest( - fleetArn: fleetArn - ) - return try await self.describeCompanyNetworkConfiguration(input, logger: logger) - } - - /// Provides information about a user's device. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func describeDevice(_ input: DescribeDeviceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeDeviceResponse { - try await self.client.execute( - operation: "DescribeDevice", - path: "/describeDevice", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Provides information about a user's device. - /// - /// Parameters: - /// - deviceId: A unique identifier for a registered user's device. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @inlinable - public func describeDevice( - deviceId: String, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeDeviceResponse { - let input = DescribeDeviceRequest( - deviceId: deviceId, - fleetArn: fleetArn - ) - return try await self.describeDevice(input, logger: logger) - } - - /// Describes the device policy configuration for the specified fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func describeDevicePolicyConfiguration(_ input: DescribeDevicePolicyConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeDevicePolicyConfigurationResponse { - try await self.client.execute( - operation: "DescribeDevicePolicyConfiguration", - path: "/describeDevicePolicyConfiguration", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Describes the device policy configuration for the specified fleet. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func describeDevicePolicyConfiguration( - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeDevicePolicyConfigurationResponse { - let input = DescribeDevicePolicyConfigurationRequest( - fleetArn: fleetArn - ) - return try await self.describeDevicePolicyConfiguration(input, logger: logger) - } - - /// Provides information about the domain. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func describeDomain(_ input: DescribeDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeDomainResponse { - try await self.client.execute( - operation: "DescribeDomain", - path: "/describeDomain", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Provides information about the domain. - /// - /// Parameters: - /// - domainName: The name of the domain. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func describeDomain( - domainName: String, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeDomainResponse { - let input = DescribeDomainRequest( - domainName: domainName, - fleetArn: fleetArn - ) - return try await self.describeDomain(input, logger: logger) - } - - /// Provides basic information for the specified fleet, excluding identity provider, networking, and device configuration details. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func describeFleetMetadata(_ input: DescribeFleetMetadataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeFleetMetadataResponse { - try await self.client.execute( - operation: "DescribeFleetMetadata", - path: "/describeFleetMetadata", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Provides basic information for the specified fleet, excluding identity provider, networking, and device configuration details. - /// - /// Parameters: - /// - fleetArn: The Amazon Resource Name (ARN) of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func describeFleetMetadata( - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeFleetMetadataResponse { - let input = DescribeFleetMetadataRequest( - fleetArn: fleetArn - ) - return try await self.describeFleetMetadata(input, logger: logger) - } - - /// Describes the identity provider configuration of the specified fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func describeIdentityProviderConfiguration(_ input: DescribeIdentityProviderConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeIdentityProviderConfigurationResponse { - try await self.client.execute( - operation: "DescribeIdentityProviderConfiguration", - path: "/describeIdentityProviderConfiguration", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Describes the identity provider configuration of the specified fleet. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func describeIdentityProviderConfiguration( - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeIdentityProviderConfigurationResponse { - let input = DescribeIdentityProviderConfigurationRequest( - fleetArn: fleetArn - ) - return try await self.describeIdentityProviderConfiguration(input, logger: logger) - } - - /// Provides information about the certificate authority. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func describeWebsiteCertificateAuthority(_ input: DescribeWebsiteCertificateAuthorityRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DescribeWebsiteCertificateAuthorityResponse { - try await self.client.execute( - operation: "DescribeWebsiteCertificateAuthority", - path: "/describeWebsiteCertificateAuthority", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Provides information about the certificate authority. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - websiteCaId: A unique identifier for the certificate authority. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @inlinable - public func describeWebsiteCertificateAuthority( - fleetArn: String, - websiteCaId: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DescribeWebsiteCertificateAuthorityResponse { - let input = DescribeWebsiteCertificateAuthorityRequest( - fleetArn: fleetArn, - websiteCaId: websiteCaId - ) - return try await self.describeWebsiteCertificateAuthority(input, logger: logger) - } - - /// Disassociates a domain from Amazon WorkLink. End users lose the ability to access the domain with Amazon WorkLink. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func disassociateDomain(_ input: DisassociateDomainRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateDomainResponse { - try await self.client.execute( - operation: "DisassociateDomain", - path: "/disassociateDomain", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Disassociates a domain from Amazon WorkLink. End users lose the ability to access the domain with Amazon WorkLink. - /// - /// Parameters: - /// - domainName: The name of the domain. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func disassociateDomain( - domainName: String, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DisassociateDomainResponse { - let input = DisassociateDomainRequest( - domainName: domainName, - fleetArn: fleetArn - ) - return try await self.disassociateDomain(input, logger: logger) - } - - /// Disassociates a website authorization provider from a specified fleet. After the disassociation, users can't load any associated websites that require this authorization provider. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func disassociateWebsiteAuthorizationProvider(_ input: DisassociateWebsiteAuthorizationProviderRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateWebsiteAuthorizationProviderResponse { - try await self.client.execute( - operation: "DisassociateWebsiteAuthorizationProvider", - path: "/disassociateWebsiteAuthorizationProvider", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Disassociates a website authorization provider from a specified fleet. After the disassociation, users can't load any associated websites that require this authorization provider. - /// - /// Parameters: - /// - authorizationProviderId: A unique identifier for the authorization provider. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @inlinable - public func disassociateWebsiteAuthorizationProvider( - authorizationProviderId: String, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DisassociateWebsiteAuthorizationProviderResponse { - let input = DisassociateWebsiteAuthorizationProviderRequest( - authorizationProviderId: authorizationProviderId, - fleetArn: fleetArn - ) - return try await self.disassociateWebsiteAuthorizationProvider(input, logger: logger) - } - - /// Removes a certificate authority (CA). - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func disassociateWebsiteCertificateAuthority(_ input: DisassociateWebsiteCertificateAuthorityRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> DisassociateWebsiteCertificateAuthorityResponse { - try await self.client.execute( - operation: "DisassociateWebsiteCertificateAuthority", - path: "/disassociateWebsiteCertificateAuthority", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Removes a certificate authority (CA). - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - websiteCaId: A unique identifier for the CA. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func disassociateWebsiteCertificateAuthority( - fleetArn: String, - websiteCaId: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> DisassociateWebsiteCertificateAuthorityResponse { - let input = DisassociateWebsiteCertificateAuthorityRequest( - fleetArn: fleetArn, - websiteCaId: websiteCaId - ) - return try await self.disassociateWebsiteCertificateAuthority(input, logger: logger) - } - - /// Retrieves a list of devices registered with the specified fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func listDevices(_ input: ListDevicesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDevicesResponse { - try await self.client.execute( - operation: "ListDevices", - path: "/listDevices", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Retrieves a list of devices registered with the specified fleet. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - maxResults: The maximum number of results to be included in the next page. - /// - nextToken: The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listDevices( - fleetArn: String, - maxResults: Int? = nil, - nextToken: String? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListDevicesResponse { - let input = ListDevicesRequest( - fleetArn: fleetArn, - maxResults: maxResults, - nextToken: nextToken - ) - return try await self.listDevices(input, logger: logger) - } - - /// Retrieves a list of domains associated to a specified fleet. 
- @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func listDomains(_ input: ListDomainsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListDomainsResponse { - try await self.client.execute( - operation: "ListDomains", - path: "/listDomains", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Retrieves a list of domains associated to a specified fleet. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - maxResults: The maximum number of results to be included in the next page. - /// - nextToken: The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listDomains( - fleetArn: String, - maxResults: Int? = nil, - nextToken: String? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListDomainsResponse { - let input = ListDomainsRequest( - fleetArn: fleetArn, - maxResults: maxResults, - nextToken: nextToken - ) - return try await self.listDomains(input, logger: logger) - } - - /// Retrieves a list of fleets for the current account and Region. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func listFleets(_ input: ListFleetsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListFleetsResponse { - try await self.client.execute( - operation: "ListFleets", - path: "/listFleets", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Retrieves a list of fleets for the current account and Region. - /// - /// Parameters: - /// - maxResults: The maximum number of results to be included in the next page. - /// - nextToken: The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listFleets( - maxResults: Int? = nil, - nextToken: String? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListFleetsResponse { - let input = ListFleetsRequest( - maxResults: maxResults, - nextToken: nextToken - ) - return try await self.listFleets(input, logger: logger) - } - - /// Retrieves a list of tags for the specified resource. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func listTagsForResource(_ input: ListTagsForResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListTagsForResourceResponse { - try await self.client.execute( - operation: "ListTagsForResource", - path: "/tags/{ResourceArn}", - httpMethod: .GET, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Retrieves a list of tags for the specified resource. - /// - /// Parameters: - /// - resourceArn: The Amazon Resource Name (ARN) of the fleet. 
- /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listTagsForResource( - resourceArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListTagsForResourceResponse { - let input = ListTagsForResourceRequest( - resourceArn: resourceArn - ) - return try await self.listTagsForResource(input, logger: logger) - } - - /// Retrieves a list of website authorization providers associated with a specified fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func listWebsiteAuthorizationProviders(_ input: ListWebsiteAuthorizationProvidersRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWebsiteAuthorizationProvidersResponse { - try await self.client.execute( - operation: "ListWebsiteAuthorizationProviders", - path: "/listWebsiteAuthorizationProviders", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Retrieves a list of website authorization providers associated with a specified fleet. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - maxResults: The maximum number of results to be included in the next page. - /// - nextToken: The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listWebsiteAuthorizationProviders( - fleetArn: String, - maxResults: Int? = nil, - nextToken: String? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListWebsiteAuthorizationProvidersResponse { - let input = ListWebsiteAuthorizationProvidersRequest( - fleetArn: fleetArn, - maxResults: maxResults, - nextToken: nextToken - ) - return try await self.listWebsiteAuthorizationProviders(input, logger: logger) - } - - /// Retrieves a list of certificate authorities added for the current account and Region. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func listWebsiteCertificateAuthorities(_ input: ListWebsiteCertificateAuthoritiesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListWebsiteCertificateAuthoritiesResponse { - try await self.client.execute( - operation: "ListWebsiteCertificateAuthorities", - path: "/listWebsiteCertificateAuthorities", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Retrieves a list of certificate authorities added for the current account and Region. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - maxResults: The maximum number of results to be included in the next page. - /// - nextToken: The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listWebsiteCertificateAuthorities( - fleetArn: String, - maxResults: Int? 
= nil, - nextToken: String? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> ListWebsiteCertificateAuthoritiesResponse { - let input = ListWebsiteCertificateAuthoritiesRequest( - fleetArn: fleetArn, - maxResults: maxResults, - nextToken: nextToken - ) - return try await self.listWebsiteCertificateAuthorities(input, logger: logger) - } - - /// Moves a domain to ACTIVE status if it was in the INACTIVE status. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func restoreDomainAccess(_ input: RestoreDomainAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RestoreDomainAccessResponse { - try await self.client.execute( - operation: "RestoreDomainAccess", - path: "/restoreDomainAccess", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Moves a domain to ACTIVE status if it was in the INACTIVE status. - /// - /// Parameters: - /// - domainName: The name of the domain. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func restoreDomainAccess( - domainName: String, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> RestoreDomainAccessResponse { - let input = RestoreDomainAccessRequest( - domainName: domainName, - fleetArn: fleetArn - ) - return try await self.restoreDomainAccess(input, logger: logger) - } - - /// Moves a domain to INACTIVE status if it was in the ACTIVE status. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func revokeDomainAccess(_ input: RevokeDomainAccessRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> RevokeDomainAccessResponse { - try await self.client.execute( - operation: "RevokeDomainAccess", - path: "/revokeDomainAccess", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Moves a domain to INACTIVE status if it was in the ACTIVE status. - /// - /// Parameters: - /// - domainName: The name of the domain. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func revokeDomainAccess( - domainName: String, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> RevokeDomainAccessResponse { - let input = RevokeDomainAccessRequest( - domainName: domainName, - fleetArn: fleetArn - ) - return try await self.revokeDomainAccess(input, logger: logger) - } - - /// Signs the user out from all of their devices. The user can sign in again if they have valid credentials. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func signOutUser(_ input: SignOutUserRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> SignOutUserResponse { - try await self.client.execute( - operation: "SignOutUser", - path: "/signOutUser", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Signs the user out from all of their devices. The user can sign in again if they have valid credentials. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - username: The name of the user. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func signOutUser( - fleetArn: String, - username: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> SignOutUserResponse { - let input = SignOutUserRequest( - fleetArn: fleetArn, - username: username - ) - return try await self.signOutUser(input, logger: logger) - } - - /// Adds or overwrites one or more tags for the specified resource, such as a fleet. Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func tagResource(_ input: TagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> TagResourceResponse { - try await self.client.execute( - operation: "TagResource", - path: "/tags/{ResourceArn}", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Adds or overwrites one or more tags for the specified resource, such as a fleet. Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value. - /// - /// Parameters: - /// - resourceArn: The Amazon Resource Name (ARN) of the fleet. - /// - tags: The tags to add to the resource. A tag is a key-value pair. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func tagResource( - resourceArn: String, - tags: [String: String], - logger: Logger = AWSClient.loggingDisabled - ) async throws -> TagResourceResponse { - let input = TagResourceRequest( - resourceArn: resourceArn, - tags: tags - ) - return try await self.tagResource(input, logger: logger) - } - - /// Removes one or more tags from the specified resource. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func untagResource(_ input: UntagResourceRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UntagResourceResponse { - try await self.client.execute( - operation: "UntagResource", - path: "/tags/{ResourceArn}", - httpMethod: .DELETE, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Removes one or more tags from the specified resource. - /// - /// Parameters: - /// - resourceArn: The Amazon Resource Name (ARN) of the fleet. - /// - tagKeys: The list of tag keys to remove from the resource. 
- /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func untagResource( - resourceArn: String, - tagKeys: [String], - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UntagResourceResponse { - let input = UntagResourceRequest( - resourceArn: resourceArn, - tagKeys: tagKeys - ) - return try await self.untagResource(input, logger: logger) - } - - /// Updates the audit stream configuration for the fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func updateAuditStreamConfiguration(_ input: UpdateAuditStreamConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateAuditStreamConfigurationResponse { - try await self.client.execute( - operation: "UpdateAuditStreamConfiguration", - path: "/updateAuditStreamConfiguration", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Updates the audit stream configuration for the fleet. - /// - /// Parameters: - /// - auditStreamArn: The ARN of the Amazon Kinesis data stream that receives the audit events. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func updateAuditStreamConfiguration( - auditStreamArn: String? = nil, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UpdateAuditStreamConfigurationResponse { - let input = UpdateAuditStreamConfigurationRequest( - auditStreamArn: auditStreamArn, - fleetArn: fleetArn - ) - return try await self.updateAuditStreamConfiguration(input, logger: logger) - } - - /// Updates the company network configuration for the fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func updateCompanyNetworkConfiguration(_ input: UpdateCompanyNetworkConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateCompanyNetworkConfigurationResponse { - try await self.client.execute( - operation: "UpdateCompanyNetworkConfiguration", - path: "/updateCompanyNetworkConfiguration", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Updates the company network configuration for the fleet. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - securityGroupIds: The security groups associated with access to the provided subnets. - /// - subnetIds: The subnets used for X-ENI connections from Amazon WorkLink rendering containers. - /// - vpcId: The VPC with connectivity to associated websites. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @inlinable - public func updateCompanyNetworkConfiguration( - fleetArn: String, - securityGroupIds: [String], - subnetIds: [String], - vpcId: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UpdateCompanyNetworkConfigurationResponse { - let input = UpdateCompanyNetworkConfigurationRequest( - fleetArn: fleetArn, - securityGroupIds: securityGroupIds, - subnetIds: subnetIds, - vpcId: vpcId - ) - return try await self.updateCompanyNetworkConfiguration(input, logger: logger) - } - - /// Updates the device policy configuration for the fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func updateDevicePolicyConfiguration(_ input: UpdateDevicePolicyConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDevicePolicyConfigurationResponse { - try await self.client.execute( - operation: "UpdateDevicePolicyConfiguration", - path: "/updateDevicePolicyConfiguration", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Updates the device policy configuration for the fleet. - /// - /// Parameters: - /// - deviceCaCertificate: The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func updateDevicePolicyConfiguration( - deviceCaCertificate: String? = nil, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UpdateDevicePolicyConfigurationResponse { - let input = UpdateDevicePolicyConfigurationRequest( - deviceCaCertificate: deviceCaCertificate, - fleetArn: fleetArn - ) - return try await self.updateDevicePolicyConfiguration(input, logger: logger) - } - - /// Updates domain metadata, such as DisplayName. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func updateDomainMetadata(_ input: UpdateDomainMetadataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateDomainMetadataResponse { - try await self.client.execute( - operation: "UpdateDomainMetadata", - path: "/updateDomainMetadata", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Updates domain metadata, such as DisplayName. - /// - /// Parameters: - /// - displayName: The name to display. - /// - domainName: The name of the domain. - /// - fleetArn: The ARN of the fleet. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func updateDomainMetadata( - displayName: String? = nil, - domainName: String, - fleetArn: String, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UpdateDomainMetadataResponse { - let input = UpdateDomainMetadataRequest( - displayName: displayName, - domainName: domainName, - fleetArn: fleetArn - ) - return try await self.updateDomainMetadata(input, logger: logger) - } - - /// Updates fleet metadata, such as DisplayName. 
- @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func updateFleetMetadata(_ input: UpdateFleetMetadataRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateFleetMetadataResponse { - try await self.client.execute( - operation: "UpdateFleetMetadata", - path: "/UpdateFleetMetadata", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Updates fleet metadata, such as DisplayName. - /// - /// Parameters: - /// - displayName: The fleet name to display. The existing DisplayName is unset if null is passed. - /// - fleetArn: The ARN of the fleet. - /// - optimizeForEndUserLocation: The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func updateFleetMetadata( - displayName: String? = nil, - fleetArn: String, - optimizeForEndUserLocation: Bool? = nil, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UpdateFleetMetadataResponse { - let input = UpdateFleetMetadataRequest( - displayName: displayName, - fleetArn: fleetArn, - optimizeForEndUserLocation: optimizeForEndUserLocation - ) - return try await self.updateFleetMetadata(input, logger: logger) - } - - /// Updates the identity provider configuration for the fleet. - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @Sendable - @inlinable - public func updateIdentityProviderConfiguration(_ input: UpdateIdentityProviderConfigurationRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> UpdateIdentityProviderConfigurationResponse { - try await self.client.execute( - operation: "UpdateIdentityProviderConfiguration", - path: "/updateIdentityProviderConfiguration", - httpMethod: .POST, - serviceConfig: self.config, - input: input, - logger: logger - ) - } - /// Updates the identity provider configuration for the fleet. - /// - /// Parameters: - /// - fleetArn: The ARN of the fleet. - /// - identityProviderSamlMetadata: The SAML metadata document provided by the customer’s identity provider. The existing IdentityProviderSamlMetadata is unset if null is passed. - /// - identityProviderType: The type of identity provider. - /// - logger: Logger use during operation - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func updateIdentityProviderConfiguration( - fleetArn: String, - identityProviderSamlMetadata: String? = nil, - identityProviderType: IdentityProviderType, - logger: Logger = AWSClient.loggingDisabled - ) async throws -> UpdateIdentityProviderConfigurationResponse { - let input = UpdateIdentityProviderConfigurationRequest( - fleetArn: fleetArn, - identityProviderSamlMetadata: identityProviderSamlMetadata, - identityProviderType: identityProviderType - ) - return try await self.updateIdentityProviderConfiguration(input, logger: logger) - } -} - -extension WorkLink { - /// Initializer required by `AWSService.with(middlewares:timeout:byteBufferAllocator:options)`. 
You are not able to use this initializer directly as there are not public - /// initializers for `AWSServiceConfig.Patch`. Please use `AWSService.with(middlewares:timeout:byteBufferAllocator:options)` instead. - public init(from: WorkLink, patch: AWSServiceConfig.Patch) { - self.client = from.client - self.config = from.config.with(patch: patch) - } -} - -// MARK: Paginators - -@available(macOS 10.15, iOS 13.0, tvOS 13.0, watchOS 6.0, *) -extension WorkLink { - /// Return PaginatorSequence for operation ``listDevices(_:logger:)``. - /// - /// - Parameters: - /// - input: Input for operation - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listDevicesPaginator( - _ input: ListDevicesRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listDevices, - inputKey: \ListDevicesRequest.nextToken, - outputKey: \ListDevicesResponse.nextToken, - logger: logger - ) - } - /// Return PaginatorSequence for operation ``listDevices(_:logger:)``. - /// - /// - Parameters: - /// - fleetArn: The ARN of the fleet. - /// - maxResults: The maximum number of results to be included in the next page. - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listDevicesPaginator( - fleetArn: String, - maxResults: Int? = nil, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - let input = ListDevicesRequest( - fleetArn: fleetArn, - maxResults: maxResults - ) - return self.listDevicesPaginator(input, logger: logger) - } - - /// Return PaginatorSequence for operation ``listDomains(_:logger:)``. - /// - /// - Parameters: - /// - input: Input for operation - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listDomainsPaginator( - _ input: ListDomainsRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listDomains, - inputKey: \ListDomainsRequest.nextToken, - outputKey: \ListDomainsResponse.nextToken, - logger: logger - ) - } - /// Return PaginatorSequence for operation ``listDomains(_:logger:)``. - /// - /// - Parameters: - /// - fleetArn: The ARN of the fleet. - /// - maxResults: The maximum number of results to be included in the next page. - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listDomainsPaginator( - fleetArn: String, - maxResults: Int? = nil, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - let input = ListDomainsRequest( - fleetArn: fleetArn, - maxResults: maxResults - ) - return self.listDomainsPaginator(input, logger: logger) - } - - /// Return PaginatorSequence for operation ``listFleets(_:logger:)``. - /// - /// - Parameters: - /// - input: Input for operation - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. 
This will be removed in a future version of the SDK.") - @inlinable - public func listFleetsPaginator( - _ input: ListFleetsRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listFleets, - inputKey: \ListFleetsRequest.nextToken, - outputKey: \ListFleetsResponse.nextToken, - logger: logger - ) - } - /// Return PaginatorSequence for operation ``listFleets(_:logger:)``. - /// - /// - Parameters: - /// - maxResults: The maximum number of results to be included in the next page. - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listFleetsPaginator( - maxResults: Int? = nil, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - let input = ListFleetsRequest( - maxResults: maxResults - ) - return self.listFleetsPaginator(input, logger: logger) - } - - /// Return PaginatorSequence for operation ``listWebsiteAuthorizationProviders(_:logger:)``. - /// - /// - Parameters: - /// - input: Input for operation - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listWebsiteAuthorizationProvidersPaginator( - _ input: ListWebsiteAuthorizationProvidersRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listWebsiteAuthorizationProviders, - inputKey: \ListWebsiteAuthorizationProvidersRequest.nextToken, - outputKey: \ListWebsiteAuthorizationProvidersResponse.nextToken, - logger: logger - ) - } - /// Return PaginatorSequence for operation ``listWebsiteAuthorizationProviders(_:logger:)``. - /// - /// - Parameters: - /// - fleetArn: The ARN of the fleet. - /// - maxResults: The maximum number of results to be included in the next page. - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listWebsiteAuthorizationProvidersPaginator( - fleetArn: String, - maxResults: Int? = nil, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - let input = ListWebsiteAuthorizationProvidersRequest( - fleetArn: fleetArn, - maxResults: maxResults - ) - return self.listWebsiteAuthorizationProvidersPaginator(input, logger: logger) - } - - /// Return PaginatorSequence for operation ``listWebsiteCertificateAuthorities(_:logger:)``. - /// - /// - Parameters: - /// - input: Input for operation - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listWebsiteCertificateAuthoritiesPaginator( - _ input: ListWebsiteCertificateAuthoritiesRequest, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - return .init( - input: input, - command: self.listWebsiteCertificateAuthorities, - inputKey: \ListWebsiteCertificateAuthoritiesRequest.nextToken, - outputKey: \ListWebsiteCertificateAuthoritiesResponse.nextToken, - logger: logger - ) - } - /// Return PaginatorSequence for operation ``listWebsiteCertificateAuthorities(_:logger:)``. - /// - /// - Parameters: - /// - fleetArn: The ARN of the fleet. 
- /// - maxResults: The maximum number of results to be included in the next page. - /// - logger: Logger used for logging - @available(*, deprecated, message: "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.") - @inlinable - public func listWebsiteCertificateAuthoritiesPaginator( - fleetArn: String, - maxResults: Int? = nil, - logger: Logger = AWSClient.loggingDisabled - ) -> AWSClient.PaginatorSequence { - let input = ListWebsiteCertificateAuthoritiesRequest( - fleetArn: fleetArn, - maxResults: maxResults - ) - return self.listWebsiteCertificateAuthoritiesPaginator(input, logger: logger) - } -} - -extension WorkLink.ListDevicesRequest: AWSPaginateToken { - @inlinable - public func usingPaginationToken(_ token: String) -> WorkLink.ListDevicesRequest { - return .init( - fleetArn: self.fleetArn, - maxResults: self.maxResults, - nextToken: token - ) - } -} - -extension WorkLink.ListDomainsRequest: AWSPaginateToken { - @inlinable - public func usingPaginationToken(_ token: String) -> WorkLink.ListDomainsRequest { - return .init( - fleetArn: self.fleetArn, - maxResults: self.maxResults, - nextToken: token - ) - } -} - -extension WorkLink.ListFleetsRequest: AWSPaginateToken { - @inlinable - public func usingPaginationToken(_ token: String) -> WorkLink.ListFleetsRequest { - return .init( - maxResults: self.maxResults, - nextToken: token - ) - } -} - -extension WorkLink.ListWebsiteAuthorizationProvidersRequest: AWSPaginateToken { - @inlinable - public func usingPaginationToken(_ token: String) -> WorkLink.ListWebsiteAuthorizationProvidersRequest { - return .init( - fleetArn: self.fleetArn, - maxResults: self.maxResults, - nextToken: token - ) - } -} - -extension WorkLink.ListWebsiteCertificateAuthoritiesRequest: AWSPaginateToken { - @inlinable - public func usingPaginationToken(_ token: String) -> WorkLink.ListWebsiteCertificateAuthoritiesRequest { - return .init( - fleetArn: self.fleetArn, - maxResults: self.maxResults, - nextToken: token - ) - } -} diff --git a/Sources/Soto/Services/WorkLink/WorkLink_shapes.swift b/Sources/Soto/Services/WorkLink/WorkLink_shapes.swift deleted file mode 100644 index 737a07ee8a..0000000000 --- a/Sources/Soto/Services/WorkLink/WorkLink_shapes.swift +++ /dev/null @@ -1,1582 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Soto for AWS open source project -// -// Copyright (c) 2017-2024 the Soto project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.txt for the list of Soto project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -// THIS FILE IS AUTOMATICALLY GENERATED by https://github.com/soto-project/soto-codegenerator. -// DO NOT EDIT. 
- -#if os(Linux) && compiler(<5.10) -// swift-corelibs-foundation hasn't been updated with Sendable conformances -@preconcurrency import Foundation -#else -import Foundation -#endif -@_spi(SotoInternal) import SotoCore - -extension WorkLink { - // MARK: Enums - - public enum AuthorizationProviderType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case saml = "SAML" - public var description: String { return self.rawValue } - } - - public enum DeviceStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case active = "ACTIVE" - case signedOut = "SIGNED_OUT" - public var description: String { return self.rawValue } - } - - public enum DomainStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case active = "ACTIVE" - case associating = "ASSOCIATING" - case disassociated = "DISASSOCIATED" - case disassociating = "DISASSOCIATING" - case failedToAssociate = "FAILED_TO_ASSOCIATE" - case failedToDisassociate = "FAILED_TO_DISASSOCIATE" - case inactive = "INACTIVE" - case pendingValidation = "PENDING_VALIDATION" - public var description: String { return self.rawValue } - } - - public enum FleetStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case active = "ACTIVE" - case creating = "CREATING" - case deleted = "DELETED" - case deleting = "DELETING" - case failedToCreate = "FAILED_TO_CREATE" - case failedToDelete = "FAILED_TO_DELETE" - public var description: String { return self.rawValue } - } - - public enum IdentityProviderType: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { - case saml = "SAML" - public var description: String { return self.rawValue } - } - - // MARK: Shapes - - public struct AssociateDomainRequest: AWSEncodableShape { - /// The ARN of an issued ACM certificate that is valid for the domain being associated. - public let acmCertificateArn: String - /// The name to display. - public let displayName: String? - /// The fully qualified domain name (FQDN). - public let domainName: String - /// The Amazon Resource Name (ARN) of the fleet. - public let fleetArn: String - - @inlinable - public init(acmCertificateArn: String, displayName: String? 
= nil, domainName: String, fleetArn: String) { - self.acmCertificateArn = acmCertificateArn - self.displayName = displayName - self.domainName = domainName - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.acmCertificateArn, name: "acmCertificateArn", parent: name, pattern: "^arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:[\\w+=/,.@-]*:[0-9]+:[\\w+=,.@-]+(/[\\w+=/,.@-]+)*$") - try self.validate(self.displayName, name: "displayName", parent: name, max: 100) - try self.validate(self.domainName, name: "domainName", parent: name, max: 253) - try self.validate(self.domainName, name: "domainName", parent: name, min: 1) - try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case acmCertificateArn = "AcmCertificateArn" - case displayName = "DisplayName" - case domainName = "DomainName" - case fleetArn = "FleetArn" - } - } - - public struct AssociateDomainResponse: AWSDecodableShape { - public init() {} - } - - public struct AssociateWebsiteAuthorizationProviderRequest: AWSEncodableShape { - /// The authorization provider type. - public let authorizationProviderType: AuthorizationProviderType - /// The domain name of the authorization provider. This applies only to SAML-based authorization providers. - public let domainName: String? - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(authorizationProviderType: AuthorizationProviderType, domainName: String? = nil, fleetArn: String) { - self.authorizationProviderType = authorizationProviderType - self.domainName = domainName - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.domainName, name: "domainName", parent: name, max: 253) - try self.validate(self.domainName, name: "domainName", parent: name, min: 1) - try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case authorizationProviderType = "AuthorizationProviderType" - case domainName = "DomainName" - case fleetArn = "FleetArn" - } - } - - public struct AssociateWebsiteAuthorizationProviderResponse: AWSDecodableShape { - /// A unique identifier for the authorization provider. - public let authorizationProviderId: String? - - @inlinable - public init(authorizationProviderId: String? = nil) { - self.authorizationProviderId = authorizationProviderId - } - - private enum CodingKeys: String, CodingKey { - case authorizationProviderId = "AuthorizationProviderId" - } - } - - public struct AssociateWebsiteCertificateAuthorityRequest: AWSEncodableShape { - /// The root certificate of the CA. - public let certificate: String - /// The certificate name to display. - public let displayName: String? - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(certificate: String, displayName: String? 
= nil, fleetArn: String) { - self.certificate = certificate - self.displayName = displayName - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.certificate, name: "certificate", parent: name, max: 8192) - try self.validate(self.certificate, name: "certificate", parent: name, min: 1) - try self.validate(self.certificate, name: "certificate", parent: name, pattern: "^-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?$") - try self.validate(self.displayName, name: "displayName", parent: name, max: 100) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case certificate = "Certificate" - case displayName = "DisplayName" - case fleetArn = "FleetArn" - } - } - - public struct AssociateWebsiteCertificateAuthorityResponse: AWSDecodableShape { - /// A unique identifier for the CA. - public let websiteCaId: String? - - @inlinable - public init(websiteCaId: String? = nil) { - self.websiteCaId = websiteCaId - } - - private enum CodingKeys: String, CodingKey { - case websiteCaId = "WebsiteCaId" - } - } - - public struct CreateFleetRequest: AWSEncodableShape { - /// The fleet name to display. - public let displayName: String? - /// A unique name for the fleet. - public let fleetName: String - /// The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region. - public let optimizeForEndUserLocation: Bool? - /// The tags to add to the resource. A tag is a key-value pair. - public let tags: [String: String]? - - @inlinable - public init(displayName: String? = nil, fleetName: String, optimizeForEndUserLocation: Bool? = nil, tags: [String: String]? = nil) { - self.displayName = displayName - self.fleetName = fleetName - self.optimizeForEndUserLocation = optimizeForEndUserLocation - self.tags = tags - } - - public func validate(name: String) throws { - try self.validate(self.displayName, name: "displayName", parent: name, max: 100) - try self.validate(self.fleetName, name: "fleetName", parent: name, max: 48) - try self.validate(self.fleetName, name: "fleetName", parent: name, min: 1) - try self.validate(self.fleetName, name: "fleetName", parent: name, pattern: "^[a-z0-9](?:[a-z0-9\\-]{0,46}[a-z0-9])?$") - try self.tags?.forEach { - try validate($0.key, name: "tags.key", parent: name, max: 128) - try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) - } - try self.validate(self.tags, name: "tags", parent: name, max: 50) - try self.validate(self.tags, name: "tags", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case displayName = "DisplayName" - case fleetName = "FleetName" - case optimizeForEndUserLocation = "OptimizeForEndUserLocation" - case tags = "Tags" - } - } - - public struct CreateFleetResponse: AWSDecodableShape { - /// The Amazon Resource Name (ARN) of the fleet. - public let fleetArn: String? - - @inlinable - public init(fleetArn: String? 
= nil) { - self.fleetArn = fleetArn - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - } - } - - public struct DeleteFleetRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(fleetArn: String) { - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - } - } - - public struct DeleteFleetResponse: AWSDecodableShape { - public init() {} - } - - public struct DescribeAuditStreamConfigurationRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(fleetArn: String) { - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - } - } - - public struct DescribeAuditStreamConfigurationResponse: AWSDecodableShape { - /// The ARN of the Amazon Kinesis data stream that will receive the audit events. - public let auditStreamArn: String? - - @inlinable - public init(auditStreamArn: String? = nil) { - self.auditStreamArn = auditStreamArn - } - - private enum CodingKeys: String, CodingKey { - case auditStreamArn = "AuditStreamArn" - } - } - - public struct DescribeCompanyNetworkConfigurationRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(fleetArn: String) { - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - } - } - - public struct DescribeCompanyNetworkConfigurationResponse: AWSDecodableShape { - /// The security groups associated with access to the provided subnets. - public let securityGroupIds: [String]? - /// The subnets used for X-ENI connections from Amazon WorkLink rendering containers. - public let subnetIds: [String]? - /// The VPC with connectivity to associated websites. - public let vpcId: String? - - @inlinable - public init(securityGroupIds: [String]? = nil, subnetIds: [String]? = nil, vpcId: String? = nil) { - self.securityGroupIds = securityGroupIds - self.subnetIds = subnetIds - self.vpcId = vpcId - } - - private enum CodingKeys: String, CodingKey { - case securityGroupIds = "SecurityGroupIds" - case subnetIds = "SubnetIds" - case vpcId = "VpcId" - } - } - - public struct DescribeDevicePolicyConfigurationRequest: AWSEncodableShape { - /// The ARN of the fleet. 
- public let fleetArn: String - - @inlinable - public init(fleetArn: String) { - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - } - } - - public struct DescribeDevicePolicyConfigurationResponse: AWSDecodableShape { - /// The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates. - public let deviceCaCertificate: String? - - @inlinable - public init(deviceCaCertificate: String? = nil) { - self.deviceCaCertificate = deviceCaCertificate - } - - private enum CodingKeys: String, CodingKey { - case deviceCaCertificate = "DeviceCaCertificate" - } - } - - public struct DescribeDeviceRequest: AWSEncodableShape { - /// A unique identifier for a registered user's device. - public let deviceId: String - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(deviceId: String, fleetArn: String) { - self.deviceId = deviceId - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.deviceId, name: "deviceId", parent: name, max: 256) - try self.validate(self.deviceId, name: "deviceId", parent: name, min: 1) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case deviceId = "DeviceId" - case fleetArn = "FleetArn" - } - } - - public struct DescribeDeviceResponse: AWSDecodableShape { - /// The date that the device first signed in to Amazon WorkLink. - public let firstAccessedTime: Date? - /// The date that the device last accessed Amazon WorkLink. - public let lastAccessedTime: Date? - /// The manufacturer of the device. - public let manufacturer: String? - /// The model of the device. - public let model: String? - /// The operating system of the device. - public let operatingSystem: String? - /// The operating system version of the device. - public let operatingSystemVersion: String? - /// The operating system patch level of the device. - public let patchLevel: String? - /// The current state of the device. - public let status: DeviceStatus? - /// The user name associated with the device. - public let username: String? - - @inlinable - public init(firstAccessedTime: Date? = nil, lastAccessedTime: Date? = nil, manufacturer: String? = nil, model: String? = nil, operatingSystem: String? = nil, operatingSystemVersion: String? = nil, patchLevel: String? = nil, status: DeviceStatus? = nil, username: String? 
= nil) { - self.firstAccessedTime = firstAccessedTime - self.lastAccessedTime = lastAccessedTime - self.manufacturer = manufacturer - self.model = model - self.operatingSystem = operatingSystem - self.operatingSystemVersion = operatingSystemVersion - self.patchLevel = patchLevel - self.status = status - self.username = username - } - - private enum CodingKeys: String, CodingKey { - case firstAccessedTime = "FirstAccessedTime" - case lastAccessedTime = "LastAccessedTime" - case manufacturer = "Manufacturer" - case model = "Model" - case operatingSystem = "OperatingSystem" - case operatingSystemVersion = "OperatingSystemVersion" - case patchLevel = "PatchLevel" - case status = "Status" - case username = "Username" - } - } - - public struct DescribeDomainRequest: AWSEncodableShape { - /// The name of the domain. - public let domainName: String - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(domainName: String, fleetArn: String) { - self.domainName = domainName - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.domainName, name: "domainName", parent: name, max: 253) - try self.validate(self.domainName, name: "domainName", parent: name, min: 1) - try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case domainName = "DomainName" - case fleetArn = "FleetArn" - } - } - - public struct DescribeDomainResponse: AWSDecodableShape { - /// The ARN of an issued ACM certificate that is valid for the domain being associated. - public let acmCertificateArn: String? - /// The time that the domain was added. - public let createdTime: Date? - /// The name to display. - public let displayName: String? - /// The name of the domain. - public let domainName: String? - /// The current state for the domain. - public let domainStatus: DomainStatus? - - @inlinable - public init(acmCertificateArn: String? = nil, createdTime: Date? = nil, displayName: String? = nil, domainName: String? = nil, domainStatus: DomainStatus? = nil) { - self.acmCertificateArn = acmCertificateArn - self.createdTime = createdTime - self.displayName = displayName - self.domainName = domainName - self.domainStatus = domainStatus - } - - private enum CodingKeys: String, CodingKey { - case acmCertificateArn = "AcmCertificateArn" - case createdTime = "CreatedTime" - case displayName = "DisplayName" - case domainName = "DomainName" - case domainStatus = "DomainStatus" - } - } - - public struct DescribeFleetMetadataRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the fleet. - public let fleetArn: String - - @inlinable - public init(fleetArn: String) { - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - } - } - - public struct DescribeFleetMetadataResponse: AWSDecodableShape { - /// The identifier used by users to sign in to the Amazon WorkLink app. - public let companyCode: String? - /// The time that the fleet was created. - public let createdTime: Date? - /// The name to display. 
- public let displayName: String? - /// The name of the fleet. - public let fleetName: String? - /// The current state of the fleet. - public let fleetStatus: FleetStatus? - /// The time that the fleet was last updated. - public let lastUpdatedTime: Date? - /// The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region. - public let optimizeForEndUserLocation: Bool? - /// The tags attached to the resource. A tag is a key-value pair. - public let tags: [String: String]? - - @inlinable - public init(companyCode: String? = nil, createdTime: Date? = nil, displayName: String? = nil, fleetName: String? = nil, fleetStatus: FleetStatus? = nil, lastUpdatedTime: Date? = nil, optimizeForEndUserLocation: Bool? = nil, tags: [String: String]? = nil) { - self.companyCode = companyCode - self.createdTime = createdTime - self.displayName = displayName - self.fleetName = fleetName - self.fleetStatus = fleetStatus - self.lastUpdatedTime = lastUpdatedTime - self.optimizeForEndUserLocation = optimizeForEndUserLocation - self.tags = tags - } - - private enum CodingKeys: String, CodingKey { - case companyCode = "CompanyCode" - case createdTime = "CreatedTime" - case displayName = "DisplayName" - case fleetName = "FleetName" - case fleetStatus = "FleetStatus" - case lastUpdatedTime = "LastUpdatedTime" - case optimizeForEndUserLocation = "OptimizeForEndUserLocation" - case tags = "Tags" - } - } - - public struct DescribeIdentityProviderConfigurationRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(fleetArn: String) { - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - } - } - - public struct DescribeIdentityProviderConfigurationResponse: AWSDecodableShape { - /// The SAML metadata document provided by the user’s identity provider. - public let identityProviderSamlMetadata: String? - /// The type of identity provider. - public let identityProviderType: IdentityProviderType? - /// The SAML metadata document uploaded to the user’s identity provider. - public let serviceProviderSamlMetadata: String? - - @inlinable - public init(identityProviderSamlMetadata: String? = nil, identityProviderType: IdentityProviderType? = nil, serviceProviderSamlMetadata: String? = nil) { - self.identityProviderSamlMetadata = identityProviderSamlMetadata - self.identityProviderType = identityProviderType - self.serviceProviderSamlMetadata = serviceProviderSamlMetadata - } - - private enum CodingKeys: String, CodingKey { - case identityProviderSamlMetadata = "IdentityProviderSamlMetadata" - case identityProviderType = "IdentityProviderType" - case serviceProviderSamlMetadata = "ServiceProviderSamlMetadata" - } - } - - public struct DescribeWebsiteCertificateAuthorityRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// A unique identifier for the certificate authority. 
- public let websiteCaId: String - - @inlinable - public init(fleetArn: String, websiteCaId: String) { - self.fleetArn = fleetArn - self.websiteCaId = websiteCaId - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.validate(self.websiteCaId, name: "websiteCaId", parent: name, max: 256) - try self.validate(self.websiteCaId, name: "websiteCaId", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case websiteCaId = "WebsiteCaId" - } - } - - public struct DescribeWebsiteCertificateAuthorityResponse: AWSDecodableShape { - /// The root certificate of the certificate authority. - public let certificate: String? - /// The time that the certificate authority was added. - public let createdTime: Date? - /// The certificate name to display. - public let displayName: String? - - @inlinable - public init(certificate: String? = nil, createdTime: Date? = nil, displayName: String? = nil) { - self.certificate = certificate - self.createdTime = createdTime - self.displayName = displayName - } - - private enum CodingKeys: String, CodingKey { - case certificate = "Certificate" - case createdTime = "CreatedTime" - case displayName = "DisplayName" - } - } - - public struct DeviceSummary: AWSDecodableShape { - /// The ID of the device. - public let deviceId: String? - /// The status of the device. - public let deviceStatus: DeviceStatus? - - @inlinable - public init(deviceId: String? = nil, deviceStatus: DeviceStatus? = nil) { - self.deviceId = deviceId - self.deviceStatus = deviceStatus - } - - private enum CodingKeys: String, CodingKey { - case deviceId = "DeviceId" - case deviceStatus = "DeviceStatus" - } - } - - public struct DisassociateDomainRequest: AWSEncodableShape { - /// The name of the domain. - public let domainName: String - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(domainName: String, fleetArn: String) { - self.domainName = domainName - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.domainName, name: "domainName", parent: name, max: 253) - try self.validate(self.domainName, name: "domainName", parent: name, min: 1) - try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case domainName = "DomainName" - case fleetArn = "FleetArn" - } - } - - public struct DisassociateDomainResponse: AWSDecodableShape { - public init() {} - } - - public struct DisassociateWebsiteAuthorizationProviderRequest: AWSEncodableShape { - /// A unique identifier for the authorization provider. - public let authorizationProviderId: String - /// The ARN of the fleet. 
- public let fleetArn: String - - @inlinable - public init(authorizationProviderId: String, fleetArn: String) { - self.authorizationProviderId = authorizationProviderId - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.authorizationProviderId, name: "authorizationProviderId", parent: name, max: 256) - try self.validate(self.authorizationProviderId, name: "authorizationProviderId", parent: name, min: 1) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case authorizationProviderId = "AuthorizationProviderId" - case fleetArn = "FleetArn" - } - } - - public struct DisassociateWebsiteAuthorizationProviderResponse: AWSDecodableShape { - public init() {} - } - - public struct DisassociateWebsiteCertificateAuthorityRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// A unique identifier for the CA. - public let websiteCaId: String - - @inlinable - public init(fleetArn: String, websiteCaId: String) { - self.fleetArn = fleetArn - self.websiteCaId = websiteCaId - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.validate(self.websiteCaId, name: "websiteCaId", parent: name, max: 256) - try self.validate(self.websiteCaId, name: "websiteCaId", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case websiteCaId = "WebsiteCaId" - } - } - - public struct DisassociateWebsiteCertificateAuthorityResponse: AWSDecodableShape { - public init() {} - } - - public struct DomainSummary: AWSDecodableShape { - /// The time that the domain was created. - public let createdTime: Date - /// The name to display. - public let displayName: String? - /// The name of the domain. - public let domainName: String - /// The status of the domain. - public let domainStatus: DomainStatus - - @inlinable - public init(createdTime: Date, displayName: String? = nil, domainName: String, domainStatus: DomainStatus) { - self.createdTime = createdTime - self.displayName = displayName - self.domainName = domainName - self.domainStatus = domainStatus - } - - private enum CodingKeys: String, CodingKey { - case createdTime = "CreatedTime" - case displayName = "DisplayName" - case domainName = "DomainName" - case domainStatus = "DomainStatus" - } - } - - public struct FleetSummary: AWSDecodableShape { - /// The identifier used by users to sign into the Amazon WorkLink app. - public let companyCode: String? - /// The time when the fleet was created. - public let createdTime: Date? - /// The name of the fleet to display. - public let displayName: String? - /// The Amazon Resource Name (ARN) of the fleet. - public let fleetArn: String? - /// The name of the fleet. - public let fleetName: String? - /// The status of the fleet. - public let fleetStatus: FleetStatus? - /// The time when the fleet was last updated. - public let lastUpdatedTime: Date? - /// The tags attached to the resource. A tag is a key-value pair. - public let tags: [String: String]? - - @inlinable - public init(companyCode: String? = nil, createdTime: Date? = nil, displayName: String? = nil, fleetArn: String? = nil, fleetName: String? = nil, fleetStatus: FleetStatus? = nil, lastUpdatedTime: Date? 
= nil, tags: [String: String]? = nil) { - self.companyCode = companyCode - self.createdTime = createdTime - self.displayName = displayName - self.fleetArn = fleetArn - self.fleetName = fleetName - self.fleetStatus = fleetStatus - self.lastUpdatedTime = lastUpdatedTime - self.tags = tags - } - - private enum CodingKeys: String, CodingKey { - case companyCode = "CompanyCode" - case createdTime = "CreatedTime" - case displayName = "DisplayName" - case fleetArn = "FleetArn" - case fleetName = "FleetName" - case fleetStatus = "FleetStatus" - case lastUpdatedTime = "LastUpdatedTime" - case tags = "Tags" - } - } - - public struct ListDevicesRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// The maximum number of results to be included in the next page. - public let maxResults: Int? - /// The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - public let nextToken: String? - - @inlinable - public init(fleetArn: String, maxResults: Int? = nil, nextToken: String? = nil) { - self.fleetArn = fleetArn - self.maxResults = maxResults - self.nextToken = nextToken - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w\\-]+$") - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case maxResults = "MaxResults" - case nextToken = "NextToken" - } - } - - public struct ListDevicesResponse: AWSDecodableShape { - /// Information about the devices. - public let devices: [DeviceSummary]? - /// The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null. - public let nextToken: String? - - @inlinable - public init(devices: [DeviceSummary]? = nil, nextToken: String? = nil) { - self.devices = devices - self.nextToken = nextToken - } - - private enum CodingKeys: String, CodingKey { - case devices = "Devices" - case nextToken = "NextToken" - } - } - - public struct ListDomainsRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// The maximum number of results to be included in the next page. - public let maxResults: Int? - /// The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - public let nextToken: String? - - @inlinable - public init(fleetArn: String, maxResults: Int? = nil, nextToken: String? 
= nil) { - self.fleetArn = fleetArn - self.maxResults = maxResults - self.nextToken = nextToken - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w\\-]+$") - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case maxResults = "MaxResults" - case nextToken = "NextToken" - } - } - - public struct ListDomainsResponse: AWSDecodableShape { - /// Information about the domains. - public let domains: [DomainSummary]? - /// The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null. - public let nextToken: String? - - @inlinable - public init(domains: [DomainSummary]? = nil, nextToken: String? = nil) { - self.domains = domains - self.nextToken = nextToken - } - - private enum CodingKeys: String, CodingKey { - case domains = "Domains" - case nextToken = "NextToken" - } - } - - public struct ListFleetsRequest: AWSEncodableShape { - /// The maximum number of results to be included in the next page. - public let maxResults: Int? - /// The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - public let nextToken: String? - - @inlinable - public init(maxResults: Int? = nil, nextToken: String? = nil) { - self.maxResults = maxResults - self.nextToken = nextToken - } - - public func validate(name: String) throws { - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w\\-]+$") - } - - private enum CodingKeys: String, CodingKey { - case maxResults = "MaxResults" - case nextToken = "NextToken" - } - } - - public struct ListFleetsResponse: AWSDecodableShape { - /// The summary list of the fleets. - public let fleetSummaryList: [FleetSummary]? - /// The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null. - public let nextToken: String? - - @inlinable - public init(fleetSummaryList: [FleetSummary]? = nil, nextToken: String? = nil) { - self.fleetSummaryList = fleetSummaryList - self.nextToken = nextToken - } - - private enum CodingKeys: String, CodingKey { - case fleetSummaryList = "FleetSummaryList" - case nextToken = "NextToken" - } - } - - public struct ListTagsForResourceRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the fleet. - public let resourceArn: String - - @inlinable - public init(resourceArn: String) { - self.resourceArn = resourceArn - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "ResourceArn") - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) - } - - private enum CodingKeys: CodingKey {} - } - - public struct ListTagsForResourceResponse: AWSDecodableShape { - /// The tags attached to the resource. A tag is a key-value pair. - public let tags: [String: String]? - - @inlinable - public init(tags: [String: String]? = nil) { - self.tags = tags - } - - private enum CodingKeys: String, CodingKey { - case tags = "Tags" - } - } - - public struct ListWebsiteAuthorizationProvidersRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// The maximum number of results to be included in the next page. - public let maxResults: Int? - /// The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - public let nextToken: String? - - @inlinable - public init(fleetArn: String, maxResults: Int? = nil, nextToken: String? = nil) { - self.fleetArn = fleetArn - self.maxResults = maxResults - self.nextToken = nextToken - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w\\-]+$") - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case maxResults = "MaxResults" - case nextToken = "NextToken" - } - } - - public struct ListWebsiteAuthorizationProvidersResponse: AWSDecodableShape { - /// The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - public let nextToken: String? - /// The website authorization providers. - public let websiteAuthorizationProviders: [WebsiteAuthorizationProviderSummary]? - - @inlinable - public init(nextToken: String? = nil, websiteAuthorizationProviders: [WebsiteAuthorizationProviderSummary]? = nil) { - self.nextToken = nextToken - self.websiteAuthorizationProviders = websiteAuthorizationProviders - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "NextToken" - case websiteAuthorizationProviders = "WebsiteAuthorizationProviders" - } - } - - public struct ListWebsiteCertificateAuthoritiesRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// The maximum number of results to be included in the next page. - public let maxResults: Int? - /// The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page. - public let nextToken: String? - - @inlinable - public init(fleetArn: String, maxResults: Int? = nil, nextToken: String? 
= nil) { - self.fleetArn = fleetArn - self.maxResults = maxResults - self.nextToken = nextToken - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, max: 4096) - try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) - try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^[\\w\\-]+$") - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case maxResults = "MaxResults" - case nextToken = "NextToken" - } - } - - public struct ListWebsiteCertificateAuthoritiesResponse: AWSDecodableShape { - /// The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null. - public let nextToken: String? - /// Information about the certificates. - public let websiteCertificateAuthorities: [WebsiteCaSummary]? - - @inlinable - public init(nextToken: String? = nil, websiteCertificateAuthorities: [WebsiteCaSummary]? = nil) { - self.nextToken = nextToken - self.websiteCertificateAuthorities = websiteCertificateAuthorities - } - - private enum CodingKeys: String, CodingKey { - case nextToken = "NextToken" - case websiteCertificateAuthorities = "WebsiteCertificateAuthorities" - } - } - - public struct RestoreDomainAccessRequest: AWSEncodableShape { - /// The name of the domain. - public let domainName: String - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(domainName: String, fleetArn: String) { - self.domainName = domainName - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.domainName, name: "domainName", parent: name, max: 253) - try self.validate(self.domainName, name: "domainName", parent: name, min: 1) - try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case domainName = "DomainName" - case fleetArn = "FleetArn" - } - } - - public struct RestoreDomainAccessResponse: AWSDecodableShape { - public init() {} - } - - public struct RevokeDomainAccessRequest: AWSEncodableShape { - /// The name of the domain. - public let domainName: String - /// The ARN of the fleet. 
- public let fleetArn: String - - @inlinable - public init(domainName: String, fleetArn: String) { - self.domainName = domainName - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.domainName, name: "domainName", parent: name, max: 253) - try self.validate(self.domainName, name: "domainName", parent: name, min: 1) - try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case domainName = "DomainName" - case fleetArn = "FleetArn" - } - } - - public struct RevokeDomainAccessResponse: AWSDecodableShape { - public init() {} - } - - public struct SignOutUserRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// The name of the user. - public let username: String - - @inlinable - public init(fleetArn: String, username: String) { - self.fleetArn = fleetArn - self.username = username - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.validate(self.username, name: "username", parent: name, max: 256) - try self.validate(self.username, name: "username", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case username = "Username" - } - } - - public struct SignOutUserResponse: AWSDecodableShape { - public init() {} - } - - public struct TagResourceRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the fleet. - public let resourceArn: String - /// The tags to add to the resource. A tag is a key-value pair. - public let tags: [String: String] - - @inlinable - public init(resourceArn: String, tags: [String: String]) { - self.resourceArn = resourceArn - self.tags = tags - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - var container = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "ResourceArn") - try container.encode(self.tags, forKey: .tags) - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) - try self.tags.forEach { - try validate($0.key, name: "tags.key", parent: name, max: 128) - try validate($0.key, name: "tags.key", parent: name, min: 1) - try validate($0.key, name: "tags.key", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - try validate($0.value, name: "tags[\"\($0.key)\"]", parent: name, max: 256) - } - try self.validate(self.tags, name: "tags", parent: name, max: 50) - try self.validate(self.tags, name: "tags", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case tags = "Tags" - } - } - - public struct TagResourceResponse: AWSDecodableShape { - public init() {} - } - - public struct UntagResourceRequest: AWSEncodableShape { - /// The Amazon Resource Name (ARN) of the fleet. - public let resourceArn: String - /// The list of tag keys to remove from the resource. 
- public let tagKeys: [String] - - @inlinable - public init(resourceArn: String, tagKeys: [String]) { - self.resourceArn = resourceArn - self.tagKeys = tagKeys - } - - public func encode(to encoder: Encoder) throws { - let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer - _ = encoder.container(keyedBy: CodingKeys.self) - request.encodePath(self.resourceArn, key: "ResourceArn") - request.encodeQuery(self.tagKeys, key: "tagKeys") - } - - public func validate(name: String) throws { - try self.validate(self.resourceArn, name: "resourceArn", parent: name, max: 2048) - try self.validate(self.resourceArn, name: "resourceArn", parent: name, min: 20) - try self.tagKeys.forEach { - try validate($0, name: "tagKeys[]", parent: name, max: 128) - try validate($0, name: "tagKeys[]", parent: name, min: 1) - try validate($0, name: "tagKeys[]", parent: name, pattern: "^(?!aws:)[a-zA-Z+-=._:/]+$") - } - try self.validate(self.tagKeys, name: "tagKeys", parent: name, max: 50) - try self.validate(self.tagKeys, name: "tagKeys", parent: name, min: 1) - } - - private enum CodingKeys: CodingKey {} - } - - public struct UntagResourceResponse: AWSDecodableShape { - public init() {} - } - - public struct UpdateAuditStreamConfigurationRequest: AWSEncodableShape { - /// The ARN of the Amazon Kinesis data stream that receives the audit events. - public let auditStreamArn: String? - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(auditStreamArn: String? = nil, fleetArn: String) { - self.auditStreamArn = auditStreamArn - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.auditStreamArn, name: "auditStreamArn", parent: name, pattern: "^arn:aws:kinesis:.+:[0-9]{12}:stream/AmazonWorkLink-.*$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case auditStreamArn = "AuditStreamArn" - case fleetArn = "FleetArn" - } - } - - public struct UpdateAuditStreamConfigurationResponse: AWSDecodableShape { - public init() {} - } - - public struct UpdateCompanyNetworkConfigurationRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// The security groups associated with access to the provided subnets. - public let securityGroupIds: [String] - /// The subnets used for X-ENI connections from Amazon WorkLink rendering containers. - public let subnetIds: [String] - /// The VPC with connectivity to associated websites. 
- public let vpcId: String - - @inlinable - public init(fleetArn: String, securityGroupIds: [String], subnetIds: [String], vpcId: String) { - self.fleetArn = fleetArn - self.securityGroupIds = securityGroupIds - self.subnetIds = subnetIds - self.vpcId = vpcId - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.securityGroupIds.forEach { - try validate($0, name: "securityGroupIds[]", parent: name, pattern: "^sg-([0-9a-f]{8}|[0-9a-f]{17})$") - } - try self.validate(self.securityGroupIds, name: "securityGroupIds", parent: name, max: 5) - try self.subnetIds.forEach { - try validate($0, name: "subnetIds[]", parent: name, pattern: "^subnet-([0-9a-f]{8}|[0-9a-f]{17})$") - } - try self.validate(self.vpcId, name: "vpcId", parent: name, pattern: "^vpc-([0-9a-f]{8}|[0-9a-f]{17})$") - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case securityGroupIds = "SecurityGroupIds" - case subnetIds = "SubnetIds" - case vpcId = "VpcId" - } - } - - public struct UpdateCompanyNetworkConfigurationResponse: AWSDecodableShape { - public init() {} - } - - public struct UpdateDevicePolicyConfigurationRequest: AWSEncodableShape { - /// The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates. - public let deviceCaCertificate: String? - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(deviceCaCertificate: String? = nil, fleetArn: String) { - self.deviceCaCertificate = deviceCaCertificate - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.deviceCaCertificate, name: "deviceCaCertificate", parent: name, max: 32768) - try self.validate(self.deviceCaCertificate, name: "deviceCaCertificate", parent: name, min: 1) - try self.validate(self.deviceCaCertificate, name: "deviceCaCertificate", parent: name, pattern: "^(-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}\\u000D?\\u000A)*-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case deviceCaCertificate = "DeviceCaCertificate" - case fleetArn = "FleetArn" - } - } - - public struct UpdateDevicePolicyConfigurationResponse: AWSDecodableShape { - public init() {} - } - - public struct UpdateDomainMetadataRequest: AWSEncodableShape { - /// The name to display. - public let displayName: String? - /// The name of the domain. - public let domainName: String - /// The ARN of the fleet. - public let fleetArn: String - - @inlinable - public init(displayName: String? 
= nil, domainName: String, fleetArn: String) { - self.displayName = displayName - self.domainName = domainName - self.fleetArn = fleetArn - } - - public func validate(name: String) throws { - try self.validate(self.displayName, name: "displayName", parent: name, max: 100) - try self.validate(self.domainName, name: "domainName", parent: name, max: 253) - try self.validate(self.domainName, name: "domainName", parent: name, min: 1) - try self.validate(self.domainName, name: "domainName", parent: name, pattern: "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$") - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case displayName = "DisplayName" - case domainName = "DomainName" - case fleetArn = "FleetArn" - } - } - - public struct UpdateDomainMetadataResponse: AWSDecodableShape { - public init() {} - } - - public struct UpdateFleetMetadataRequest: AWSEncodableShape { - /// The fleet name to display. The existing DisplayName is unset if null is passed. - public let displayName: String? - /// The ARN of the fleet. - public let fleetArn: String - /// The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region. - public let optimizeForEndUserLocation: Bool? - - @inlinable - public init(displayName: String? = nil, fleetArn: String, optimizeForEndUserLocation: Bool? = nil) { - self.displayName = displayName - self.fleetArn = fleetArn - self.optimizeForEndUserLocation = optimizeForEndUserLocation - } - - public func validate(name: String) throws { - try self.validate(self.displayName, name: "displayName", parent: name, max: 100) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - } - - private enum CodingKeys: String, CodingKey { - case displayName = "DisplayName" - case fleetArn = "FleetArn" - case optimizeForEndUserLocation = "OptimizeForEndUserLocation" - } - } - - public struct UpdateFleetMetadataResponse: AWSDecodableShape { - public init() {} - } - - public struct UpdateIdentityProviderConfigurationRequest: AWSEncodableShape { - /// The ARN of the fleet. - public let fleetArn: String - /// The SAML metadata document provided by the customer’s identity provider. The existing IdentityProviderSamlMetadata is unset if null is passed. - public let identityProviderSamlMetadata: String? - /// The type of identity provider. - public let identityProviderType: IdentityProviderType - - @inlinable - public init(fleetArn: String, identityProviderSamlMetadata: String? 
= nil, identityProviderType: IdentityProviderType) { - self.fleetArn = fleetArn - self.identityProviderSamlMetadata = identityProviderSamlMetadata - self.identityProviderType = identityProviderType - } - - public func validate(name: String) throws { - try self.validate(self.fleetArn, name: "fleetArn", parent: name, max: 2048) - try self.validate(self.fleetArn, name: "fleetArn", parent: name, min: 20) - try self.validate(self.identityProviderSamlMetadata, name: "identityProviderSamlMetadata", parent: name, max: 204800) - try self.validate(self.identityProviderSamlMetadata, name: "identityProviderSamlMetadata", parent: name, min: 1) - } - - private enum CodingKeys: String, CodingKey { - case fleetArn = "FleetArn" - case identityProviderSamlMetadata = "IdentityProviderSamlMetadata" - case identityProviderType = "IdentityProviderType" - } - } - - public struct UpdateIdentityProviderConfigurationResponse: AWSDecodableShape { - public init() {} - } - - public struct WebsiteAuthorizationProviderSummary: AWSDecodableShape { - /// A unique identifier for the authorization provider. - public let authorizationProviderId: String? - /// The authorization provider type. - public let authorizationProviderType: AuthorizationProviderType - /// The time of creation. - public let createdTime: Date? - /// The domain name of the authorization provider. This applies only to SAML-based authorization providers. - public let domainName: String? - - @inlinable - public init(authorizationProviderId: String? = nil, authorizationProviderType: AuthorizationProviderType, createdTime: Date? = nil, domainName: String? = nil) { - self.authorizationProviderId = authorizationProviderId - self.authorizationProviderType = authorizationProviderType - self.createdTime = createdTime - self.domainName = domainName - } - - private enum CodingKeys: String, CodingKey { - case authorizationProviderId = "AuthorizationProviderId" - case authorizationProviderType = "AuthorizationProviderType" - case createdTime = "CreatedTime" - case domainName = "DomainName" - } - } - - public struct WebsiteCaSummary: AWSDecodableShape { - /// The time when the CA was added. - public let createdTime: Date? - /// The name to display. - public let displayName: String? - /// A unique identifier for the CA. - public let websiteCaId: String? - - @inlinable - public init(createdTime: Date? = nil, displayName: String? = nil, websiteCaId: String? = nil) { - self.createdTime = createdTime - self.displayName = displayName - self.websiteCaId = websiteCaId - } - - private enum CodingKeys: String, CodingKey { - case createdTime = "CreatedTime" - case displayName = "DisplayName" - case websiteCaId = "WebsiteCaId" - } - } -} - -// MARK: - Errors - -/// Error enum for WorkLink -public struct WorkLinkErrorType: AWSErrorType { - enum Code: String { - case internalServerErrorException = "InternalServerErrorException" - case invalidRequestException = "InvalidRequestException" - case resourceAlreadyExistsException = "ResourceAlreadyExistsException" - case resourceNotFoundException = "ResourceNotFoundException" - case tooManyRequestsException = "TooManyRequestsException" - case unauthorizedException = "UnauthorizedException" - } - - private let error: Code - public let context: AWSErrorContext? 
- - /// initialize WorkLink - public init?(errorCode: String, context: AWSErrorContext) { - guard let error = Code(rawValue: errorCode) else { return nil } - self.error = error - self.context = context - } - - internal init(_ error: Code) { - self.error = error - self.context = nil - } - - /// return error code string - public var errorCode: String { self.error.rawValue } - - /// The service is temporarily unavailable. - public static var internalServerErrorException: Self { .init(.internalServerErrorException) } - /// The request is not valid. - public static var invalidRequestException: Self { .init(.invalidRequestException) } - /// The resource already exists. - public static var resourceAlreadyExistsException: Self { .init(.resourceAlreadyExistsException) } - /// The requested resource was not found. - public static var resourceNotFoundException: Self { .init(.resourceNotFoundException) } - /// The number of requests exceeds the limit. - public static var tooManyRequestsException: Self { .init(.tooManyRequestsException) } - /// You are not authorized to perform this action. - public static var unauthorizedException: Self { .init(.unauthorizedException) } -} - -extension WorkLinkErrorType: Equatable { - public static func == (lhs: WorkLinkErrorType, rhs: WorkLinkErrorType) -> Bool { - lhs.error == rhs.error - } -} - -extension WorkLinkErrorType: CustomStringConvertible { - public var description: String { - return "\(self.error.rawValue): \(self.message ?? "")" - } -} diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift index 79d89ff33e..d199068c39 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_api.swift @@ -616,7 +616,7 @@ public struct WorkSpaces: AWSService { return try await self.createWorkspaceImage(input, logger: logger) } - /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles. User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core. Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing? + /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because DCV (formerly WSP) is the default protocol for those bundles. User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core. Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing? 
@Sendable @inlinable public func createWorkspaces(_ input: CreateWorkspacesRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> CreateWorkspacesResult { @@ -629,7 +629,7 @@ public struct WorkSpaces: AWSService { logger: logger ) } - /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles. User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core. Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing? + /// Creates one or more WorkSpaces. This operation is asynchronous and returns before the WorkSpaces are created. The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because DCV (formerly WSP) is the default protocol for those bundles. User-decoupled WorkSpaces are only supported by Amazon WorkSpaces Core. Review your running mode to ensure you are using one that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing? /// /// Parameters: /// - workspaces: The WorkSpaces to create. You can specify up to 25 WorkSpaces. @@ -1987,11 +1987,11 @@ public struct WorkSpaces: AWSService { /// Imports the specified Windows 10 or 11 Bring Your Own License (BYOL) image into Amazon WorkSpaces. The image must be an already licensed Amazon EC2 image that is in your Amazon Web Services account, and you must own the image. For more information about creating BYOL images, see Bring Your Own Windows Desktop Licenses. /// /// Parameters: - /// - applications: If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses. Although this parameter is an array, only one item is allowed at this time. During the image import process, non-GPU WSP WorkSpaces with Windows 11 support only Microsoft_Office_2019. GPU WSP WorkSpaces with Windows 11 do not support Office installation. + /// - applications: If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses. Although this parameter is an array, only one item is allowed at this time. During the image import process, non-GPU DCV (formerly WSP) WorkSpaces with Windows 11 support only Microsoft_Office_2019. GPU DCV (formerly WSP) WorkSpaces with Windows 11 do not support Office installation. /// - ec2ImageId: The identifier of the EC2 image. /// - imageDescription: The description of the WorkSpace image. /// - imageName: The name of the WorkSpace image. 
- /// - ingestionProcess: The ingestion process to be used when importing the image, depending on which protocol you want to use for your BYOL Workspace image, either PCoIP, WorkSpaces Streaming Protocol (WSP), or bring your own protocol (BYOP). To use WSP, specify a value that ends in _WSP. To use PCoIP, specify a value that does not end in _WSP. To use BYOP, specify a value that ends in _BYOP. For non-GPU-enabled bundles (bundles other than Graphics or GraphicsPro), specify BYOL_REGULAR, BYOL_REGULAR_WSP, or BYOL_REGULAR_BYOP, depending on the protocol. The BYOL_REGULAR_BYOP and BYOL_GRAPHICS_G4DN_BYOP values are only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use these values. For more information, see Amazon WorkSpaces Core. + /// - ingestionProcess: The ingestion process to be used when importing the image, depending on which protocol you want to use for your BYOL Workspace image, either PCoIP, DCV, or bring your own protocol (BYOP). To use WSP, specify a value that ends in _DCV. To use PCoIP, specify a value that does not end in _DCV. To use BYOP, specify a value that ends in _BYOP. For non-GPU-enabled bundles (bundles other than Graphics or GraphicsPro), specify BYOL_REGULAR, BYOL_REGULAR_DCV, or BYOL_REGULAR_BYOP, depending on the protocol. The BYOL_REGULAR_BYOP and BYOL_GRAPHICS_G4DN_BYOP values are only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use these values. For more information, see Amazon WorkSpaces Core. /// - tags: The tags. Each WorkSpaces resource can have a maximum of 50 tags. /// - logger: Logger use during operation @inlinable diff --git a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift index 02e323c390..8a355e8d91 100644 --- a/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift +++ b/Sources/Soto/Services/WorkSpaces/WorkSpaces_shapes.swift @@ -390,6 +390,7 @@ extension WorkSpaces { public enum WorkspaceImageErrorDetailCode: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { case additionalDrivesAttached = "AdditionalDrivesAttached" case antiVirusInstalled = "AntiVirusInstalled" + case appxPackagesInstalled = "AppXPackagesInstalled" case autoLogonEnabled = "AutoLogonEnabled" case autoMountDisabled = "AutoMountDisabled" case azureDomainJoined = "AzureDomainJoined" @@ -407,8 +408,10 @@ extension WorkSpaces { case pcoipAgentInstalled = "PCoIPAgentInstalled" case pendingReboot = "PendingReboot" case realtimeUniversalDisabled = "RealTimeUniversalDisabled" + case reservedStorageInUse = "ReservedStorageInUse" case sixtyFourBitOs = "Requires64BitOS" case uefiNotSupported = "UEFINotSupported" + case unknownError = "UnknownError" case vmwareToolsInstalled = "VMWareToolsInstalled" case windowsUpdatesEnabled = "WindowsUpdatesEnabled" case workspacesByolAccountDisabled = "WorkspacesBYOLAccountDisabled" @@ -3731,7 +3734,7 @@ extension WorkSpaces { } public struct ImportWorkspaceImageRequest: AWSEncodableShape { - /// If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses. Although this parameter is an array, only one item is allowed at this time. During the image import process, non-GPU WSP WorkSpaces with Windows 11 support only Microsoft_Office_2019. GPU WSP WorkSpaces with Windows 11 do not support Office installation. 
+ /// If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses. Although this parameter is an array, only one item is allowed at this time. During the image import process, non-GPU DCV (formerly WSP) WorkSpaces with Windows 11 support only Microsoft_Office_2019. GPU DCV (formerly WSP) WorkSpaces with Windows 11 do not support Office installation. public let applications: [Application]? /// The identifier of the EC2 image. public let ec2ImageId: String @@ -3739,7 +3742,7 @@ extension WorkSpaces { public let imageDescription: String /// The name of the WorkSpace image. public let imageName: String - /// The ingestion process to be used when importing the image, depending on which protocol you want to use for your BYOL Workspace image, either PCoIP, WorkSpaces Streaming Protocol (WSP), or bring your own protocol (BYOP). To use WSP, specify a value that ends in _WSP. To use PCoIP, specify a value that does not end in _WSP. To use BYOP, specify a value that ends in _BYOP. For non-GPU-enabled bundles (bundles other than Graphics or GraphicsPro), specify BYOL_REGULAR, BYOL_REGULAR_WSP, or BYOL_REGULAR_BYOP, depending on the protocol. The BYOL_REGULAR_BYOP and BYOL_GRAPHICS_G4DN_BYOP values are only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use these values. For more information, see Amazon WorkSpaces Core. + /// The ingestion process to be used when importing the image, depending on which protocol you want to use for your BYOL Workspace image, either PCoIP, DCV, or bring your own protocol (BYOP). To use WSP, specify a value that ends in _DCV. To use PCoIP, specify a value that does not end in _DCV. To use BYOP, specify a value that ends in _BYOP. For non-GPU-enabled bundles (bundles other than Graphics or GraphicsPro), specify BYOL_REGULAR, BYOL_REGULAR_DCV, or BYOL_REGULAR_BYOP, depending on the protocol. The BYOL_REGULAR_BYOP and BYOL_GRAPHICS_G4DN_BYOP values are only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use these values. For more information, see Amazon WorkSpaces Core. public let ingestionProcess: WorkspaceImageIngestionProcess /// The tags. Each WorkSpaces resource can have a maximum of 50 tags. public let tags: [Tag]? @@ -6046,7 +6049,7 @@ extension WorkSpaces { public let computeTypeName: Compute? /// The name of the operating system. public let operatingSystemName: OperatingSystemName? - /// The protocol. For more information, see Protocols for Amazon WorkSpaces. Only available for WorkSpaces created with PCoIP bundles. The Protocols property is case sensitive. Ensure you use PCOIP or WSP. Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn). + /// The protocol. For more information, see Protocols for Amazon WorkSpaces. Only available for WorkSpaces created with PCoIP bundles. The Protocols property is case sensitive. Ensure you use PCOIP or DCV (formerly WSP). Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn). public let protocols: [`Protocol`]? /// The size of the root volume. For important information about how to modify the size of the root and user volumes, see Modify a WorkSpace. public let rootVolumeSizeGib: Int? 
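The WSP-to-DCV rename above only touches documentation, but the `_DCV` ingestion values it describes are what callers now pass when importing BYOL images. A minimal sketch, assuming the convenience overload generated from the parameter list above and an enum value with raw value `BYOL_REGULAR_DCV`; the AMI ID, image name, and description are placeholders:

```swift
import SotoWorkSpaces

// Sketch: import a BYOL image using the DCV (formerly WSP) ingestion process.
// The exact enum case for "BYOL_REGULAR_DCV" is not shown in this diff, so the
// value is resolved through the raw-value initializer.
func importByolImageWithDcv(client: AWSClient) async throws {
    let workSpaces = WorkSpaces(client: client, region: .useast1)
    guard let ingestion = WorkSpaces.WorkspaceImageIngestionProcess(rawValue: "BYOL_REGULAR_DCV") else {
        return // this SDK build does not know the DCV ingestion value
    }
    let result = try await workSpaces.importWorkspaceImage(
        ec2ImageId: "ami-0123456789abcdef0",                        // placeholder licensed EC2 image
        imageDescription: "BYOL image ingested with the DCV protocol",
        imageName: "example-byol-dcv",
        ingestionProcess: ingestion                                 // value ends in _DCV to select DCV
    )
    print("Started image import:", result.imageId ?? "unknown")
}
```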
diff --git a/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_api.swift b/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_api.swift index cc5343aba9..93582f7b48 100644 --- a/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_api.swift +++ b/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_api.swift @@ -337,7 +337,7 @@ public struct WorkSpacesWeb: AWSService { /// /// Parameters: /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. If you do not specify a client token, one is automatically generated by the Amazon Web Services SDK. - /// - identityProviderDetails: The identity provider details. The following list describes the provider detail keys for each identity provider type. For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional + /// - identityProviderDetails: The identity provider details. The following list describes the provider detail keys for each identity provider type. For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional /// - identityProviderName: The identity provider name. /// - identityProviderType: The identity provider type. /// - portalArn: The ARN of the web portal. @@ -469,7 +469,7 @@ public struct WorkSpacesWeb: AWSService { /// /// Parameters: /// - additionalEncryptionContext: The additional encryption context of the portal. - /// - authenticationType: The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. 
IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. + /// - authenticationType: The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. If you do not specify a client token, one is automatically generated by the Amazon Web Services SDK. /// - customerManagedKey: The customer managed key of the web portal. /// - displayName: The name of the web portal. This is not visible to users who log into the web portal. @@ -1043,6 +1043,38 @@ public struct WorkSpacesWeb: AWSService { return try await self.disassociateUserSettings(input, logger: logger) } + /// Expires an active secure browser session. + @Sendable + @inlinable + public func expireSession(_ input: ExpireSessionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ExpireSessionResponse { + try await self.client.execute( + operation: "ExpireSession", + path: "/portals/{portalId}/sessions/{sessionId}", + httpMethod: .DELETE, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Expires an active secure browser session. + /// + /// Parameters: + /// - portalId: The ID of the web portal for the session. + /// - sessionId: The ID of the session to expire. + /// - logger: Logger use during operation + @inlinable + public func expireSession( + portalId: String, + sessionId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ExpireSessionResponse { + let input = ExpireSessionRequest( + portalId: portalId, + sessionId: sessionId + ) + return try await self.expireSession(input, logger: logger) + } + /// Gets browser settings. @Sendable @inlinable @@ -1217,6 +1249,38 @@ public struct WorkSpacesWeb: AWSService { return try await self.getPortalServiceProviderMetadata(input, logger: logger) } + /// Gets information for a secure browser session. + @Sendable + @inlinable + public func getSession(_ input: GetSessionRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> GetSessionResponse { + try await self.client.execute( + operation: "GetSession", + path: "/portals/{portalId}/sessions/{sessionId}", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Gets information for a secure browser session. + /// + /// Parameters: + /// - portalId: The ID of the web portal for the session. + /// - sessionId: The ID of the session. 
+ /// - logger: Logger use during operation + @inlinable + public func getSession( + portalId: String, + sessionId: String, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> GetSessionResponse { + let input = GetSessionRequest( + portalId: portalId, + sessionId: sessionId + ) + return try await self.getSession(input, logger: logger) + } + /// Gets the trust store. @Sendable @inlinable @@ -1499,6 +1563,53 @@ public struct WorkSpacesWeb: AWSService { return try await self.listPortals(input, logger: logger) } + /// Lists information for multiple secure browser sessions from a specific portal. + @Sendable + @inlinable + public func listSessions(_ input: ListSessionsRequest, logger: Logger = AWSClient.loggingDisabled) async throws -> ListSessionsResponse { + try await self.client.execute( + operation: "ListSessions", + path: "/portals/{portalId}/sessions", + httpMethod: .GET, + serviceConfig: self.config, + input: input, + logger: logger + ) + } + /// Lists information for multiple secure browser sessions from a specific portal. + /// + /// Parameters: + /// - maxResults: The maximum number of results to be included in the next page. + /// - nextToken: The pagination token used to retrieve the next page of results for this operation. + /// - portalId: The ID of the web portal for the sessions. + /// - sessionId: The ID of the session. + /// - sortBy: The method in which the returned sessions should be sorted. + /// - status: The status of the session. + /// - username: The username of the session. + /// - logger: Logger use during operation + @inlinable + public func listSessions( + maxResults: Int? = nil, + nextToken: String? = nil, + portalId: String, + sessionId: String? = nil, + sortBy: SessionSortBy? = nil, + status: SessionStatus? = nil, + username: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) async throws -> ListSessionsResponse { + let input = ListSessionsRequest( + maxResults: maxResults, + nextToken: nextToken, + portalId: portalId, + sessionId: sessionId, + sortBy: sortBy, + status: status, + username: username + ) + return try await self.listSessions(input, logger: logger) + } + /// Retrieves a list of tags for a resource. @Sendable @inlinable @@ -1779,7 +1890,7 @@ public struct WorkSpacesWeb: AWSService { /// Parameters: /// - clientToken: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request. If you do not specify a client token, one is automatically generated by the Amazon Web Services SDK. /// - identityProviderArn: The ARN of the identity provider. - /// - identityProviderDetails: The details of the identity provider. The following list describes the provider detail keys for each identity provider type. 
For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional + /// - identityProviderDetails: The details of the identity provider. The following list describes the provider detail keys for each identity provider type. For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional /// - identityProviderName: The name of the identity provider. /// - identityProviderType: The type of the identity provider. /// - logger: Logger use during operation @@ -1900,7 +2011,7 @@ public struct WorkSpacesWeb: AWSService { /// Updates a web portal. /// /// Parameters: - /// - authenticationType: The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. + /// - authenticationType: The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. /// - displayName: The name of the web portal. This is not visible to users who log into the web portal. 
/// - instanceType: The type and resources of the underlying instance. /// - maxConcurrentSessions: The maximum number of concurrent sessions for the portal. @@ -2244,6 +2355,55 @@ extension WorkSpacesWeb { return self.listPortalsPaginator(input, logger: logger) } + /// Return PaginatorSequence for operation ``listSessions(_:logger:)``. + /// + /// - Parameters: + /// - input: Input for operation + /// - logger: Logger used for logging + @inlinable + public func listSessionsPaginator( + _ input: ListSessionsRequest, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + return .init( + input: input, + command: self.listSessions, + inputKey: \ListSessionsRequest.nextToken, + outputKey: \ListSessionsResponse.nextToken, + logger: logger + ) + } + /// Return PaginatorSequence for operation ``listSessions(_:logger:)``. + /// + /// - Parameters: + /// - maxResults: The maximum number of results to be included in the next page. + /// - portalId: The ID of the web portal for the sessions. + /// - sessionId: The ID of the session. + /// - sortBy: The method in which the returned sessions should be sorted. + /// - status: The status of the session. + /// - username: The username of the session. + /// - logger: Logger used for logging + @inlinable + public func listSessionsPaginator( + maxResults: Int? = nil, + portalId: String, + sessionId: String? = nil, + sortBy: SessionSortBy? = nil, + status: SessionStatus? = nil, + username: String? = nil, + logger: Logger = AWSClient.loggingDisabled + ) -> AWSClient.PaginatorSequence { + let input = ListSessionsRequest( + maxResults: maxResults, + portalId: portalId, + sessionId: sessionId, + sortBy: sortBy, + status: status, + username: username + ) + return self.listSessionsPaginator(input, logger: logger) + } + /// Return PaginatorSequence for operation ``listTrustStoreCertificates(_:logger:)``. 
/// /// - Parameters: @@ -2435,6 +2595,21 @@ extension WorkSpacesWeb.ListPortalsRequest: AWSPaginateToken { } } +extension WorkSpacesWeb.ListSessionsRequest: AWSPaginateToken { + @inlinable + public func usingPaginationToken(_ token: String) -> WorkSpacesWeb.ListSessionsRequest { + return .init( + maxResults: self.maxResults, + nextToken: token, + portalId: self.portalId, + sessionId: self.sessionId, + sortBy: self.sortBy, + status: self.status, + username: self.username + ) + } +} + extension WorkSpacesWeb.ListTrustStoreCertificatesRequest: AWSPaginateToken { @inlinable public func usingPaginationToken(_ token: String) -> WorkSpacesWeb.ListTrustStoreCertificatesRequest { diff --git a/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_shapes.swift b/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_shapes.swift index 7decf6ab04..be1196e8af 100644 --- a/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_shapes.swift +++ b/Sources/Soto/Services/WorkSpacesWeb/WorkSpacesWeb_shapes.swift @@ -72,6 +72,18 @@ extension WorkSpacesWeb { public var description: String { return self.rawValue } } + public enum SessionSortBy: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case startTimeAscending = "StartTimeAscending" + case startTimeDescending = "StartTimeDescending" + public var description: String { return self.rawValue } + } + + public enum SessionStatus: String, CustomStringConvertible, Codable, Sendable, CodingKeyRepresentable { + case active = "Active" + case terminated = "Terminated" + public var description: String { return self.rawValue } + } + // MARK: Shapes public struct AssociateBrowserSettingsRequest: AWSEncodableShape { @@ -603,7 +615,7 @@ extension WorkSpacesWeb { public struct CreateIdentityProviderRequest: AWSEncodableShape { /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. If you do not specify a client token, one is automatically generated by the Amazon Web Services SDK. public let clientToken: String? - /// The identity provider details. The following list describes the provider detail keys for each identity provider type. For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional + /// The identity provider details. The following list describes the provider detail keys for each identity provider type. 
For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional public let identityProviderDetails: [String: String] /// The identity provider name. public let identityProviderName: String @@ -786,7 +798,7 @@ extension WorkSpacesWeb { try validate($0, name: "subnetIds[]", parent: name, min: 1) try validate($0, name: "subnetIds[]", parent: name, pattern: "^subnet-([0-9a-f]{8}|[0-9a-f]{17})$") } - try self.validate(self.subnetIds, name: "subnetIds", parent: name, max: 3) + try self.validate(self.subnetIds, name: "subnetIds", parent: name, max: 5) try self.validate(self.subnetIds, name: "subnetIds", parent: name, min: 2) try self.tags?.forEach { try $0.validate(name: "\(name).tags[]") @@ -823,7 +835,7 @@ extension WorkSpacesWeb { public struct CreatePortalRequest: AWSEncodableShape { /// The additional encryption context of the portal. public let additionalEncryptionContext: [String: String]? - /// The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. + /// The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. public let authenticationType: AuthenticationType? /// A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. If you do not specify a client token, one is automatically generated by the Amazon Web Services SDK. public let clientToken: String? 
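The validation change above raises the `subnetIds` ceiling for network settings from three to five. A minimal sketch that exercises the new limit purely client-side through `validate(name:)`; all resource IDs are placeholders and the member-wise initializer (with a defaulted `clientToken`) is assumed from the generation pattern in this diff:

```swift
import SotoWorkSpacesWeb

// Sketch: client-side check that five subnets now pass validation (previous max was three).
// All IDs are placeholders that merely satisfy the documented patterns.
func checkNetworkSettingsLimits() throws {
    let request = WorkSpacesWeb.CreateNetworkSettingsRequest(
        securityGroupIds: ["sg-0123456789abcdef0"],
        subnetIds: [
            "subnet-0123456789abcde00",
            "subnet-0123456789abcde01",
            "subnet-0123456789abcde02",
            "subnet-0123456789abcde03",
            "subnet-0123456789abcde04"
        ],
        vpcId: "vpc-0123456789abcdef0"
    )
    // Throws if any constraint is violated; two to five subnets are accepted after this change.
    try request.validate(name: "CreateNetworkSettingsRequest")
}
```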
@@ -1487,6 +1499,41 @@ extension WorkSpacesWeb { public init() {} } + public struct ExpireSessionRequest: AWSEncodableShape { + /// The ID of the web portal for the session. + public let portalId: String + /// The ID of the session to expire. + public let sessionId: String + + @inlinable + public init(portalId: String, sessionId: String) { + self.portalId = portalId + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.portalId, key: "portalId") + request.encodePath(self.sessionId, key: "sessionId") + } + + public func validate(name: String) throws { + try self.validate(self.portalId, name: "portalId", parent: name, max: 36) + try self.validate(self.portalId, name: "portalId", parent: name, min: 36) + try self.validate(self.portalId, name: "portalId", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ExpireSessionResponse: AWSDecodableShape { + public init() {} + } + public struct GetBrowserSettingsRequest: AWSEncodableShape { /// The ARN of the browser settings. public let browserSettingsArn: String @@ -1719,6 +1766,51 @@ extension WorkSpacesWeb { } } + public struct GetSessionRequest: AWSEncodableShape { + /// The ID of the web portal for the session. + public let portalId: String + /// The ID of the session. + public let sessionId: String + + @inlinable + public init(portalId: String, sessionId: String) { + self.portalId = portalId + self.sessionId = sessionId + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodePath(self.portalId, key: "portalId") + request.encodePath(self.sessionId, key: "sessionId") + } + + public func validate(name: String) throws { + try self.validate(self.portalId, name: "portalId", parent: name, max: 36) + try self.validate(self.portalId, name: "portalId", parent: name, min: 36) + try self.validate(self.portalId, name: "portalId", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct GetSessionResponse: AWSDecodableShape { + /// The sessions in a list. + public let session: Session? + + @inlinable + public init(session: Session? = nil) { + self.session = session + } + + private enum CodingKeys: String, CodingKey { + case session = "session" + } + } + public struct GetTrustStoreCertificateRequest: AWSEncodableShape { /// The thumbprint of the trust store certificate. public let thumbprint: String @@ -1885,7 +1977,7 @@ extension WorkSpacesWeb { public struct IdentityProvider: AWSDecodableShape { /// The ARN of the identity provider. public let identityProviderArn: String - /// The identity provider details. The following list describes the provider detail keys for each identity provider type. 
For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional + /// The identity provider details. The following list describes the provider detail keys for each identity provider type. For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional public let identityProviderDetails: [String: String]? /// The identity provider name. public let identityProviderName: String? @@ -2265,6 +2357,81 @@ extension WorkSpacesWeb { } } + public struct ListSessionsRequest: AWSEncodableShape { + /// The maximum number of results to be included in the next page. + public let maxResults: Int? + /// The pagination token used to retrieve the next page of results for this operation. + public let nextToken: String? + /// The ID of the web portal for the sessions. + public let portalId: String + /// The ID of the session. + public let sessionId: String? + /// The method in which the returned sessions should be sorted. + public let sortBy: SessionSortBy? + /// The status of the session. + public let status: SessionStatus? + /// The username of the session. + public let username: String? + + @inlinable + public init(maxResults: Int? = nil, nextToken: String? = nil, portalId: String, sessionId: String? = nil, sortBy: SessionSortBy? = nil, status: SessionStatus? = nil, username: String? = nil) { + self.maxResults = maxResults + self.nextToken = nextToken + self.portalId = portalId + self.sessionId = sessionId + self.sortBy = sortBy + self.status = status + self.username = username + } + + public func encode(to encoder: Encoder) throws { + let request = encoder.userInfo[.awsRequest]! as! 
RequestEncodingContainer + _ = encoder.container(keyedBy: CodingKeys.self) + request.encodeQuery(self.maxResults, key: "maxResults") + request.encodeQuery(self.nextToken, key: "nextToken") + request.encodePath(self.portalId, key: "portalId") + request.encodeQuery(self.sessionId, key: "sessionId") + request.encodeQuery(self.sortBy, key: "sortBy") + request.encodeQuery(self.status, key: "status") + request.encodeQuery(self.username, key: "username") + } + + public func validate(name: String) throws { + try self.validate(self.maxResults, name: "maxResults", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, max: 2048) + try self.validate(self.nextToken, name: "nextToken", parent: name, min: 1) + try self.validate(self.nextToken, name: "nextToken", parent: name, pattern: "^\\S+$") + try self.validate(self.portalId, name: "portalId", parent: name, max: 36) + try self.validate(self.portalId, name: "portalId", parent: name, min: 36) + try self.validate(self.portalId, name: "portalId", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") + try self.validate(self.sessionId, name: "sessionId", parent: name, max: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, min: 36) + try self.validate(self.sessionId, name: "sessionId", parent: name, pattern: "^[a-zA-Z0-9\\-]+$") + try self.validate(self.username, name: "username", parent: name, max: 256) + try self.validate(self.username, name: "username", parent: name, pattern: "^[\\s\\S]*$") + } + + private enum CodingKeys: CodingKey {} + } + + public struct ListSessionsResponse: AWSDecodableShape { + /// The pagination token used to retrieve the next page of results for this operation. + public let nextToken: String? + /// The sessions in a list. + public let sessions: [SessionSummary] + + @inlinable + public init(nextToken: String? = nil, sessions: [SessionSummary]) { + self.nextToken = nextToken + self.sessions = sessions + } + + private enum CodingKeys: String, CodingKey { + case nextToken = "nextToken" + case sessions = "sessions" + } + } + public struct ListTagsForResourceRequest: AWSEncodableShape { /// The ARN of the resource. public let resourceArn: String @@ -2553,7 +2720,7 @@ extension WorkSpacesWeb { public struct Portal: AWSDecodableShape { /// The additional encryption context of the portal. public let additionalEncryptionContext: [String: String]? - /// The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. + /// The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center. 
Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. public let authenticationType: AuthenticationType? /// The ARN of the browser settings that is associated with this web portal. public let browserSettingsArn: String? @@ -2637,7 +2804,7 @@ extension WorkSpacesWeb { } public struct PortalSummary: AWSDecodableShape { - /// The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. + /// The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. public let authenticationType: AuthenticationType? /// The ARN of the browser settings that is associated with the web portal. public let browserSettingsArn: String? @@ -2710,6 +2877,78 @@ extension WorkSpacesWeb { } } + public struct Session: AWSDecodableShape { + /// The IP address of the client. + public let clientIpAddresses: [String]? + /// The end time of the session. + public let endTime: Date? + /// The ARN of the web portal. + public let portalArn: String? + /// The ID of the session. + public let sessionId: String? + /// The start time of the session. + public let startTime: Date? + /// The status of the session. + public let status: SessionStatus? + /// The username of the session. + public let username: String? + + @inlinable + public init(clientIpAddresses: [String]? = nil, endTime: Date? = nil, portalArn: String? = nil, sessionId: String? = nil, startTime: Date? = nil, status: SessionStatus? = nil, username: String? = nil) { + self.clientIpAddresses = clientIpAddresses + self.endTime = endTime + self.portalArn = portalArn + self.sessionId = sessionId + self.startTime = startTime + self.status = status + self.username = username + } + + private enum CodingKeys: String, CodingKey { + case clientIpAddresses = "clientIpAddresses" + case endTime = "endTime" + case portalArn = "portalArn" + case sessionId = "sessionId" + case startTime = "startTime" + case status = "status" + case username = "username" + } + } + + public struct SessionSummary: AWSDecodableShape { + /// The end time of the session. + public let endTime: Date? + /// The ARN of the web portal. + public let portalArn: String? + /// The ID of the session. + public let sessionId: String? + /// The start time of the session. + public let startTime: Date? + /// The status of the session. + public let status: SessionStatus? + /// The username of the session. 
+ public let username: String? + + @inlinable + public init(endTime: Date? = nil, portalArn: String? = nil, sessionId: String? = nil, startTime: Date? = nil, status: SessionStatus? = nil, username: String? = nil) { + self.endTime = endTime + self.portalArn = portalArn + self.sessionId = sessionId + self.startTime = startTime + self.status = status + self.username = username + } + + private enum CodingKeys: String, CodingKey { + case endTime = "endTime" + case portalArn = "portalArn" + case sessionId = "sessionId" + case startTime = "startTime" + case status = "status" + case username = "username" + } + } + public struct Tag: AWSEncodableShape & AWSDecodableShape { /// The key of the tag. public let key: String @@ -2910,7 +3149,7 @@ extension WorkSpacesWeb { public let clientToken: String? /// The ARN of the identity provider. public let identityProviderArn: String - /// The details of the identity provider. The following list describes the provider detail keys for each identity provider type. For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional + /// The details of the identity provider. The following list describes the provider detail keys for each identity provider type. For Google and Login with Amazon: client_id client_secret authorize_scopes For Facebook: client_id client_secret authorize_scopes api_version For Sign in with Apple: client_id team_id key_id private_key authorize_scopes For OIDC providers: client_id client_secret attributes_request_method oidc_issuer authorize_scopes authorize_url if not available from discovery URL specified by oidc_issuer key token_url if not available from discovery URL specified by oidc_issuer key attributes_url if not available from discovery URL specified by oidc_issuer key jwks_uri if not available from discovery URL specified by oidc_issuer key For SAML providers: MetadataFile OR MetadataURL IDPSignout (boolean) optional IDPInit (boolean) optional RequestSigningAlgorithm (string) optional - Only accepts rsa-sha256 EncryptedResponses (boolean) optional public let identityProviderDetails: [String: String]? /// The name of the identity provider. public let identityProviderName: String? 
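The new `ExpireSession`, `GetSession`, and `ListSessions` operations above give portal administrators visibility into live secure-browser sessions. A minimal sketch that pages through a portal's active sessions and expires each one, using the convenience overloads and paginator added in this diff (the region is a placeholder):

```swift
import SotoWorkSpacesWeb

// Sketch: page through a portal's active secure-browser sessions and expire each one.
func expireActiveSessions(client: AWSClient, portalId: String) async throws {
    let web = WorkSpacesWeb(client: client, region: .useast1)
    for try await page in web.listSessionsPaginator(portalId: portalId, status: .active) {
        for summary in page.sessions {
            guard let sessionId = summary.sessionId else { continue }
            // ExpireSession maps to DELETE /portals/{portalId}/sessions/{sessionId}.
            _ = try await web.expireSession(portalId: portalId, sessionId: sessionId)
        }
    }
}
```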
@@ -3096,7 +3335,7 @@ extension WorkSpacesWeb { try validate($0, name: "subnetIds[]", parent: name, min: 1) try validate($0, name: "subnetIds[]", parent: name, pattern: "^subnet-([0-9a-f]{8}|[0-9a-f]{17})$") } - try self.validate(self.subnetIds, name: "subnetIds", parent: name, max: 3) + try self.validate(self.subnetIds, name: "subnetIds", parent: name, max: 5) try self.validate(self.subnetIds, name: "subnetIds", parent: name, min: 2) try self.validate(self.vpcId, name: "vpcId", parent: name, max: 255) try self.validate(self.vpcId, name: "vpcId", parent: name, min: 1) @@ -3126,7 +3365,7 @@ extension WorkSpacesWeb { } public struct UpdatePortalRequest: AWSEncodableShape { - /// The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. + /// The type of authentication integration points used when signing into the web portal. Defaults to Standard. Standard web portals are authenticated directly through your identity provider. You need to call CreateIdentityProvider to integrate your identity provider with your web portal. User and group access to your web portal is controlled through your identity provider. IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources (including external identity provider integration), plus user and group access to your web portal, can be configured in the IAM Identity Center. public let authenticationType: AuthenticationType? /// The name of the web portal. This is not visible to users who log into the web portal. public let displayName: String? 
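The reworded `authenticationType` documentation above applies to `UpdatePortalRequest` as well, so an existing portal can be moved onto IAM Identity Center. A minimal sketch, assuming the generated `AuthenticationType` enum exposes an `iamIdentityCenter` case and that the `updatePortal` convenience overload follows the pattern in this diff; the portal ARN and region are placeholders:

```swift
import SotoWorkSpacesWeb

// Sketch: move an existing portal onto IAM Identity Center authentication.
// The `iamIdentityCenter` case name is assumed; check the generated AuthenticationType
// enum if your SDK version spells it differently.
func switchPortalToIdentityCenter(client: AWSClient, portalArn: String) async throws {
    let web = WorkSpacesWeb(client: client, region: .useast1)
    _ = try await web.updatePortal(
        authenticationType: .iamIdentityCenter,
        portalArn: portalArn
    )
}
```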
diff --git a/Tests/SotoTests/AWSRequestTests.swift b/Tests/SotoTests/AWSRequestTests.swift index aae6bab426..f1bc9de1d8 100644 --- a/Tests/SotoTests/AWSRequestTests.swift +++ b/Tests/SotoTests/AWSRequestTests.swift @@ -111,18 +111,18 @@ class AWSHTTPRequestTests: XCTestCase { let expectedResult = "7Enabled30truetempEnabledEnabled20GLACIER180DEEP_ARCHIVE90Disabled" - let abortRule = S3.LifecycleRule(abortIncompleteMultipartUpload: S3.AbortIncompleteMultipartUpload(daysAfterInitiation: 7), filter: .prefix(""), status: .enabled) + let abortRule = S3.LifecycleRule(abortIncompleteMultipartUpload: S3.AbortIncompleteMultipartUpload(daysAfterInitiation: 7), filter: .init(prefix: ""), status: .enabled) let tempFileRule = S3.LifecycleRule( expiration: S3.LifecycleExpiration(days: 30, expiredObjectDeleteMarker: true), - filter: .prefix("temp"), + filter: .init(prefix: "temp"), status: .enabled ) let glacierRule = S3.LifecycleRule( - filter: .prefix(""), + filter: .init(prefix: ""), status: .enabled, transitions: [S3.Transition(days: 20, storageClass: .glacier), S3.Transition(days: 180, storageClass: .deepArchive)] ) - let versionsRule = S3.LifecycleRule(filter: .prefix(""), noncurrentVersionExpiration: S3.NoncurrentVersionExpiration(noncurrentDays: 90), status: .disabled) + let versionsRule = S3.LifecycleRule(filter: .init(prefix: ""), noncurrentVersionExpiration: S3.NoncurrentVersionExpiration(noncurrentDays: 90), status: .disabled) let rules = [abortRule, tempFileRule, glacierRule, versionsRule] let lifecycleConfiguration = S3.BucketLifecycleConfiguration(rules: rules) let request = S3.PutBucketLifecycleConfigurationRequest(bucket: "bucket", lifecycleConfiguration: lifecycleConfiguration) diff --git a/Tests/SotoTests/Services/S3/S3Tests.swift b/Tests/SotoTests/Services/S3/S3Tests.swift index dbdc699581..b995ef62ba 100644 --- a/Tests/SotoTests/Services/S3/S3Tests.swift +++ b/Tests/SotoTests/Services/S3/S3Tests.swift @@ -274,7 +274,7 @@ class S3Tests: XCTestCase { try await self.testBucket(name) { name in // set lifecycle rules let incompleteMultipartUploads = S3.AbortIncompleteMultipartUpload(daysAfterInitiation: 7) // clear incomplete multipart uploads after 7 days - let filter = S3.LifecycleRuleFilter.prefix("") // everything + let filter = S3.LifecycleRuleFilter(prefix: "") // everything let transitions = [S3.Transition(days: 14, storageClass: .glacier)] // transition objects to glacier after 14 days let lifecycleRules = S3.LifecycleRule( abortIncompleteMultipartUpload: incompleteMultipartUploads, @@ -378,7 +378,7 @@ class S3Tests: XCTestCase { try await self.testBucket(name) { name in let rule = S3.LifecycleRule( abortIncompleteMultipartUpload: .init(daysAfterInitiation: 7), - filter: .prefix(""), + filter: .init(prefix: ""), id: "multipart-upload", status: .enabled ) diff --git a/models/acm-pca.json b/models/acm-pca.json index 4c2a880b7f..7c697300aa 100644 --- a/models/acm-pca.json +++ b/models/acm-pca.json @@ -1778,7 +1778,7 @@ } ], "traits": { - "smithy.api#documentation": "
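The test updates above track a change in the generated S3 API: S3.LifecycleRuleFilter is now constructed as a structure (.init(prefix:)) rather than selected as an enum case (.prefix(_:)). A short migration sketch for downstream code, assuming the surrounding lifecycle types are otherwise unchanged; the prefix and rule ID are placeholders.

```swift
import SotoS3

// Build a lifecycle configuration with the new struct-style filter.
func temporaryFileLifecycle() -> S3.BucketLifecycleConfiguration {
    let expireTempFiles = S3.LifecycleRule(
        expiration: S3.LifecycleExpiration(days: 30),
        filter: .init(prefix: "temp/"),   // was: filter: .prefix("temp/")
        id: "expire-temp-files",
        status: .enabled
    )
    return S3.BucketLifecycleConfiguration(rules: [expireTempFiles])
}
```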

Creates an audit report that lists every time that your CA private key is used. The\n\t\t\treport is saved in the Amazon S3 bucket that you specify on input. The IssueCertificate and RevokeCertificate actions use\n\t\t\tthe private key.

\n \n

Both Amazon Web Services Private CA and the IAM principal must have permission to write to\n the S3 bucket that you specify. If the IAM principal making the call\n does not have permission to write to the bucket, then an exception is\n thrown. For more information, see Access \n\t\t\t\t\t\tpolicies for CRLs in Amazon S3.

\n
\n

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your Audit\n\t\t\t\tReports.

\n \n

You can generate a maximum of one report every 30 minutes.

\n
", + "smithy.api#documentation": "

Creates an audit report that lists every time that your CA private key is used to issue a certificate. The IssueCertificate and RevokeCertificate actions use\n\t\t\tthe private key.

\n

To save the audit report to your designated Amazon S3 bucket, you must create a bucket policy that grants Amazon Web Services Private CA permission to access and write to it. For an example policy, see Prepare an Amazon S3 bucket for audit reports.

\n

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. \n For more information, see Encrypting Your Audit\n\t\t\t\tReports.

\n \n

You can generate a maximum of one report every 30 minutes.

\n
", "smithy.api#idempotent": {} } }, @@ -1844,7 +1844,7 @@ "RevocationConfiguration": { "target": "com.amazonaws.acmpca#RevocationConfiguration", "traits": { - "smithy.api#documentation": "

Contains information to enable Online Certificate Status Protocol (OCSP) support, to\n\t\t\tenable a certificate revocation list (CRL), to enable both, or to enable neither. The\n\t\t\tdefault is for both certificate validation mechanisms to be disabled.

\n \n

The following requirements apply to revocation configurations.

\n
    \n
  • \n

    A configuration disabling CRLs or OCSP must contain only the Enabled=False \n\t\t\t\t\tparameter, and will fail if other parameters such as CustomCname or \n\t\t\t\t\tExpirationInDays are included.

    \n
  • \n
  • \n

    In a CRL configuration, the S3BucketName parameter must conform to \n\t\t\t\t\tAmazon S3 \n\t\t\t\t\tbucket naming rules.

    \n
  • \n
  • \n

    A configuration containing a custom Canonical\n\t\t\t\t\t\tName (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions\n\t\t\t\t\t\ton the use of special characters in a CNAME.

    \n
  • \n
  • \n

    In a CRL or OCSP configuration, the value of a CNAME parameter must not include a\n\t\t\t\t\t\tprotocol prefix such as \"http://\" or \"https://\".

    \n
  • \n
\n
\n

For more information, see the OcspConfiguration and CrlConfiguration\n\t\t\ttypes.

" + "smithy.api#documentation": "

Contains information to enable support for Online Certificate Status Protocol (OCSP), certificate revocation list (CRL), both protocols, or neither. By default, both certificate validation mechanisms are disabled.

\n

The following requirements apply to revocation configurations.

\n
    \n
  • \n

    A configuration disabling CRLs or OCSP must contain only the Enabled=False \n\t\t\t\t\tparameter, and will fail if other parameters such as CustomCname or \n\t\t\t\t\tExpirationInDays are included.

    \n
  • \n
  • \n

    In a CRL configuration, the S3BucketName parameter must conform to \n\t\t\t\t\tAmazon S3 \n\t\t\t\t\tbucket naming rules.

    \n
  • \n
  • \n

    A configuration containing a custom Canonical\n\t\t\t\t\t\tName (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions\n\t\t\t\t\t\ton the use of special characters in a CNAME.

    \n
  • \n
  • \n

    In a CRL or OCSP configuration, the value of a CNAME parameter must not include a\n\t\t\t\t\t\tprotocol prefix such as \"http://\" or \"https://\".

    \n
  • \n
\n

For more information, see the OcspConfiguration and CrlConfiguration\n\t\t\ttypes.

" } }, "CertificateAuthorityType": { @@ -3040,7 +3040,7 @@ } ], "traits": { - "smithy.api#documentation": "

Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you\n\t\t\tare using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call\n\t\t\tthis action, the following preparations must in place:

\n
    \n
  1. \n

    In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you\n\t\t\t\t\tplan to back with the imported certificate.

    \n
  2. \n
  3. \n

    Call the GetCertificateAuthorityCsr action to generate a certificate signing\n\t\t\t\t\trequest (CSR).

    \n
  4. \n
  5. \n

    Sign the CSR using a root or intermediate CA hosted by either an on-premises\n\t\t\t\t\tPKI hierarchy or by a commercial CA.

    \n
  6. \n
  7. \n

    Create a certificate chain and copy the signed certificate and the certificate\n\t\t\t\t\tchain to your working directory.

    \n
  8. \n
\n

Amazon Web Services Private CA supports three scenarios for installing a CA certificate:

\n
    \n
  • \n

    Installing a certificate for a root CA hosted by Amazon Web Services Private CA.

    \n
  • \n
  • \n

    Installing a subordinate CA certificate whose parent authority is hosted by\n\t\t\t\t\tAmazon Web Services Private CA.

    \n
  • \n
  • \n

    Installing a subordinate CA certificate whose parent authority is externally\n\t\t\t\t\thosted.

    \n
  • \n
\n

The following additional requirements apply when you import a CA certificate.

\n
    \n
  • \n

    Only a self-signed certificate can be imported as a root CA.

    \n
  • \n
  • \n

    A self-signed certificate cannot be imported as a subordinate CA.

    \n
  • \n
  • \n

    Your certificate chain must not include the private CA certificate that you\n\t\t\t\t\tare importing.

    \n
  • \n
  • \n

    Your root CA must be the last certificate in your chain. The subordinate\n\t\t\t\t\tcertificate, if any, that your root CA signed must be next to last. The\n\t\t\t\t\tsubordinate certificate signed by the preceding subordinate CA must come next,\n\t\t\t\t\tand so on until your chain is built.

    \n
  • \n
  • \n

    The chain must be PEM-encoded.

    \n
  • \n
  • \n

    The maximum allowed size of a certificate is 32 KB.

    \n
  • \n
  • \n

    The maximum allowed size of a certificate chain is 2 MB.

    \n
  • \n
\n

\n Enforcement of Critical Constraints\n

\n

Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA\n\t\t\tcertificate or chain.

\n
    \n
  • \n

    Basic constraints (must be marked critical)

    \n
  • \n
  • \n

    Subject alternative names

    \n
  • \n
  • \n

    Key usage

    \n
  • \n
  • \n

    Extended key usage

    \n
  • \n
  • \n

    Authority key identifier

    \n
  • \n
  • \n

    Subject key identifier

    \n
  • \n
  • \n

    Issuer alternative name

    \n
  • \n
  • \n

    Subject directory attributes

    \n
  • \n
  • \n

    Subject information access

    \n
  • \n
  • \n

    Certificate policies

    \n
  • \n
  • \n

    Policy mappings

    \n
  • \n
  • \n

    Inhibit anyPolicy

    \n
  • \n
\n

Amazon Web Services Private CA rejects the following extensions when they are marked critical in an\n\t\t\timported CA certificate or chain.

\n
    \n
  • \n

    Name constraints

    \n
  • \n
  • \n

    Policy constraints

    \n
  • \n
  • \n

    CRL distribution points

    \n
  • \n
  • \n

    Authority information access

    \n
  • \n
  • \n

    Freshest CRL

    \n
  • \n
  • \n

    Any other extension

    \n
  • \n
" + "smithy.api#documentation": "

Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you\n\t\t\tare using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call\n\t\t\tthis action, the following preparations must be in place:

\n
    \n
  1. \n

    In Amazon Web Services Private CA, call the CreateCertificateAuthority action to create the private CA that you\n\t\t\t\t\tplan to back with the imported certificate.

    \n
  2. \n
  3. \n

    Call the GetCertificateAuthorityCsr action to generate a certificate signing\n\t\t\t\t\trequest (CSR).

    \n
  4. \n
  5. \n

    Sign the CSR using a root or intermediate CA hosted by either an on-premises\n\t\t\t\t\tPKI hierarchy or by a commercial CA.

    \n
  6. \n
  7. \n

    Create a certificate chain and copy the signed certificate and the certificate\n\t\t\t\t\tchain to your working directory.

    \n
  8. \n
\n

Amazon Web Services Private CA supports three scenarios for installing a CA certificate:

\n
    \n
  • \n

    Installing a certificate for a root CA hosted by Amazon Web Services Private CA.

    \n
  • \n
  • \n

    Installing a subordinate CA certificate whose parent authority is hosted by\n\t\t\t\t\tAmazon Web Services Private CA.

    \n
  • \n
  • \n

    Installing a subordinate CA certificate whose parent authority is externally\n\t\t\t\t\thosted.

    \n
  • \n
\n

The following additional requirements apply when you import a CA certificate.

\n
    \n
  • \n

    Only a self-signed certificate can be imported as a root CA.

    \n
  • \n
  • \n

    A self-signed certificate cannot be imported as a subordinate CA.

    \n
  • \n
  • \n

    Your certificate chain must not include the private CA certificate that you\n\t\t\t\t\tare importing.

    \n
  • \n
  • \n

    Your root CA must be the last certificate in your chain. The subordinate\n\t\t\t\t\tcertificate, if any, that your root CA signed must be next to last. The\n\t\t\t\t\tsubordinate certificate signed by the preceding subordinate CA must come next,\n\t\t\t\t\tand so on until your chain is built.

    \n
  • \n
  • \n

    The chain must be PEM-encoded.

    \n
  • \n
  • \n

    The maximum allowed size of a certificate is 32 KB.

    \n
  • \n
  • \n

    The maximum allowed size of a certificate chain is 2 MB.

    \n
  • \n
\n

\n Enforcement of Critical Constraints\n

\n

Amazon Web Services Private CA allows the following extensions to be marked critical in the imported CA\n\t\t\tcertificate or chain.

\n
    \n
  • \n

    Authority key identifier

    \n
  • \n
  • \n

    Basic constraints (must be marked critical)

    \n
  • \n
  • \n

    Certificate policies

    \n
  • \n
  • \n

    Extended key usage

    \n
  • \n
  • \n

    Inhibit anyPolicy

    \n
  • \n
  • \n

    Issuer alternative name

    \n
  • \n
  • \n

    Key usage

    \n
  • \n
  • \n

    Name constraints

    \n
  • \n
  • \n

    Policy mappings

    \n
  • \n
  • \n

    Subject alternative name

    \n
  • \n
  • \n

    Subject directory attributes

    \n
  • \n
  • \n

    Subject key identifier

    \n
  • \n
  • \n

    Subject information access

    \n
  • \n
\n

Amazon Web Services Private CA rejects the following extensions when they are marked critical in an\n\t\t\timported CA certificate or chain.

\n
    \n
  • \n

    Authority information access

    \n
  • \n
  • \n

    CRL distribution points

    \n
  • \n
  • \n

    Freshest CRL

    \n
  • \n
  • \n

    Policy constraints

    \n
  • \n
\n

Amazon Web Services Private Certificate Authority will also reject any other extension marked as critical that is not contained in the preceding list of allowed extensions.

" } }, "com.amazonaws.acmpca#ImportCertificateAuthorityCertificateRequest": { @@ -4612,7 +4612,7 @@ "RevocationConfiguration": { "target": "com.amazonaws.acmpca#RevocationConfiguration", "traits": { - "smithy.api#documentation": "

Contains information to enable Online Certificate Status Protocol (OCSP) support, to\n\t\t\tenable a certificate revocation list (CRL), to enable both, or to enable neither. If\n\t\t\tthis parameter is not supplied, existing capibilites remain unchanged. For more\n\t\t\tinformation, see the OcspConfiguration and CrlConfiguration types.

\n \n

The following requirements apply to revocation configurations.

\n
    \n
  • \n

    A configuration disabling CRLs or OCSP must contain only the Enabled=False \n\t\t\t\t\tparameter, and will fail if other parameters such as CustomCname or \n\t\t\t\t\tExpirationInDays are included.

    \n
  • \n
  • \n

    In a CRL configuration, the S3BucketName parameter must conform to \n\t\t\t\t\tAmazon S3 \n\t\t\t\t\tbucket naming rules.

    \n
  • \n
  • \n

    A configuration containing a custom Canonical\n\t\t\t\t\t\tName (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions\n\t\t\t\t\t\ton the use of special characters in a CNAME.

    \n
  • \n
  • \n

    In a CRL or OCSP configuration, the value of a CNAME parameter must not include a\n\t\t\t\t\t\tprotocol prefix such as \"http://\" or \"https://\".

    \n
  • \n
\n
" + "smithy.api#documentation": "

Contains information to enable support for Online Certificate Status Protocol (OCSP), certificate revocation list (CRL), both protocols, or neither. If you don't supply this parameter, existing capabilities remain unchanged. For more\n\t\t\tinformation, see the OcspConfiguration and CrlConfiguration types.

\n

The following requirements apply to revocation configurations.

\n
    \n
  • \n

    A configuration disabling CRLs or OCSP must contain only the Enabled=False \n\t\t\t\t\tparameter, and will fail if other parameters such as CustomCname or \n\t\t\t\t\tExpirationInDays are included.

    \n
  • \n
  • \n

    In a CRL configuration, the S3BucketName parameter must conform to \n\t\t\t\t\tAmazon S3 \n\t\t\t\t\tbucket naming rules.

    \n
  • \n
  • \n

    A configuration containing a custom Canonical\n\t\t\t\t\t\tName (CNAME) parameter for CRLs or OCSP must conform to RFC2396 restrictions\n\t\t\t\t\t\ton the use of special characters in a CNAME.

    \n
  • \n
  • \n

    In a CRL or OCSP configuration, the value of a CNAME parameter must not include a\n\t\t\t\t\t\tprotocol prefix such as \"http://\" or \"https://\".

    \n
  • \n
\n \n

If you update the S3BucketName of CrlConfiguration, you can break revocation for existing certificates. In other words, if you call UpdateCertificateAuthority to update the CRL configuration's S3 bucket name, Amazon Web Services Private CA only writes CRLs to the new S3 bucket. Certificates issued prior to this point will have the old S3 bucket name in your CRL Distribution Point (CDP) extension, essentially breaking revocation. If you must update the S3 bucket, you'll need to reissue old certificates to keep the revocation working. Alternatively, you can use a CustomCname in your CRL configuration if you might need to change the S3 bucket name in the future.

\n
" } }, "Status": { diff --git a/models/amplify.json b/models/amplify.json index 10d06a0aa3..45ba331ec0 100644 --- a/models/amplify.json +++ b/models/amplify.json @@ -2388,7 +2388,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a deployment for a manually deployed Amplify app. Manually deployed apps are\n not connected to a repository.

\n

The maximum duration between the CreateDeployment call and the\n StartDeployment call cannot exceed 8 hours. If the duration exceeds 8\n hours, the StartDeployment call and the associated Job will\n fail.

", + "smithy.api#documentation": "

Creates a deployment for a manually deployed Amplify app. Manually deployed apps are\n not connected to a Git repository.

\n

The maximum duration between the CreateDeployment call and the\n StartDeployment call cannot exceed 8 hours. If the duration exceeds 8\n hours, the StartDeployment call and the associated Job will\n fail.

", "smithy.api#http": { "method": "POST", "uri": "/apps/{appId}/branches/{branchName}/deployments", @@ -4280,9 +4280,21 @@ "jobType": { "target": "com.amazonaws.amplify#JobType", "traits": { - "smithy.api#documentation": "

The type for the job. If the value is RELEASE, the job was manually\n released from its source by using the StartJob API. If the value is\n RETRY, the job was manually retried using the StartJob\n API. If the value is WEB_HOOK, the job was automatically triggered by\n webhooks.

", + "smithy.api#documentation": "

The type for the job. If the value is RELEASE, the job was manually\n released from its source by using the StartJob API. This value is available only for apps\n that are connected to a repository.

\n

If the value is RETRY, the job was manually retried using the StartJob\n API. If the value is WEB_HOOK, the job was automatically triggered by\n webhooks. If the value is MANUAL, the job is for a manually deployed app. Manually deployed apps are not connected to a Git repository.

", "smithy.api#required": {} } + }, + "sourceUrl": { + "target": "com.amazonaws.amplify#SourceUrl", + "traits": { + "smithy.api#documentation": "

The source URL for the files to deploy. The source URL can be either an HTTP GET URL that is publicly accessible and\n downloads a single .zip file, or an Amazon S3 bucket and prefix.

" + } + }, + "sourceUrlType": { + "target": "com.amazonaws.amplify#SourceUrlType", + "traits": { + "smithy.api#documentation": "

The type of source specified by the sourceURL.\n If the value is ZIP, the source is a .zip file.\n If the value is BUCKET_PREFIX, the source is an Amazon S3 bucket and\n prefix. If no value is specified, the default is ZIP.

" + } } }, "traits": { @@ -5279,7 +5291,24 @@ "min": 0, "max": 3000 }, - "smithy.api#pattern": "^(?s)" + "smithy.api#pattern": "^(s3|https|http)://" + } + }, + "com.amazonaws.amplify#SourceUrlType": { + "type": "enum", + "members": { + "ZIP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ZIP" + } + }, + "BUCKET_PREFIX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BUCKET_PREFIX" + } + } } }, "com.amazonaws.amplify#StackArn": { @@ -5363,7 +5392,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a deployment for a manually deployed app. Manually deployed apps are not\n connected to a repository.

\n

The maximum duration between the CreateDeployment call and the\n StartDeployment call cannot exceed 8 hours. If the duration exceeds 8\n hours, the StartDeployment call and the associated Job will\n fail.

", + "smithy.api#documentation": "

Starts a deployment for a manually deployed app. Manually deployed apps are not\n connected to a Git repository.

\n

The maximum duration between the CreateDeployment call and the\n StartDeployment call cannot exceed 8 hours. If the duration exceeds 8\n hours, the StartDeployment call and the associated Job will\n fail.

", "smithy.api#http": { "method": "POST", "uri": "/apps/{appId}/branches/{branchName}/deployments/start", @@ -5385,7 +5414,7 @@ "branchName": { "target": "com.amazonaws.amplify#BranchName", "traits": { - "smithy.api#documentation": "

The name of the branch to use for the job.

", + "smithy.api#documentation": "

The name of the branch to use for the deployment job.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5393,13 +5422,19 @@ "jobId": { "target": "com.amazonaws.amplify#JobId", "traits": { - "smithy.api#documentation": "

The job ID for this deployment, generated by the create deployment request.

" + "smithy.api#documentation": "

The job ID for this deployment that is generated by the CreateDeployment request.

" } }, "sourceUrl": { "target": "com.amazonaws.amplify#SourceUrl", "traits": { - "smithy.api#documentation": "

The source URL for this deployment, used when calling start deployment without create\n deployment. The source URL can be any HTTP GET URL that is publicly accessible and\n downloads a single .zip file.

" + "smithy.api#documentation": "

The source URL for the deployment that is used when calling StartDeployment without CreateDeployment. The source URL can be either an HTTP GET URL that is publicly accessible and\n downloads a single .zip file, or an Amazon S3 bucket and prefix.

" + } + }, + "sourceUrlType": { + "target": "com.amazonaws.amplify#SourceUrlType", + "traits": { + "smithy.api#documentation": "

The type of source specified by the sourceURL.\n If the value is ZIP, the source is a .zip file.\n If the value is BUCKET_PREFIX, the source is an Amazon S3 bucket and\n prefix. If no value is specified, the default is ZIP.

" } } }, diff --git a/models/api-gateway.json b/models/api-gateway.json index 70d7472e29..71a38b5a50 100644 --- a/models/api-gateway.json +++ b/models/api-gateway.json @@ -4877,7 +4877,7 @@ "certificateUploadDate": { "target": "com.amazonaws.apigateway#Timestamp", "traits": { - "smithy.api#documentation": "

The timestamp when the certificate that was used by edge-optimized endpoint for this domain name was uploaded.

" + "smithy.api#documentation": "

The timestamp when the certificate that was used by the edge-optimized endpoint for this domain name was uploaded. API Gateway doesn't change this value if you update the certificate.

" } }, "regionalDomainName": { diff --git a/models/appflow.json b/models/appflow.json index b7289d08e0..b905e85532 100644 --- a/models/appflow.json +++ b/models/appflow.json @@ -7886,7 +7886,7 @@ "oAuth2GrantType": { "target": "com.amazonaws.appflow#OAuth2GrantType", "traits": { - "smithy.api#documentation": "

Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an\n access token from Salesforce. Amazon AppFlow requires an access token each time it\n attempts to access your Salesforce records.

\n

You can specify one of the following values:

\n
\n
AUTHORIZATION_CODE
\n
\n

Amazon AppFlow passes an authorization code when it requests the access token\n from Salesforce. Amazon AppFlow receives the authorization code from Salesforce\n after you log in to your Salesforce account and authorize Amazon AppFlow to access\n your records.

\n
\n
CLIENT_CREDENTIALS
\n
\n

Amazon AppFlow passes client credentials (a client ID and client secret) when\n it requests the access token from Salesforce. You provide these credentials to Amazon AppFlow when you define the connection to your Salesforce account.

\n
\n
JWT_BEARER
\n
\n

Amazon AppFlow passes a JSON web token (JWT) when it requests the access token\n from Salesforce. You provide the JWT to Amazon AppFlow when you define the\n connection to your Salesforce account. When you use this grant type, you don't need to\n log in to your Salesforce account to authorize Amazon AppFlow to access your\n records.

\n
\n
" + "smithy.api#documentation": "

Specifies the OAuth 2.0 grant type that Amazon AppFlow uses when it requests an\n access token from Salesforce. Amazon AppFlow requires an access token each time it\n attempts to access your Salesforce records.

\n

You can specify one of the following values:

\n
\n
AUTHORIZATION_CODE
\n
\n

Amazon AppFlow passes an authorization code when it requests the access token\n from Salesforce. Amazon AppFlow receives the authorization code from Salesforce\n after you log in to your Salesforce account and authorize Amazon AppFlow to access\n your records.

\n
\n
JWT_BEARER
\n
\n

Amazon AppFlow passes a JSON web token (JWT) when it requests the access token\n from Salesforce. You provide the JWT to Amazon AppFlow when you define the\n connection to your Salesforce account. When you use this grant type, you don't need to\n log in to your Salesforce account to authorize Amazon AppFlow to access your\n records.

\n
\n
\n \n

The CLIENT_CREDENTIALS value is not supported for Salesforce.

\n
" } }, "jwtToken": { @@ -8014,7 +8014,7 @@ "oauth2GrantTypesSupported": { "target": "com.amazonaws.appflow#OAuth2GrantTypeSupportedList", "traits": { - "smithy.api#documentation": "

The OAuth 2.0 grant types that Amazon AppFlow can use when it requests an access\n token from Salesforce. Amazon AppFlow requires an access token each time it attempts to\n access your Salesforce records.

\n
\n
AUTHORIZATION_CODE
\n
\n

Amazon AppFlow passes an authorization code when it requests the access token\n from Salesforce. Amazon AppFlow receives the authorization code from Salesforce\n after you log in to your Salesforce account and authorize Amazon AppFlow to access\n your records.

\n
\n
CLIENT_CREDENTIALS
\n
\n

Amazon AppFlow passes client credentials (a client ID and client secret) when\n it requests the access token from Salesforce. You provide these credentials to Amazon AppFlow when you define the connection to your Salesforce account.

\n
\n
JWT_BEARER
\n
\n

Amazon AppFlow passes a JSON web token (JWT) when it requests the access token\n from Salesforce. You provide the JWT to Amazon AppFlow when you define the\n connection to your Salesforce account. When you use this grant type, you don't need to\n log in to your Salesforce account to authorize Amazon AppFlow to access your\n records.

\n
\n
" + "smithy.api#documentation": "

The OAuth 2.0 grant types that Amazon AppFlow can use when it requests an access\n token from Salesforce. Amazon AppFlow requires an access token each time it attempts to\n access your Salesforce records.

\n
\n
AUTHORIZATION_CODE
\n
\n

Amazon AppFlow passes an authorization code when it requests the access token\n from Salesforce. Amazon AppFlow receives the authorization code from Salesforce\n after you log in to your Salesforce account and authorize Amazon AppFlow to access\n your records.

\n
\n
JWT_BEARER
\n
\n

Amazon AppFlow passes a JSON web token (JWT) when it requests the access token\n from Salesforce. You provide the JWT to Amazon AppFlow when you define the\n connection to your Salesforce account. When you use this grant type, you don't need to\n log in to your Salesforce account to authorize Amazon AppFlow to access your\n records.

\n
\n
\n \n

The CLIENT_CREDENTIALS value is not supported for Salesforce.

\n
" } } }, diff --git a/models/appstream.json b/models/appstream.json index 3d58a636ab..f60bb59d4d 100644 --- a/models/appstream.json +++ b/models/appstream.json @@ -137,6 +137,12 @@ "traits": { "smithy.api#enumValue": "DOMAIN_SMART_CARD_SIGNIN" } + }, + "AUTO_TIME_ZONE_REDIRECTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTO_TIME_ZONE_REDIRECTION" + } } } }, diff --git a/models/athena.json b/models/athena.json index eaee8c9732..5dddda8269 100644 --- a/models/athena.json +++ b/models/athena.json @@ -2172,6 +2172,191 @@ "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" } }, + "com.amazonaws.athena#ConnectionType": { + "type": "enum", + "members": { + "DYNAMODB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DYNAMODB" + } + }, + "MYSQL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MYSQL" + } + }, + "POSTGRESQL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "POSTGRESQL" + } + }, + "REDSHIFT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REDSHIFT" + } + }, + "ORACLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ORACLE" + } + }, + "SYNAPSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SYNAPSE" + } + }, + "SQLSERVER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SQLSERVER" + } + }, + "DB2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DB2" + } + }, + "OPENSEARCH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OPENSEARCH" + } + }, + "BIGQUERY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BIGQUERY" + } + }, + "GOOGLECLOUDSTORAGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GOOGLECLOUDSTORAGE" + } + }, + "HBASE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HBASE" + } + }, + "DOCUMENTDB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DOCUMENTDB" + } + }, + "MSK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MSK" + } + }, + "NEPTUNE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NEPTUNE" + } + }, + "CMDB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CMDB" + } + }, + "TPCDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TPCDS" + } + }, + "REDIS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REDIS" + } + }, + "CLOUDWATCH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLOUDWATCH" + } + }, + "TIMESTREAM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TIMESTREAM" + } + }, + "SAPHANA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SAPHANA" + } + }, + "SNOWFLAKE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SNOWFLAKE" + } + }, + "TERADATA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TERADATA" + } + }, + "VERTICA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "VERTICA" + } + }, + "CLOUDERAIMPALA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLOUDERAIMPALA" + } + }, + "CLOUDERAHIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLOUDERAHIVE" + } + }, + "HORTONWORKSHIVE": { + "target": 
"smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HORTONWORKSHIVE" + } + }, + "DATALAKEGEN2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATALAKEGEN2" + } + }, + "DB2AS400": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DB2AS400" + } + }, + "CLOUDWATCHMETRICS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLOUDWATCHMETRICS" + } + } + } + }, "com.amazonaws.athena#CoordinatorDpuSize": { "type": "integer", "traits": { @@ -2270,7 +2455,7 @@ "Type": { "target": "com.amazonaws.athena#DataCatalogType", "traits": { - "smithy.api#documentation": "

The type of data catalog to create: LAMBDA for a federated catalog,\n HIVE for an external hive metastore, or GLUE for an\n Glue Data Catalog.

", + "smithy.api#documentation": "

The type of data catalog to create: LAMBDA for a federated catalog,\n GLUE for a Glue Data Catalog, and HIVE for an\n external Apache Hive metastore. FEDERATED is a federated catalog for which\n Athena creates the connection and the Lambda function for\n you based on the parameters that you pass.

", "smithy.api#required": {} } }, @@ -2283,7 +2468,7 @@ "Parameters": { "target": "com.amazonaws.athena#ParametersMap", "traits": { - "smithy.api#documentation": "

Specifies the Lambda function or functions to use for creating the data\n catalog. This is a mapping whose values depend on the catalog type.

\n
    \n
  • \n

    For the HIVE data catalog type, use the following syntax. The\n metadata-function parameter is required. The\n sdk-version parameter is optional and defaults to the currently\n supported version.

    \n

    \n metadata-function=lambda_arn,\n sdk-version=version_number\n \n

    \n
  • \n
  • \n

    For the LAMBDA data catalog type, use one of the following sets\n of required parameters, but not both.

    \n
      \n
    • \n

      If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.

      \n

      \n metadata-function=lambda_arn,\n record-function=lambda_arn\n \n

      \n
    • \n
    • \n

      If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.

      \n

      \n function=lambda_arn\n \n

      \n
    • \n
    \n
  • \n
  • \n

    The GLUE type takes a catalog ID parameter and is required. The\n \n catalog_id\n is the account ID of the\n Amazon Web Services account to which the Glue Data Catalog\n belongs.

    \n

    \n catalog-id=catalog_id\n \n

    \n
      \n
    • \n

      The GLUE data catalog type also applies to the default\n AwsDataCatalog that already exists in your account, of\n which you can have only one and cannot modify.

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

Specifies the Lambda function or functions to use for creating the data\n catalog. This is a mapping whose values depend on the catalog type.

\n
    \n
  • \n

    For the HIVE data catalog type, use the following syntax. The\n metadata-function parameter is required. The\n sdk-version parameter is optional and defaults to the currently\n supported version.

    \n

    \n metadata-function=lambda_arn,\n sdk-version=version_number\n \n

    \n
  • \n
  • \n

    For the LAMBDA data catalog type, use one of the following sets\n of required parameters, but not both.

    \n
      \n
    • \n

      If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.

      \n

      \n metadata-function=lambda_arn,\n record-function=lambda_arn\n \n

      \n
    • \n
    • \n

      If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.

      \n

      \n function=lambda_arn\n \n

      \n
    • \n
    \n
  • \n
  • \n

    The GLUE type takes a catalog ID parameter, which is required. The\n \n catalog_id\n is the account ID of the\n Amazon Web Services account to which the Glue Data Catalog\n belongs.

    \n

    \n catalog-id=catalog_id\n \n

    \n
      \n
    • \n

      The GLUE data catalog type also applies to the default\n AwsDataCatalog that already exists in your account, of\n which you can have only one and cannot modify.

      \n
    • \n
    \n
  • \n
  • \n

    The FEDERATED data catalog type uses one of the following\n parameters, but not both. Use connection-arn for an existing\n Glue connection. Use connection-type and\n connection-properties to specify the configuration setting for\n a new connection.

    \n
      \n
    • \n

      \n connection-arn:\n \n

      \n
    • \n
    • \n

      \n lambda-role-arn (optional): The execution role to use for the\n Lambda function. If not provided, one is created.

      \n
    • \n
    • \n

      \n connection-type:MYSQL|REDSHIFT|....,\n connection-properties:\"\"\n

      \n

      For \n \n , use escaped\n JSON text, as in the following example.

      \n

      \n \"{\\\"spill_bucket\\\":\\\"my_spill\\\",\\\"spill_prefix\\\":\\\"athena-spill\\\",\\\"host\\\":\\\"abc12345.snowflakecomputing.com\\\",\\\"port\\\":\\\"1234\\\",\\\"warehouse\\\":\\\"DEV_WH\\\",\\\"database\\\":\\\"TEST\\\",\\\"schema\\\":\\\"PUBLIC\\\",\\\"SecretArn\\\":\\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\\"}\"\n

      \n
    • \n
    \n
  • \n
" } }, "Tags": { @@ -2299,7 +2484,11 @@ }, "com.amazonaws.athena#CreateDataCatalogOutput": { "type": "structure", - "members": {}, + "members": { + "DataCatalog": { + "target": "com.amazonaws.athena#DataCatalog" + } + }, "traits": { "smithy.api#output": {} } @@ -2675,14 +2864,32 @@ "Type": { "target": "com.amazonaws.athena#DataCatalogType", "traits": { - "smithy.api#documentation": "

The type of data catalog to create: LAMBDA for a federated catalog,\n HIVE for an external hive metastore, or GLUE for an\n Glue Data Catalog.

", + "smithy.api#documentation": "

The type of data catalog to create: LAMBDA for a federated catalog,\n GLUE for a Glue Data Catalog, and HIVE for an\n external Apache Hive metastore. FEDERATED is a federated catalog for which\n Athena creates the connection and the Lambda function for\n you based on the parameters that you pass.

", "smithy.api#required": {} } }, "Parameters": { "target": "com.amazonaws.athena#ParametersMap", "traits": { - "smithy.api#documentation": "

Specifies the Lambda function or functions to use for the data catalog.\n This is a mapping whose values depend on the catalog type.

\n
    \n
  • \n

    For the HIVE data catalog type, use the following syntax. The\n metadata-function parameter is required. The\n sdk-version parameter is optional and defaults to the currently\n supported version.

    \n

    \n metadata-function=lambda_arn,\n sdk-version=version_number\n \n

    \n
  • \n
  • \n

    For the LAMBDA data catalog type, use one of the following sets\n of required parameters, but not both.

    \n
      \n
    • \n

      If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.

      \n

      \n metadata-function=lambda_arn,\n record-function=lambda_arn\n \n

      \n
    • \n
    • \n

      If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.

      \n

      \n function=lambda_arn\n \n

      \n
    • \n
    \n
  • \n
  • \n

    The GLUE type takes a catalog ID parameter and is required. The\n \n catalog_id\n is the account ID of the\n Amazon Web Services account to which the Glue catalog\n belongs.

    \n

    \n catalog-id=catalog_id\n \n

    \n
      \n
    • \n

      The GLUE data catalog type also applies to the default\n AwsDataCatalog that already exists in your account, of\n which you can have only one and cannot modify.

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

Specifies the Lambda function or functions to use for the data catalog.\n This is a mapping whose values depend on the catalog type.

\n
    \n
  • \n

    For the HIVE data catalog type, use the following syntax. The\n metadata-function parameter is required. The\n sdk-version parameter is optional and defaults to the currently\n supported version.

    \n

    \n metadata-function=lambda_arn,\n sdk-version=version_number\n \n

    \n
  • \n
  • \n

    For the LAMBDA data catalog type, use one of the following sets\n of required parameters, but not both.

    \n
      \n
    • \n

      If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.

      \n

      \n metadata-function=lambda_arn,\n record-function=lambda_arn\n \n

      \n
    • \n
    • \n

      If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.

      \n

      \n function=lambda_arn\n \n

      \n
    • \n
    \n
  • \n
  • \n

    The GLUE type takes a catalog ID parameter, which is required. The\n \n catalog_id\n is the account ID of the\n Amazon Web Services account to which the Glue catalog\n belongs.

    \n

    \n catalog-id=catalog_id\n \n

    \n
      \n
    • \n

      The GLUE data catalog type also applies to the default\n AwsDataCatalog that already exists in your account, of\n which you can have only one and cannot modify.

      \n
    • \n
    \n
  • \n
  • \n

    The FEDERATED data catalog type uses one of the following\n parameters, but not both. Use connection-arn for an existing\n Glue connection. Use connection-type and\n connection-properties to specify the configuration setting for\n a new connection.

    \n
      \n
    • \n

      \n connection-arn:\n \n

      \n
    • \n
    • \n

      \n connection-type:MYSQL|REDSHIFT|....,\n connection-properties:\"\"\n

      \n

      For \n \n , use escaped\n JSON text, as in the following example.

      \n

      \n \"{\\\"spill_bucket\\\":\\\"my_spill\\\",\\\"spill_prefix\\\":\\\"athena-spill\\\",\\\"host\\\":\\\"abc12345.snowflakecomputing.com\\\",\\\"port\\\":\\\"1234\\\",\\\"warehouse\\\":\\\"DEV_WH\\\",\\\"database\\\":\\\"TEST\\\",\\\"schema\\\":\\\"PUBLIC\\\",\\\"SecretArn\\\":\\\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\\\"}\"\n

      \n
    • \n
    \n
  • \n
" + } + }, + "Status": { + "target": "com.amazonaws.athena#DataCatalogStatus", + "traits": { + "smithy.api#documentation": "

The status of the creation or deletion of the data catalog.

\n
    \n
  • \n

    The LAMBDA, GLUE, and HIVE data catalog\n types are created synchronously. Their status is either\n CREATE_COMPLETE or CREATE_FAILED.

    \n
  • \n
  • \n

    The FEDERATED data catalog type is created asynchronously.

    \n
  • \n
\n

Data catalog creation status:

\n
    \n
  • \n

    \n CREATE_IN_PROGRESS: Federated data catalog creation in\n progress.

    \n
  • \n
  • \n

    \n CREATE_COMPLETE: Data catalog creation complete.

    \n
  • \n
  • \n

    \n CREATE_FAILED: Data catalog could not be created.

    \n
  • \n
  • \n

    \n CREATE_FAILED_CLEANUP_IN_PROGRESS: Federated data catalog\n creation failed and is being removed.

    \n
  • \n
  • \n

    \n CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation\n failed and was removed.

    \n
  • \n
  • \n

    \n CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation\n failed but could not be removed.

    \n
  • \n
\n

Data catalog deletion status:

\n
    \n
  • \n

    \n DELETE_IN_PROGRESS: Federated data catalog deletion in\n progress.

    \n
  • \n
  • \n

    \n DELETE_COMPLETE: Federated data catalog deleted.

    \n
  • \n
  • \n

    \n DELETE_FAILED: Federated data catalog could not be\n deleted.

    \n
  • \n
" + } + }, + "ConnectionType": { + "target": "com.amazonaws.athena#ConnectionType", + "traits": { + "smithy.api#documentation": "

The type of connection for a FEDERATED data catalog (for example,\n REDSHIFT, MYSQL, or SQLSERVER). For\n information about individual connectors, see Available data source\n connectors.

" + } + }, + "Error": { + "target": "com.amazonaws.athena#ErrorMessage", + "traits": { + "smithy.api#documentation": "

Text of the error that occurred during data catalog creation or deletion.

" } } }, @@ -2690,6 +2897,65 @@ "smithy.api#documentation": "

Contains information about a data catalog in an Amazon Web Services account.

\n \n

In the Athena console, data catalogs are listed as \"data sources\" on\n the Data sources page under the Data source name column.

\n
" } }, + "com.amazonaws.athena#DataCatalogStatus": { + "type": "enum", + "members": { + "CREATE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE_IN_PROGRESS" + } + }, + "CREATE_COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE_COMPLETE" + } + }, + "CREATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE_FAILED" + } + }, + "CREATE_FAILED_CLEANUP_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE_FAILED_CLEANUP_IN_PROGRESS" + } + }, + "CREATE_FAILED_CLEANUP_COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE_FAILED_CLEANUP_COMPLETE" + } + }, + "CREATE_FAILED_CLEANUP_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CREATE_FAILED_CLEANUP_FAILED" + } + }, + "DELETE_IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE_IN_PROGRESS" + } + }, + "DELETE_COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE_COMPLETE" + } + }, + "DELETE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELETE_FAILED" + } + } + } + }, "com.amazonaws.athena#DataCatalogSummary": { "type": "structure", "members": { @@ -2704,6 +2970,24 @@ "traits": { "smithy.api#documentation": "

The data catalog type.

" } + }, + "Status": { + "target": "com.amazonaws.athena#DataCatalogStatus", + "traits": { + "smithy.api#documentation": "

The status of the creation or deletion of the data catalog.

\n
    \n
  • \n

    The LAMBDA, GLUE, and HIVE data catalog\n types are created synchronously. Their status is either\n CREATE_COMPLETE or CREATE_FAILED.

    \n
  • \n
  • \n

    The FEDERATED data catalog type is created asynchronously.

    \n
  • \n
\n

Data catalog creation status:

\n
    \n
  • \n

    \n CREATE_IN_PROGRESS: Federated data catalog creation in\n progress.

    \n
  • \n
  • \n

    \n CREATE_COMPLETE: Data catalog creation complete.

    \n
  • \n
  • \n

    \n CREATE_FAILED: Data catalog could not be created.

    \n
  • \n
  • \n

    \n CREATE_FAILED_CLEANUP_IN_PROGRESS: Federated data catalog\n creation failed and is being removed.

    \n
  • \n
  • \n

    \n CREATE_FAILED_CLEANUP_COMPLETE: Federated data catalog creation\n failed and was removed.

    \n
  • \n
  • \n

    \n CREATE_FAILED_CLEANUP_FAILED: Federated data catalog creation\n failed but could not be removed.

    \n
  • \n
\n

Data catalog deletion status:

\n
    \n
  • \n

    \n DELETE_IN_PROGRESS: Federated data catalog deletion in\n progress.

    \n
  • \n
  • \n

    \n DELETE_COMPLETE: Federated data catalog deleted.

    \n
  • \n
  • \n

    \n DELETE_FAILED: Federated data catalog could not be\n deleted.

    \n
  • \n
" + } + }, + "ConnectionType": { + "target": "com.amazonaws.athena#ConnectionType", + "traits": { + "smithy.api#documentation": "

The type of connection for a FEDERATED data catalog (for example,\n REDSHIFT, MYSQL, or SQLSERVER). For\n information about individual connectors, see Available data source\n connectors.

" + } + }, + "Error": { + "target": "com.amazonaws.athena#ErrorMessage", + "traits": { + "smithy.api#documentation": "

Text of the error that occurred during data catalog creation or deletion.

" + } } }, "traits": { @@ -2736,6 +3020,12 @@ "traits": { "smithy.api#enumValue": "HIVE" } + }, + "FEDERATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FEDERATED" + } } } }, @@ -2887,7 +3177,11 @@ }, "com.amazonaws.athena#DeleteDataCatalogOutput": { "type": "structure", - "members": {}, + "members": { + "DataCatalog": { + "target": "com.amazonaws.athena#DataCatalog" + } + }, "traits": { "smithy.api#output": {} } diff --git a/models/b2bi.json b/models/b2bi.json index 962797f812..796a567f58 100644 --- a/models/b2bi.json +++ b/models/b2bi.json @@ -30,6 +30,9 @@ "type": "service", "version": "2022-06-23", "operations": [ + { + "target": "com.amazonaws.b2bi#CreateStarterMappingTemplate" + }, { "target": "com.amazonaws.b2bi#GetTransformerJob" }, @@ -42,6 +45,9 @@ { "target": "com.amazonaws.b2bi#TagResource" }, + { + "target": "com.amazonaws.b2bi#TestConversion" + }, { "target": "com.amazonaws.b2bi#TestMapping" }, @@ -815,6 +821,23 @@ "smithy.api#documentation": "

A capability object. Currently, only EDI (electronic data interchange) capabilities are supported. A trading capability contains the information required to transform incoming EDI documents into JSON or XML outputs.

" } }, + "com.amazonaws.b2bi#CapabilityDirection": { + "type": "enum", + "members": { + "INBOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INBOUND" + } + }, + "OUTBOUND": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OUTBOUND" + } + } + } + }, "com.amazonaws.b2bi#CapabilityId": { "type": "string", "traits": { @@ -840,6 +863,20 @@ } } }, + "com.amazonaws.b2bi#CapabilityOptions": { + "type": "structure", + "members": { + "outboundEdi": { + "target": "com.amazonaws.b2bi#OutboundEdiOptions", + "traits": { + "smithy.api#documentation": "

A structure that contains the outbound EDI options.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the details for an Outbound EDI capability.

" + } + }, "com.amazonaws.b2bi#CapabilitySummary": { "type": "structure", "members": { @@ -909,6 +946,94 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.b2bi#ConversionSource": { + "type": "structure", + "members": { + "fileFormat": { + "target": "com.amazonaws.b2bi#ConversionSourceFormat", + "traits": { + "smithy.api#documentation": "

The format for the input file: either JSON or XML.

", + "smithy.api#required": {} + } + }, + "inputFile": { + "target": "com.amazonaws.b2bi#InputFileSource", + "traits": { + "smithy.api#documentation": "File to be converted", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the input for an outbound transformation.

" + } + }, + "com.amazonaws.b2bi#ConversionSourceFormat": { + "type": "enum", + "members": { + "JSON": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JSON" + } + }, + "XML": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "XML" + } + } + } + }, + "com.amazonaws.b2bi#ConversionTarget": { + "type": "structure", + "members": { + "fileFormat": { + "target": "com.amazonaws.b2bi#ConversionTargetFormat", + "traits": { + "smithy.api#documentation": "

Currently, only X12 format is supported.

", + "smithy.api#required": {} + } + }, + "formatDetails": { + "target": "com.amazonaws.b2bi#ConversionTargetFormatDetails", + "traits": { + "smithy.api#documentation": "

A structure that contains the formatting details for the conversion target.

" + } + }, + "outputSampleFile": { + "target": "com.amazonaws.b2bi#OutputSampleFileSource", + "traits": { + "smithy.api#documentation": "Customer uses this to provide a sample on what should file look like after conversion\nX12 EDI use case around this would be discovering the file syntax" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provide a sample of what the output of the transformation should look like.

" + } + }, + "com.amazonaws.b2bi#ConversionTargetFormat": { + "type": "enum", + "members": { + "X12": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "X12" + } + } + } + }, + "com.amazonaws.b2bi#ConversionTargetFormatDetails": { + "type": "union", + "members": { + "x12": { + "target": "com.amazonaws.b2bi#X12Details" + } + }, + "traits": { + "smithy.api#documentation": "

Contains a structure describing the X12 details for the conversion target.

" + } + }, "com.amazonaws.b2bi#CreateCapability": { "type": "operation", "input": { @@ -1246,6 +1371,12 @@ "smithy.api#required": {} } }, + "capabilityOptions": { + "target": "com.amazonaws.b2bi#CapabilityOptions", + "traits": { + "smithy.api#documentation": "

Specify the structure that contains the details for the associated capabilities.

" + } + }, "clientToken": { "target": "smithy.api#String", "traits": { @@ -1313,6 +1444,12 @@ "smithy.api#documentation": "

Returns one or more capabilities associated with this partnership.

" } }, + "capabilityOptions": { + "target": "com.amazonaws.b2bi#CapabilityOptions", + "traits": { + "smithy.api#documentation": "

Returns the structure that contains the details for the associated capabilities.

" + } + }, "tradingPartnerId": { "target": "com.amazonaws.b2bi#TradingPartnerId", "traits": { @@ -1539,6 +1676,101 @@ "smithy.api#output": {} } }, + "com.amazonaws.b2bi#CreateStarterMappingTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.b2bi#CreateStarterMappingTemplateRequest" + }, + "output": { + "target": "com.amazonaws.b2bi#CreateStarterMappingTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.b2bi#AccessDeniedException" + }, + { + "target": "com.amazonaws.b2bi#InternalServerException" + }, + { + "target": "com.amazonaws.b2bi#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.b2bi#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Amazon Web Services B2B Data Interchange uses a mapping template in JSONata or XSLT format to transform a customer input file into a JSON or XML file that can be converted to EDI.

\n

If you provide a sample EDI file with the same structure as the EDI files that you wish to generate, then the service can generate a mapping template.\n The starter template contains placeholder values which you can replace with JSONata or XSLT expressions to take data from your input file and insert it\n into the JSON or XML file that is used to generate the EDI.

\n

If you do not provide a sample EDI file, then the service can generate a mapping template based on the EDI settings in the templateDetails parameter.\n

\n

Currently, we only support generating a template that can generate the input to produce an Outbound X12 EDI file.

", + "smithy.api#examples": [ + { + "title": "Sample CreateStarterMappingTemplate call", + "input": { + "mappingType": "JSONATA", + "templateDetails": { + "x12": { + "transactionSet": "X12_110", + "version": "VERSION_4010" + } + }, + "outputSampleLocation": { + "bucketName": "output-sample-bucket", + "key": "output-sample-key" + } + }, + "output": { + "mappingTemplate": "Example Mapping Template" + } + } + ], + "smithy.api#http": { + "code": 200, + "uri": "/createmappingstarttemplate", + "method": "POST" + } + } + }, + "com.amazonaws.b2bi#CreateStarterMappingTemplateRequest": { + "type": "structure", + "members": { + "outputSampleLocation": { + "target": "com.amazonaws.b2bi#S3Location", + "traits": { + "smithy.api#documentation": "

Specify the location of the sample EDI file that is used to generate the mapping template.

" + } + }, + "mappingType": { + "target": "com.amazonaws.b2bi#MappingType", + "traits": { + "smithy.api#documentation": "

Specify the format for the mapping template: either JSONATA or XSLT.

", + "smithy.api#required": {} + } + }, + "templateDetails": { + "target": "com.amazonaws.b2bi#TemplateDetails", + "traits": { + "smithy.api#documentation": "

\n Describes the details needed for generating the template. Specify the X12 transaction set and version for which the template is used:\n currently, we only support X12.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.b2bi#CreateStarterMappingTemplateResponse": { + "type": "structure", + "members": { + "mappingTemplate": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Returns a string that represents the mapping template.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.b2bi#CreateTransformer": { "type": "operation", "input": { @@ -1572,24 +1804,46 @@ ], "traits": { "aws.iam#requiredActions": [ - "b2bi:TagResource" + "b2bi:TagResource", + "b2bi:UpdateTransformer", + "logs:CreateLogDelivery", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:DescribeResourcePolicies", + "logs:ListLogDeliveries", + "logs:PutLogEvents", + "logs:PutResourcePolicy" ], - "smithy.api#documentation": "

Creates a transformer. A transformer\n describes how to process the incoming EDI documents and extract the necessary\n information to the output file.

", + "smithy.api#documentation": "

Creates a transformer. Amazon Web Services B2B Data Interchange currently supports two scenarios:

  • Inbound EDI: the Amazon Web Services customer receives an EDI file from their trading partner. Amazon Web Services B2B Data Interchange converts this EDI file into a JSON or XML file with a service-defined structure. A mapping template provided by the customer, in JSONata or XSLT format, is optionally applied to this file to produce a JSON or XML file with the structure the customer requires.

  • Outbound EDI: the Amazon Web Services customer has a JSON or XML file containing data that they wish to use in an EDI file. A mapping template, provided by the customer (in either JSONata or XSLT format), is applied to this file to generate a JSON or XML file in the service-defined structure. This file is then converted to an EDI file.

The following fields are provided for backwards compatibility only: fileFormat, mappingTemplate, ediType, and sampleDocument.

  • Use the mapping data type in place of mappingTemplate and fileFormat.

  • Use the sampleDocuments data type in place of sampleDocument.

  • Use either the inputConversion or outputConversion in place of ediType.
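For Soto users, the reshaped request might be assembled along the following lines, a sketch under the same assumptions as above (generated names, enum cases, and parameter order are illustrative):

// Hypothetical: builds a transformer with the new inputConversion/mapping/sampleDocuments
// members instead of the deprecated fileFormat/mappingTemplate/ediType/sampleDocument.
let createRequest = B2bi.CreateTransformerRequest(
    inputConversion: .init(
        formatOptions: .x12(.init(transactionSet: .x12110, version: .version4010)),
        fromFormat: .x12
    ),
    mapping: .init(template: "{}", templateLanguage: .jsonata),
    name: "transformX12",
    sampleDocuments: .init(
        bucketName: "test-bucket",
        keys: [.init(input: "sampleDoc.txt")]
    )
)
let created = try await b2bi.createTransformer(createRequest)
print(created.transformerId, created.status)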
", "smithy.api#examples": [ { "title": "Sample CreateTransformer call", "input": { "clientToken": "foo", - "ediType": { - "x12Details": { - "transactionSet": "X12_110", - "version": "VERSION_4010" + "name": "transformX12", + "inputConversion": { + "fromFormat": "X12", + "formatOptions": { + "x12": { + "transactionSet": "X12_110", + "version": "VERSION_4010" + } } }, - "fileFormat": "JSON", - "mappingTemplate": "{}", - "name": "transformJSON", - "sampleDocument": "s3://test-bucket/sampleDoc.txt", + "mapping": { + "templateLanguage": "JSONATA", + "template": "{}" + }, + "sampleDocuments": { + "bucketName": "test-bucket", + "keys": [ + { + "input": "sampleDoc.txt" + } + ] + }, "tags": [ { "Key": "sampleKey", @@ -1599,16 +1853,28 @@ }, "output": { "createdAt": "2023-11-01T21:51:05.504Z", - "ediType": { - "x12Details": { - "transactionSet": "X12_110", - "version": "VERSION_4010" + "name": "transformX12", + "inputConversion": { + "fromFormat": "X12", + "formatOptions": { + "x12": { + "transactionSet": "X12_110", + "version": "VERSION_4010" + } } }, - "fileFormat": "JSON", - "mappingTemplate": "$", - "name": "transformJSON", - "sampleDocument": "s3://test-bucket/sampleDoc.txt", + "mapping": { + "templateLanguage": "JSONATA", + "template": "{}" + }, + "sampleDocuments": { + "bucketName": "test-bucket", + "keys": [ + { + "input": "sampleDoc.txt" + } + ] + }, "status": "inactive", "transformerArn": "arn:aws:b2bi:us-west-2:123456789012:transformer/tr-974c129999f84d8c9", "transformerId": "tr-974c129999f84d8c9" @@ -1633,45 +1899,78 @@ "smithy.api#required": {} } }, + "clientToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Reserved for future use.

", + "smithy.api#idempotencyToken": {} + } + }, + "tags": { + "target": "com.amazonaws.b2bi#TagList", + "traits": { + "aws.cloudformation#cfnMutability": "full", + "smithy.api#documentation": "

Specifies the key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (capabilities, partnerships, and so on) for any purpose.

" + } + }, "fileFormat": { "target": "com.amazonaws.b2bi#FileFormat", "traits": { - "smithy.api#documentation": "

Specifies that the currently supported file formats for EDI transformations are JSON and XML.

", - "smithy.api#required": {} + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Specifies that the currently supported file formats for EDI transformations are JSON and XML.

" } }, "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", - "smithy.api#required": {} + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

\n \n

This parameter is available for backwards compatibility. Use the Mapping data type instead.

\n
" } }, "ediType": { "target": "com.amazonaws.b2bi#EdiType", "traits": { - "smithy.api#documentation": "

Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

", - "smithy.api#required": {} + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

" } }, "sampleDocument": { "target": "com.amazonaws.b2bi#FileLocation", "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, "smithy.api#documentation": "

Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data.

" } }, - "clientToken": { - "target": "smithy.api#String", + "inputConversion": { + "target": "com.amazonaws.b2bi#InputConversion", "traits": { - "smithy.api#documentation": "

Reserved for future use.

", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "

Specify the InputConversion object, which contains the format options for the inbound transformation.

" } }, - "tags": { - "target": "com.amazonaws.b2bi#TagList", + "mapping": { + "target": "com.amazonaws.b2bi#Mapping", "traits": { - "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "

Specifies the key-value pairs assigned to ARNs that you can use to group and search for resources by type. You can attach this metadata to resources (capabilities, partnerships, and so on) for any purpose.

" + "smithy.api#documentation": "

Specify the structure that contains the mapping template and its language (either XSLT or JSONATA).

" + } + }, + "outputConversion": { + "target": "com.amazonaws.b2bi#OutputConversion", + "traits": { + "smithy.api#documentation": "

A structure that contains the OutputConversion object, which contains the format options for the outbound transformation.

" + } + }, + "sampleDocuments": { + "target": "com.amazonaws.b2bi#SampleDocuments", + "traits": { + "smithy.api#documentation": "

Specify a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.

" } } }, @@ -1703,20 +2002,6 @@ "smithy.api#required": {} } }, - "fileFormat": { - "target": "com.amazonaws.b2bi#FileFormat", - "traits": { - "smithy.api#documentation": "

Returns that the currently supported file formats for EDI transformations are JSON and XML.

", - "smithy.api#required": {} - } - }, - "mappingTemplate": { - "target": "com.amazonaws.b2bi#MappingTemplate", - "traits": { - "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", - "smithy.api#required": {} - } - }, "status": { "target": "com.amazonaws.b2bi#TransformerStatus", "traits": { @@ -1724,24 +2009,75 @@ "smithy.api#required": {} } }, - "ediType": { - "target": "com.amazonaws.b2bi#EdiType", + "createdAt": { + "target": "com.amazonaws.b2bi#CreatedDate", "traits": { - "smithy.api#documentation": "

Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

", + "smithy.api#documentation": "

Returns a timestamp for creation date and time of the transformer.

", "smithy.api#required": {} } }, - "sampleDocument": { - "target": "com.amazonaws.b2bi#FileLocation", + "fileFormat": { + "target": "com.amazonaws.b2bi#FileFormat", "traits": { - "smithy.api#documentation": "

Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.

" + "smithy.api#addedDefault": {}, + "smithy.api#default": "NOT_USED", + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns that the currently supported file formats for EDI transformations are JSON and XML.

" } }, - "createdAt": { - "target": "com.amazonaws.b2bi#CreatedDate", + "mappingTemplate": { + "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Returns a timestamp for creation date and time of the transformer.

", - "smithy.api#required": {} + "smithy.api#addedDefault": {}, + "smithy.api#default": "NOT_USED", + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

" + } + }, + "ediType": { + "target": "com.amazonaws.b2bi#EdiType", + "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

" + } + }, + "sampleDocument": { + "target": "com.amazonaws.b2bi#FileLocation", + "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.

" + } + }, + "inputConversion": { + "target": "com.amazonaws.b2bi#InputConversion", + "traits": { + "smithy.api#documentation": "

Returns the InputConversion object, which contains the format options for the inbound transformation.

" + } + }, + "mapping": { + "target": "com.amazonaws.b2bi#Mapping", + "traits": { + "smithy.api#documentation": "

Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).

" + } + }, + "outputConversion": { + "target": "com.amazonaws.b2bi#OutputConversion", + "traits": { + "smithy.api#documentation": "

Returns the OutputConversion object, which contains the format options for the outbound transformation.

" + } + }, + "sampleDocuments": { + "target": "com.amazonaws.b2bi#SampleDocuments", + "traits": { + "smithy.api#documentation": "

Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.

" } } }, @@ -1974,7 +2310,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified transformer. A transformer\n describes how to process the incoming EDI documents and extract the necessary\n information to the output file.

", + "smithy.api#documentation": "

Deletes the specified transformer. A transformer can take an EDI file as input and transform it into a JSON- or XML-formatted document. Alternatively,\n a transformer can take a JSON- or XML-formatted document as input and transform it into an EDI file.

", "smithy.api#examples": [ { "title": "Sample DeleteTransformer call", @@ -2010,6 +2346,12 @@ "com.amazonaws.b2bi#EdiConfiguration": { "type": "structure", "members": { + "capabilityDirection": { + "target": "com.amazonaws.b2bi#CapabilityDirection", + "traits": { + "smithy.api#documentation": "

Specifies whether this capability is for inbound or outbound transformations.

" + } + }, "type": { "target": "com.amazonaws.b2bi#EdiType", "traits": { @@ -2054,7 +2396,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

" + "smithy.api#documentation": "

Specifies the details for the EDI standard that is being used for the transformer.\n Currently, only X12 is supported. X12 is a set of standards and corresponding messages \n that define specific business documents.

" } }, "com.amazonaws.b2bi#Email": { @@ -2091,6 +2433,12 @@ "traits": { "smithy.api#enumValue": "JSON" } + }, + "NOT_USED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NOT_USED" + } } } }, @@ -2103,6 +2451,28 @@ } } }, + "com.amazonaws.b2bi#FormatOptions": { + "type": "union", + "members": { + "x12": { + "target": "com.amazonaws.b2bi#X12Details" + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains the X12 transaction set and version.

" + } + }, + "com.amazonaws.b2bi#FromFormat": { + "type": "enum", + "members": { + "X12": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "X12" + } + } + } + }, "com.amazonaws.b2bi#GetCapability": { "type": "operation", "input": { @@ -2378,6 +2748,9 @@ "smithy.api#documentation": "

Returns one or more capabilities associated with this partnership.

" } }, + "capabilityOptions": { + "target": "com.amazonaws.b2bi#CapabilityOptions" + }, "tradingPartnerId": { "target": "com.amazonaws.b2bi#TradingPartnerId", "traits": { @@ -2575,7 +2948,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the details for the transformer specified by the transformer ID. A transformer\n describes how to process the incoming EDI documents and extract the necessary\n information to the output file.

", + "smithy.api#documentation": "

Retrieves the details for the transformer specified by the transformer ID. A transformer can take an EDI file as input and transform it into a JSON- or XML-formatted document. Alternatively,\n a transformer can take a JSON- or XML-formatted document as input and transform it into an EDI file.

", "smithy.api#examples": [ { "title": "Sample GetTransformer call", @@ -2584,17 +2957,28 @@ }, "output": { "createdAt": "2023-11-01T21:51:05.504Z", - "ediType": { - "x12Details": { - "transactionSet": "X12_110", - "version": "VERSION_4010" + "name": "transformX12", + "inputConversion": { + "fromFormat": "X12", + "formatOptions": { + "x12": { + "transactionSet": "X12_110", + "version": "VERSION_4010" + } } }, - "fileFormat": "JSON", - "mappingTemplate": "$", - "modifiedAt": "2023-11-01T21:51:05.504Z", - "name": "transformJSON", - "sampleDocument": "s3://test-bucket/sampleDoc.txt", + "mapping": { + "templateLanguage": "JSONATA", + "template": "{}" + }, + "sampleDocuments": { + "bucketName": "test-bucket", + "keys": [ + { + "input": "sampleDoc.txt" + } + ] + }, "status": "inactive", "transformerArn": "arn:aws:b2bi:us-west-2:123456789012:transformer/tr-974c129999f84d8c9", "transformerId": "tr-974c129999f84d8c9" @@ -2753,51 +3137,88 @@ "smithy.api#required": {} } }, - "fileFormat": { - "target": "com.amazonaws.b2bi#FileFormat", + "status": { + "target": "com.amazonaws.b2bi#TransformerStatus", "traits": { - "smithy.api#documentation": "

Returns that the currently supported file formats for EDI transformations are JSON and XML.

", + "smithy.api#documentation": "

Returns the state of the newly created transformer. The transformer can be either\n active or inactive. For the transformer to be used in a\n capability, its status must be active.

", "smithy.api#required": {} } }, - "mappingTemplate": { - "target": "com.amazonaws.b2bi#MappingTemplate", + "createdAt": { + "target": "com.amazonaws.b2bi#CreatedDate", "traits": { - "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", + "smithy.api#documentation": "

Returns a timestamp for creation date and time of the transformer.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.b2bi#TransformerStatus", + "modifiedAt": { + "target": "com.amazonaws.b2bi#ModifiedDate", "traits": { - "smithy.api#documentation": "

Returns the state of the newly created transformer. The transformer can be either\n active or inactive. For the transformer to be used in a\n capability, its status must be active.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Returns a timestamp for last time the transformer was modified.

" + } + }, + "fileFormat": { + "target": "com.amazonaws.b2bi#FileFormat", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": "NOT_USED", + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns that the currently supported file formats for EDI transformations are JSON and XML.

" + } + }, + "mappingTemplate": { + "target": "com.amazonaws.b2bi#MappingTemplate", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": "NOT_USED", + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

" } }, "ediType": { "target": "com.amazonaws.b2bi#EdiType", "traits": { - "smithy.api#documentation": "

Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

", - "smithy.api#required": {} + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

" } }, "sampleDocument": { "target": "com.amazonaws.b2bi#FileLocation", "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, "smithy.api#documentation": "

Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.

" } }, - "createdAt": { - "target": "com.amazonaws.b2bi#CreatedDate", + "inputConversion": { + "target": "com.amazonaws.b2bi#InputConversion", "traits": { - "smithy.api#documentation": "

Returns a timestamp for creation date and time of the transformer.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Returns the InputConversion object, which contains the format options for the inbound transformation.

" } }, - "modifiedAt": { - "target": "com.amazonaws.b2bi#ModifiedDate", + "mapping": { + "target": "com.amazonaws.b2bi#Mapping", "traits": { - "smithy.api#documentation": "

Returns a timestamp for last time the transformer was modified.

" + "smithy.api#documentation": "

Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).

" + } + }, + "outputConversion": { + "target": "com.amazonaws.b2bi#OutputConversion", + "traits": { + "smithy.api#documentation": "

Returns the OutputConversion object, which contains the format options for the outbound transformation.

" + } + }, + "sampleDocuments": { + "target": "com.amazonaws.b2bi#SampleDocuments", + "traits": { + "smithy.api#documentation": "

Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.

" } } }, @@ -2805,6 +3226,45 @@ "smithy.api#output": {} } }, + "com.amazonaws.b2bi#InputConversion": { + "type": "structure", + "members": { + "fromFormat": { + "target": "com.amazonaws.b2bi#FromFormat", + "traits": { + "smithy.api#documentation": "

The format for the transformer input: currently only X12 is supported.

", + "smithy.api#required": {} + } + }, + "formatOptions": { + "target": "com.amazonaws.b2bi#FormatOptions", + "traits": { + "smithy.api#documentation": "

A structure that contains the formatting options for an inbound transformer.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the input formatting options for an inbound transformer (takes an X12-formatted\n EDI document as input and converts it to JSON or XML).

" + } + }, + "com.amazonaws.b2bi#InputFileSource": { + "type": "union", + "members": { + "fileContent": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Specify the input contents, as a string, for the source of an outbound transformation.

", + "smithy.api#length": { + "min": 1, + "max": 5000000 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The input file to use for an outbound transformation.

" + } + }, "com.amazonaws.b2bi#InstructionsDocuments": { "type": "list", "member": { @@ -2841,6 +3301,12 @@ "smithy.api#retryable": {} } }, + "com.amazonaws.b2bi#KeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.b2bi#SampleDocumentKeys" + } + }, "com.amazonaws.b2bi#ListCapabilities": { "type": "operation", "input": { @@ -3261,7 +3727,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the available transformers. A transformer\n describes how to process the incoming EDI documents and extract the necessary\n information to the output file.

", + "smithy.api#documentation": "

Lists the available transformers. A transformer can take an EDI file as input and transform it into a JSON- or XML-formatted document. Alternatively,\n a transformer can take a JSON- or XML-formatted document as input and transform it into an EDI file.

", "smithy.api#examples": [ { "title": "Sample ListTransformers call", @@ -3375,6 +3841,27 @@ } } }, + "com.amazonaws.b2bi#Mapping": { + "type": "structure", + "members": { + "templateLanguage": { + "target": "com.amazonaws.b2bi#MappingTemplateLanguage", + "traits": { + "smithy.api#documentation": "

The transformation language for the template, either XSLT or JSONATA.

", + "smithy.api#required": {} + } + }, + "template": { + "target": "com.amazonaws.b2bi#MappingTemplate", + "traits": { + "smithy.api#documentation": "

A string that represents the mapping template, in the transformation language specified in templateLanguage.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

" + } + }, "com.amazonaws.b2bi#MappingTemplate": { "type": "string", "traits": { @@ -3384,6 +3871,40 @@ } } }, + "com.amazonaws.b2bi#MappingTemplateLanguage": { + "type": "enum", + "members": { + "XSLT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "XSLT" + } + }, + "JSONATA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JSONATA" + } + } + } + }, + "com.amazonaws.b2bi#MappingType": { + "type": "enum", + "members": { + "JSONATA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JSONATA" + } + }, + "XSLT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "XSLT" + } + } + } + }, "com.amazonaws.b2bi#MaxResults": { "type": "integer", "traits": { @@ -3399,6 +3920,52 @@ "smithy.api#timestampFormat": "date-time" } }, + "com.amazonaws.b2bi#OutboundEdiOptions": { + "type": "union", + "members": { + "x12": { + "target": "com.amazonaws.b2bi#X12Envelope", + "traits": { + "smithy.api#documentation": "

A structure that contains an X12 envelope structure.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A container for outbound EDI options.

" + } + }, + "com.amazonaws.b2bi#OutputConversion": { + "type": "structure", + "members": { + "toFormat": { + "target": "com.amazonaws.b2bi#ToFormat", + "traits": { + "smithy.api#documentation": "

The format for the output from an outbound transformer: only X12 is currently supported.

", + "smithy.api#required": {} + } + }, + "formatOptions": { + "target": "com.amazonaws.b2bi#FormatOptions", + "traits": { + "smithy.api#documentation": "

A structure that contains the X12 transaction set and version for the transformer output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the formatting options for an outbound transformer (takes JSON or XML as input and converts it to an EDI document; currently only X12 format is supported).

" + } + }, + "com.amazonaws.b2bi#OutputSampleFileSource": { + "type": "union", + "members": { + "fileLocation": { + "target": "com.amazonaws.b2bi#S3Location" + } + }, + "traits": { + "smithy.api#documentation": "

Container for the location of a sample file used for outbound transformations.

" + } + }, "com.amazonaws.b2bi#PageToken": { "type": "string", "traits": { @@ -3505,6 +4072,9 @@ "smithy.api#documentation": "

Returns one or more capabilities associated with this partnership.

" } }, + "capabilityOptions": { + "target": "com.amazonaws.b2bi#CapabilityOptions" + }, "tradingPartnerId": { "target": "com.amazonaws.b2bi#TradingPartnerId", "traits": { @@ -3717,7 +4287,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the details for the Amazon S3 file location that is being used with Amazon Web Services B2BI Data Interchange. File\n locations in Amazon S3 are identified using a combination of the bucket and key.

" + "smithy.api#documentation": "

Specifies the details for the Amazon S3 file location that is being used with Amazon Web Services B2B Data Interchange. File\n locations in Amazon S3 are identified using a combination of the bucket and key.

" } }, "com.amazonaws.b2bi#S3LocationList": { @@ -3726,6 +4296,48 @@ "target": "com.amazonaws.b2bi#S3Location" } }, + "com.amazonaws.b2bi#SampleDocumentKeys": { + "type": "structure", + "members": { + "input": { + "target": "com.amazonaws.b2bi#S3Key", + "traits": { + "smithy.api#documentation": "

An array of keys for your input sample documents.

" + } + }, + "output": { + "target": "com.amazonaws.b2bi#S3Key", + "traits": { + "smithy.api#documentation": "

An array of keys for your output sample documents.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An array of the Amazon S3 keys used to identify the location for your sample documents.

" + } + }, + "com.amazonaws.b2bi#SampleDocuments": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.b2bi#BucketName", + "traits": { + "smithy.api#documentation": "

Contains the Amazon S3 bucket that is used to hold your sample documents.

", + "smithy.api#required": {} + } + }, + "keys": { + "target": "com.amazonaws.b2bi#KeyList", + "traits": { + "smithy.api#documentation": "

Contains an array of the Amazon S3 keys used to identify the location for your sample documents.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.

" + } + }, "com.amazonaws.b2bi#ServiceQuotaExceededException": { "type": "structure", "members": { @@ -3799,7 +4411,7 @@ } ], "traits": { - "smithy.api#documentation": "

Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2BI Data Interchange.

\n

If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just\n create and configure a transformer, and then run the StartTransformerJob API to process your files.

", + "smithy.api#documentation": "

Runs a job, using a transformer, to parse input EDI (electronic data interchange) file into the output structures used by Amazon Web Services B2B Data Interchange.

\n

If you only want to transform EDI (electronic data interchange) documents, you don't need to create profiles, partnerships or capabilities. Just\n create and configure a transformer, and then run the StartTransformerJob API to process your files.

", "smithy.api#examples": [ { "title": "Sample StartTransformerJob call", @@ -4010,13 +4622,24 @@ } } }, - "com.amazonaws.b2bi#TestMapping": { + "com.amazonaws.b2bi#TemplateDetails": { + "type": "union", + "members": { + "x12": { + "target": "com.amazonaws.b2bi#X12Details" + } + }, + "traits": { + "smithy.api#documentation": "

A data structure that contains the information to use when generating a mapping template.

" + } + }, + "com.amazonaws.b2bi#TestConversion": { "type": "operation", "input": { - "target": "com.amazonaws.b2bi#TestMappingRequest" + "target": "com.amazonaws.b2bi#TestConversionRequest" }, "output": { - "target": "com.amazonaws.b2bi#TestMappingResponse" + "target": "com.amazonaws.b2bi#TestConversionResponse" }, "errors": [ { @@ -4036,17 +4659,121 @@ } ], "traits": { - "smithy.api#documentation": "

Maps the input file according to the provided template file. The API call downloads the file contents from the Amazon S3 location, and passes the contents in as a string, to the inputFileContent parameter.

", + "smithy.api#documentation": "

This operation mimics the latter half of a typical Outbound EDI request. It takes a JSON or XML input in the B2Bi shape, converts it to an X12 EDI string, and returns that string.
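A corresponding Soto call might look like the following sketch (same caveats as above: shape, member, and case names are assumed from the model, not taken from the generated Swift):

// Hypothetical: converts a JSON document in the B2Bi shape into an X12 110 / 4010 EDI string.
let conversion = try await b2bi.testConversion(.init(
    source: .init(
        fileFormat: .json,
        inputFile: .fileContent("{ \"sample\": \"content\" }")
    ),
    target: .init(
        fileFormat: .x12,
        formatDetails: .x12(.init(transactionSet: .x12110, version: .version4010))
    )
))
print(conversion.convertedFileContent)
print(conversion.validationMessages ?? [])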

", "smithy.api#examples": [ { - "title": "Sample TestMapping call", + "title": "Sample TestConversion call", "input": { - "fileFormat": "JSON", - "inputFileContent": "Sample file content", - "mappingTemplate": "$" + "source": { + "fileFormat": "JSON", + "inputFile": { + "fileContent": "Sample file content" + } + }, + "target": { + "fileFormat": "X12", + "formatDetails": { + "x12": { + "transactionSet": "X12_110", + "version": "VERSION_4010" + } + } + } }, "output": { - "mappedFileContent": "Sample file content" + "convertedFileContent": "Sample converted file content", + "validationMessages": [] + } + } + ], + "smithy.api#http": { + "code": 200, + "uri": "/testconversion", + "method": "POST" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.b2bi#TestConversionRequest": { + "type": "structure", + "members": { + "source": { + "target": "com.amazonaws.b2bi#ConversionSource", + "traits": { + "smithy.api#documentation": "

Specify the source file for an outbound EDI request.

", + "smithy.api#required": {} + } + }, + "target": { + "target": "com.amazonaws.b2bi#ConversionTarget", + "traits": { + "smithy.api#documentation": "

Specify the format (X12 is the only currently supported format), and other details for the conversion target.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.b2bi#TestConversionResponse": { + "type": "structure", + "members": { + "convertedFileContent": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Returns the converted file content.

", + "smithy.api#required": {} + } + }, + "validationMessages": { + "target": "com.amazonaws.b2bi#ValidationMessages", + "traits": { + "smithy.api#documentation": "

Returns an array of strings, each containing a message that Amazon Web Services B2B Data Interchange generates during the conversion.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.b2bi#TestMapping": { + "type": "operation", + "input": { + "target": "com.amazonaws.b2bi#TestMappingRequest" + }, + "output": { + "target": "com.amazonaws.b2bi#TestMappingResponse" + }, + "errors": [ + { + "target": "com.amazonaws.b2bi#AccessDeniedException" + }, + { + "target": "com.amazonaws.b2bi#InternalServerException" + }, + { + "target": "com.amazonaws.b2bi#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.b2bi#ThrottlingException" + }, + { + "target": "com.amazonaws.b2bi#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Maps the input file according to the provided template file. The API call downloads the file contents from the Amazon S3 location, and passes the contents in as a string, to the inputFileContent parameter.

", + "smithy.api#examples": [ + { + "title": "Sample TestMapping call", + "input": { + "fileFormat": "JSON", + "inputFileContent": "Sample file content", + "mappingTemplate": "$" + }, + "output": { + "mappedFileContent": "Sample file content" } } ], @@ -4080,7 +4807,7 @@ "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", + "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

\n \n

This parameter is available for backwards compatibility. Use the Mapping data type instead.

\n
", "smithy.api#required": {} } }, @@ -4235,6 +4962,17 @@ "smithy.api#retryable": {} } }, + "com.amazonaws.b2bi#ToFormat": { + "type": "enum", + "members": { + "X12": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "X12" + } + } + } + }, "com.amazonaws.b2bi#TradingPartnerId": { "type": "string", "traits": { @@ -4337,7 +5075,8 @@ "smithy.api#length": { "min": 1, "max": 254 - } + }, + "smithy.api#pattern": "^[a-zA-Z0-9_-]{1,512}$" } }, "com.amazonaws.b2bi#TransformerStatus": { @@ -4374,56 +5113,93 @@ "smithy.api#required": {} } }, - "fileFormat": { - "target": "com.amazonaws.b2bi#FileFormat", + "status": { + "target": "com.amazonaws.b2bi#TransformerStatus", "traits": { - "smithy.api#documentation": "

Returns that the currently supported file formats for EDI transformations are JSON and XML.

", + "smithy.api#documentation": "

Returns the state of the newly created transformer. The transformer can be either\n active or inactive. For the transformer to be used in a\n capability, its status must be active.

", "smithy.api#required": {} } }, - "mappingTemplate": { - "target": "com.amazonaws.b2bi#MappingTemplate", + "createdAt": { + "target": "com.amazonaws.b2bi#CreatedDate", "traits": { - "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", + "smithy.api#documentation": "

Returns a timestamp indicating when the transformer was created. For example,\n 2023-07-20T19:58:44.624Z.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.b2bi#TransformerStatus", + "modifiedAt": { + "target": "com.amazonaws.b2bi#ModifiedDate", "traits": { - "smithy.api#documentation": "

Returns the state of the newly created transformer. The transformer can be either\n active or inactive. For the transformer to be used in a\n capability, its status must be active.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Returns a timestamp representing the date and time for the most recent change for the transformer object.

" + } + }, + "fileFormat": { + "target": "com.amazonaws.b2bi#FileFormat", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": "NOT_USED", + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns that the currently supported file formats for EDI transformations are JSON and XML.

" + } + }, + "mappingTemplate": { + "target": "com.amazonaws.b2bi#MappingTemplate", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": "NOT_USED", + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

" } }, "ediType": { "target": "com.amazonaws.b2bi#EdiType", "traits": { - "smithy.api#documentation": "

Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

", - "smithy.api#required": {} + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

" } }, "sampleDocument": { "target": "com.amazonaws.b2bi#FileLocation", "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, "smithy.api#documentation": "

Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.

" } }, - "createdAt": { - "target": "com.amazonaws.b2bi#CreatedDate", + "inputConversion": { + "target": "com.amazonaws.b2bi#InputConversion", "traits": { - "smithy.api#documentation": "

Returns a timestamp indicating when the transformer was created. For example,\n 2023-07-20T19:58:44.624Z.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Returns a structure that contains the format options for the transformation.

" } }, - "modifiedAt": { - "target": "com.amazonaws.b2bi#ModifiedDate", + "mapping": { + "target": "com.amazonaws.b2bi#Mapping", "traits": { - "smithy.api#documentation": "

Returns a timestamp representing the date and time for the most recent change for the transformer object.

" + "smithy.api#documentation": "

Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).

" + } + }, + "outputConversion": { + "target": "com.amazonaws.b2bi#OutputConversion", + "traits": { + "smithy.api#documentation": "

Returns the OutputConversion object, which contains the format options for the outbound transformation.

" + } + }, + "sampleDocuments": { + "target": "com.amazonaws.b2bi#SampleDocuments", + "traits": { + "smithy.api#documentation": "

Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.

" } } }, "traits": { - "smithy.api#documentation": "

Contains the details for a transformer object. A transformer\n describes how to process the incoming EDI documents and extract the necessary\n information to the output file.

", + "smithy.api#documentation": "

Contains the details for a transformer object. A transformer can take an EDI file as input and transform it into a JSON- or XML-formatted document. Alternatively,\n a transformer can take a JSON- or XML-formatted document as input and transform it into an EDI file.

", "smithy.api#references": [ { "resource": "com.amazonaws.b2bi#Transformer" @@ -4796,6 +5572,12 @@ "traits": { "smithy.api#documentation": "

List of the capabilities associated with this partnership.

" } + }, + "capabilityOptions": { + "target": "com.amazonaws.b2bi#CapabilityOptions", + "traits": { + "smithy.api#documentation": "

To update, specify the structure that contains the details for the associated capabilities.

" + } } }, "traits": { @@ -4850,6 +5632,12 @@ "smithy.api#documentation": "

Returns one or more capabilities associated with this partnership.

" } }, + "capabilityOptions": { + "target": "com.amazonaws.b2bi#CapabilityOptions", + "traits": { + "smithy.api#documentation": "

Returns the structure that contains the details for the associated capabilities.

" + } + }, "tradingPartnerId": { "target": "com.amazonaws.b2bi#TradingPartnerId", "traits": { @@ -5093,39 +5881,63 @@ "b2bi:TagResource", "b2bi:UntagResource" ], - "smithy.api#documentation": "

Updates the specified parameters for a transformer. A transformer\n describes how to process the incoming EDI documents and extract the necessary\n information to the output file.

", + "smithy.api#documentation": "

Updates the specified parameters for a transformer. A transformer can take an EDI file as input and transform it into a JSON- or XML-formatted document. Alternatively,\n a transformer can take a JSON- or XML-formatted document as input and transform it into an EDI file.

", "smithy.api#examples": [ { "title": "Sample UpdateTransformer call", "input": { - "ediType": { - "x12Details": { - "transactionSet": "X12_110", - "version": "VERSION_4010" + "inputConversion": { + "fromFormat": "X12", + "formatOptions": { + "x12": { + "transactionSet": "X12_110", + "version": "VERSION_4010" + } } }, - "fileFormat": "JSON", - "mappingTemplate": "{}", - "name": "transformJSON", - "sampleDocument": "s3://test-bucket/sampleDoc.txt", + "mapping": { + "templateLanguage": "JSONATA", + "template": "{}" + }, + "sampleDocuments": { + "bucketName": "test-bucket", + "keys": [ + { + "input": "sampleDoc.txt" + } + ] + }, + "name": "transformX12", "status": "inactive", "transformerId": "tr-974c129999f84d8c9" }, "output": { "createdAt": "2023-11-01T21:51:05.504Z", - "ediType": { - "x12Details": { - "transactionSet": "X12_110", - "version": "VERSION_4010" + "modifiedAt": "2023-11-02T22:31:05.504Z", + "name": "transformX12", + "inputConversion": { + "fromFormat": "X12", + "formatOptions": { + "x12": { + "transactionSet": "X12_110", + "version": "VERSION_4010" + } } }, - "fileFormat": "JSON", - "mappingTemplate": "$", - "modifiedAt": "2023-11-01T21:51:05.504Z", - "name": "transformJSON", - "sampleDocument": "s3://test-bucket/sampleDoc.txt", + "mapping": { + "templateLanguage": "JSONATA", + "template": "{}" + }, + "sampleDocuments": { + "bucketName": "test-bucket", + "keys": [ + { + "input": "sampleDoc.txt" + } + ] + }, "status": "inactive", - "transformerArn": "arn:aws:b2bi:us-west-2:607686414464:transformer/tr-974c129999f84d8c9", + "transformerArn": "arn:aws:b2bi:us-west-2:123456789012:transformer/tr-974c129999f84d8c9", "transformerId": "tr-974c129999f84d8c9" } } @@ -5155,35 +5967,71 @@ "smithy.api#documentation": "

Specify a new name for the transformer, if you want to update it.

" } }, + "status": { + "target": "com.amazonaws.b2bi#TransformerStatus", + "traits": { + "smithy.api#documentation": "

Specifies the transformer's status. You can update the state of the transformer, from active to inactive, or inactive to active.

" + } + }, "fileFormat": { "target": "com.amazonaws.b2bi#FileFormat", "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, "smithy.api#documentation": "

Specifies that the currently supported file formats for EDI transformations are JSON and XML.

" } }, "mappingTemplate": { "target": "com.amazonaws.b2bi#MappingTemplate", "traits": { - "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

" - } - }, - "status": { - "target": "com.amazonaws.b2bi#TransformerStatus", - "traits": { - "smithy.api#documentation": "

Specifies the transformer's status. You can update the state of the transformer, from active to inactive, or inactive to active.

" + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Specifies the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

\n \n

This parameter is available for backwards compatibility. Use the Mapping data type instead.

\n
" } }, "ediType": { "target": "com.amazonaws.b2bi#EdiType", "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, "smithy.api#documentation": "

Specifies the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

" } }, "sampleDocument": { "target": "com.amazonaws.b2bi#FileLocation", "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, "smithy.api#documentation": "

Specifies a sample EDI document that is used by a transformer as a guide for processing the EDI data.

" } + }, + "inputConversion": { + "target": "com.amazonaws.b2bi#InputConversion", + "traits": { + "smithy.api#documentation": "

To update, specify the InputConversion object, which contains the format options for the inbound transformation.

" + } + }, + "mapping": { + "target": "com.amazonaws.b2bi#Mapping", + "traits": { + "smithy.api#documentation": "

Specify the structure that contains the mapping template and its language (either XSLT or JSONATA).

" + } + }, + "outputConversion": { + "target": "com.amazonaws.b2bi#OutputConversion", + "traits": { + "smithy.api#documentation": "

To update, specify the OutputConversion object, which contains the format options for the outbound transformation.

" + } + }, + "sampleDocuments": { + "target": "com.amazonaws.b2bi#SampleDocuments", + "traits": { + "smithy.api#documentation": "

Specify a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.

" + } } }, "traits": { @@ -5214,52 +6062,89 @@ "smithy.api#required": {} } }, - "fileFormat": { - "target": "com.amazonaws.b2bi#FileFormat", + "status": { + "target": "com.amazonaws.b2bi#TransformerStatus", "traits": { - "smithy.api#documentation": "

Returns that the currently supported file formats for EDI transformations are JSON and XML.

", + "smithy.api#documentation": "

Returns the state of the newly created transformer. The transformer can be either\n active or inactive. For the transformer to be used in a\n capability, its status must be active.

", "smithy.api#required": {} } }, - "mappingTemplate": { - "target": "com.amazonaws.b2bi#MappingTemplate", + "createdAt": { + "target": "com.amazonaws.b2bi#CreatedDate", "traits": { - "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

", + "smithy.api#documentation": "

Returns a timestamp for creation date and time of the transformer.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.b2bi#TransformerStatus", + "modifiedAt": { + "target": "com.amazonaws.b2bi#ModifiedDate", "traits": { - "smithy.api#documentation": "

Returns the state of the newly created transformer. The transformer can be either\n active or inactive. For the transformer to be used in a\n capability, its status must be active.

", + "smithy.api#documentation": "

Returns a timestamp for last time the transformer was modified.

", "smithy.api#required": {} } }, + "fileFormat": { + "target": "com.amazonaws.b2bi#FileFormat", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": "NOT_USED", + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns that the currently supported file formats for EDI transformations are JSON and XML.

" + } + }, + "mappingTemplate": { + "target": "com.amazonaws.b2bi#MappingTemplate", + "traits": { + "smithy.api#addedDefault": {}, + "smithy.api#default": "NOT_USED", + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns the mapping template for the transformer. This template is used to map the parsed EDI file using JSONata or XSLT.

" + } + }, "ediType": { "target": "com.amazonaws.b2bi#EdiType", "traits": { - "smithy.api#documentation": "

Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

", - "smithy.api#required": {} + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, + "smithy.api#documentation": "

Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.

" } }, "sampleDocument": { "target": "com.amazonaws.b2bi#FileLocation", "traits": { + "smithy.api#deprecated": { + "message": "This is a legacy trait. Please use input-conversion or output-conversion." + }, "smithy.api#documentation": "

Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.

" } }, - "createdAt": { - "target": "com.amazonaws.b2bi#CreatedDate", + "inputConversion": { + "target": "com.amazonaws.b2bi#InputConversion", "traits": { - "smithy.api#documentation": "

Returns a timestamp for creation date and time of the transformer.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Returns the InputConversion object, which contains the format options for the inbound transformation.

" } }, - "modifiedAt": { - "target": "com.amazonaws.b2bi#ModifiedDate", + "mapping": { + "target": "com.amazonaws.b2bi#Mapping", "traits": { - "smithy.api#documentation": "

Returns a timestamp for last time the transformer was modified.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).

" + } + }, + "outputConversion": { + "target": "com.amazonaws.b2bi#OutputConversion", + "traits": { + "smithy.api#documentation": "

Returns the OutputConversion object, which contains the format options for the outbound transformation.

" + } + }, + "sampleDocuments": { + "target": "com.amazonaws.b2bi#SampleDocuments", + "traits": { + "smithy.api#documentation": "

Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.

" } } }, @@ -5283,6 +6168,88 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.b2bi#ValidationMessages": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.b2bi#X12AcknowledgmentRequestedCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]*$" + } + }, + "com.amazonaws.b2bi#X12ApplicationReceiverCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 15 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]*$" + } + }, + "com.amazonaws.b2bi#X12ApplicationSenderCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 15 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]*$" + } + }, + "com.amazonaws.b2bi#X12ComponentSeparator": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#pattern": "^[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]$" + } + }, + "com.amazonaws.b2bi#X12DataElementSeparator": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#pattern": "^[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]$" + } + }, + "com.amazonaws.b2bi#X12Delimiters": { + "type": "structure", + "members": { + "componentSeparator": { + "target": "com.amazonaws.b2bi#X12ComponentSeparator", + "traits": { + "smithy.api#documentation": "

The component, or sub-element, separator. The default value is : (colon).

" + } + }, + "dataElementSeparator": { + "target": "com.amazonaws.b2bi#X12DataElementSeparator", + "traits": { + "smithy.api#documentation": "

The data element separator. The default value is * (asterisk).

" + } + }, + "segmentTerminator": { + "target": "com.amazonaws.b2bi#X12SegmentTerminator", + "traits": { + "smithy.api#documentation": "

The segment terminator. The default value is ~ (tilde).

" + } + } + }, + "traits": { + "smithy.api#documentation": "

In X12 EDI messages, delimiters are used to mark the end of segments or elements, and are defined in the interchange control header.\n The delimiters are part of the message's syntax and divide up its different elements.

" + } + }, "com.amazonaws.b2bi#X12Details": { "type": "structure", "members": { @@ -5295,7 +6262,7 @@ "version": { "target": "com.amazonaws.b2bi#X12Version", "traits": { - "smithy.api#documentation": "

Returns the version to use for the specified X12 transaction set.\n \n \n \n

" + "smithy.api#documentation": "

Returns the version to use for the specified X12 transaction set.

" } } }, @@ -5303,6 +6270,187 @@ "smithy.api#documentation": "

A structure that contains the X12 transaction set and version. The X12 structure is used when the system transforms an EDI (electronic data interchange) file.

\n \n

If an EDI input file contains more than one transaction, each transaction must have the same transaction set and version, for example 214/4010. If not, the transformer cannot parse the file.

\n
" } }, + "com.amazonaws.b2bi#X12Envelope": { + "type": "structure", + "members": { + "common": { + "target": "com.amazonaws.b2bi#X12OutboundEdiHeaders", + "traits": { + "smithy.api#documentation": "

A container for the X12 outbound EDI headers.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A wrapper structure for an X12 definition object.

The X12 envelope ensures the integrity of the data and the efficiency of the information exchange. The X12 message structure has hierarchical levels. From highest to the lowest, they are:

  • Interchange Envelope

  • Functional Group

  • Transaction Set
" + } + }, + "com.amazonaws.b2bi#X12FunctionalGroupHeaders": { + "type": "structure", + "members": { + "applicationSenderCode": { + "target": "com.amazonaws.b2bi#X12ApplicationSenderCode", + "traits": { + "smithy.api#documentation": "

A value representing the code used to identify the party transmitting a message, at position GS-02.

" + } + }, + "applicationReceiverCode": { + "target": "com.amazonaws.b2bi#X12ApplicationReceiverCode", + "traits": { + "smithy.api#documentation": "

A value representing the code used to identify the party receiving a message, at position GS-03.

" + } + }, + "responsibleAgencyCode": { + "target": "com.amazonaws.b2bi#X12ResponsibleAgencyCode", + "traits": { + "smithy.api#documentation": "

A code that identifies the issuer of the standard, at position GS-07.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Part of the X12 message structure. These are the functional group headers for the X12 EDI object.

" + } + }, + "com.amazonaws.b2bi#X12IdQualifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 2 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]*$" + } + }, + "com.amazonaws.b2bi#X12InterchangeControlHeaders": { + "type": "structure", + "members": { + "senderIdQualifier": { + "target": "com.amazonaws.b2bi#X12IdQualifier", + "traits": { + "smithy.api#documentation": "

Located at position ISA-05 in the header. Qualifier for the sender ID. Together, the ID and qualifier uniquely identify the sending trading partner.

" + } + }, + "senderId": { + "target": "com.amazonaws.b2bi#X12SenderId", + "traits": { + "smithy.api#documentation": "

Located at position ISA-06 in the header. This value (along with the senderIdQualifier) identifies the sender of the interchange.

" + } + }, + "receiverIdQualifier": { + "target": "com.amazonaws.b2bi#X12IdQualifier", + "traits": { + "smithy.api#documentation": "

Located at position ISA-07 in the header. Qualifier for the receiver ID. Together, the ID and qualifier uniquely identify the receiving trading partner.

" + } + }, + "receiverId": { + "target": "com.amazonaws.b2bi#X12ReceiverId", + "traits": { + "smithy.api#documentation": "

Located at position ISA-08 in the header. This value (along with the receiverIdQualifier) identifies the intended recipient of the interchange.

" + } + }, + "repetitionSeparator": { + "target": "com.amazonaws.b2bi#X12RepetitionSeparator", + "traits": { + "smithy.api#documentation": "

Located at position ISA-11 in the header. This string makes it easier when you need to group similar adjacent element values together without using extra segments.

\n \n

This parameter is only honored for versions greater than 401 (VERSION_4010 and higher).

\n

For versions less than 401, this field is called StandardsId, in which case our service\n sets the value to U.

\n
" + } + }, + "acknowledgmentRequestedCode": { + "target": "com.amazonaws.b2bi#X12AcknowledgmentRequestedCode", + "traits": { + "smithy.api#documentation": "

Located at position ISA-14 in the header. The value \"1\" indicates that the sender is requesting an interchange acknowledgment at receipt of the interchange. The value \"0\" is used otherwise.

" + } + }, + "usageIndicatorCode": { + "target": "com.amazonaws.b2bi#X12UsageIndicatorCode", + "traits": { + "smithy.api#documentation": "

Located at position ISA-15 in the header. Specifies how this interchange is being used:

  • T indicates this interchange is for testing.
  • P indicates this interchange is for production.
  • I indicates this interchange is informational.
" + } + } + }, + "traits": { + "smithy.api#documentation": "

In X12, the Interchange Control Header is the first segment of an EDI document and is\n part of the Interchange Envelope. It contains information about the sender and receiver,\n the date and time of transmission, and the X12 version being used. It also includes\n delivery information, such as the sender and receiver IDs.

" + } + }, + "com.amazonaws.b2bi#X12OutboundEdiHeaders": { + "type": "structure", + "members": { + "interchangeControlHeaders": { + "target": "com.amazonaws.b2bi#X12InterchangeControlHeaders", + "traits": { + "smithy.api#documentation": "

The interchange control headers for the X12 object. In X12 EDI messages, the interchange control header identifies the sender and receiver and defines syntax details, such as the delimiters used to mark the end of segments or elements.

" + } + }, + "functionalGroupHeaders": { + "target": "com.amazonaws.b2bi#X12FunctionalGroupHeaders", + "traits": { + "smithy.api#documentation": "

The functional group headers for the X12 object.

" + } + }, + "delimiters": { + "target": "com.amazonaws.b2bi#X12Delimiters", + "traits": { + "smithy.api#documentation": "

The delimiters, for example the semicolon (;), that separate sections of the headers for the X12 object.

" + } + }, + "validateEdi": { + "target": "com.amazonaws.b2bi#X12ValidateEdi", + "traits": { + "smithy.api#documentation": "

Specifies whether or not to validate the EDI for this X12 object: TRUE or FALSE.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing the details for an outbound EDI object.
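For orientation, here is a minimal sketch of how these outbound EDI headers might be constructed from Swift through the generated Soto client. The member names are taken from the model above; the module name SotoB2bi, the B2bi namespace, and all IDs and codes are assumptions or placeholders, not values from this change.

```swift
import SotoB2bi  // assumed module name for the generated B2BI client

// Placeholder values; sender and receiver IDs must be exactly 15 alphanumeric
// characters per the X12SenderId/X12ReceiverId length traits above.
let interchange = B2bi.X12InterchangeControlHeaders(
    acknowledgmentRequestedCode: "1",   // ISA-14: request an interchange acknowledgment
    receiverId: "RECEIVERID00001",      // ISA-08
    receiverIdQualifier: "ZZ",          // ISA-07: mutually defined
    repetitionSeparator: "^",           // ISA-11: honored for VERSION_4010 and higher
    senderId: "SENDERID0000001",        // ISA-06
    senderIdQualifier: "ZZ",            // ISA-05
    usageIndicatorCode: "T"             // ISA-15: T = test interchange
)

let functionalGroup = B2bi.X12FunctionalGroupHeaders(
    applicationReceiverCode: "RECVAPP", // GS-03
    applicationSenderCode: "SENDAPP",   // GS-02
    responsibleAgencyCode: "X"          // GS-07: X = ASC X12
)

// Wrap the headers in the envelope structure introduced by this change.
let envelope = B2bi.X12Envelope(
    common: B2bi.X12OutboundEdiHeaders(
        functionalGroupHeaders: functionalGroup,
        interchangeControlHeaders: interchange,
        validateEdi: true
    )
)
```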

" + } + }, + "com.amazonaws.b2bi#X12ReceiverId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 15, + "max": 15 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]*$" + } + }, + "com.amazonaws.b2bi#X12RepetitionSeparator": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.b2bi#X12ResponsibleAgencyCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]*$" + } + }, + "com.amazonaws.b2bi#X12SegmentTerminator": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#pattern": "^[!&'()*+,\\-./:;?=%@\\[\\]_{}|<>~^`\"]$" + } + }, + "com.amazonaws.b2bi#X12SenderId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 15, + "max": 15 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]*$" + } + }, "com.amazonaws.b2bi#X12TransactionSet": { "type": "enum", "members": { @@ -5758,6 +6906,19 @@ } } }, + "com.amazonaws.b2bi#X12UsageIndicatorCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]*$" + } + }, + "com.amazonaws.b2bi#X12ValidateEdi": { + "type": "boolean" + }, "com.amazonaws.b2bi#X12Version": { "type": "enum", "members": { diff --git a/models/bedrock-agent-runtime.json b/models/bedrock-agent-runtime.json index b5b293bf4c..06389032de 100644 --- a/models/bedrock-agent-runtime.json +++ b/models/bedrock-agent-runtime.json @@ -1504,26 +1504,26 @@ "modelArn": { "target": "com.amazonaws.bedrockagentruntime#BedrockModelArn", "traits": { - "smithy.api#documentation": "

The modelArn used with the external source wrapper object in the retrieveAndGenerate function.

", + "smithy.api#documentation": "

The model Amazon Resource Name (ARN) for the external source wrapper object in the retrieveAndGenerate function.

", "smithy.api#required": {} } }, "sources": { "target": "com.amazonaws.bedrockagentruntime#ExternalSources", "traits": { - "smithy.api#documentation": "

The document used with the external source wrapper object in the retrieveAndGenerate function.

", + "smithy.api#documentation": "

The document for the external source wrapper object in the retrieveAndGenerate function.

", "smithy.api#required": {} } }, "generationConfiguration": { "target": "com.amazonaws.bedrockagentruntime#ExternalSourcesGenerationConfiguration", "traits": { - "smithy.api#documentation": "

The prompt used with the external source wrapper object with the retrieveAndGenerate function.

" + "smithy.api#documentation": "

The prompt used with the external source wrapper object with the retrieveAndGenerate function.

" } } }, "traits": { - "smithy.api#documentation": "

The configurations of the external source wrapper object in the retrieveAndGenerate function.

" + "smithy.api#documentation": "

The configurations of the external source wrapper object in the retrieveAndGenerate function.

" } }, "com.amazonaws.bedrockagentruntime#FailureReasonString": { @@ -4217,6 +4217,12 @@ "traits": { "smithy.api#documentation": "

Details about the response from the Lambda parsing of the output of the post-processing step.

" } + }, + "rawResponse": { + "target": "com.amazonaws.bedrockagentruntime#RawResponse" + }, + "metadata": { + "target": "com.amazonaws.bedrockagentruntime#Metadata" } }, "traits": { @@ -4274,6 +4280,12 @@ "traits": { "smithy.api#documentation": "

Details about the response from the Lambda parsing of the output of the pre-processing step.

" } + }, + "rawResponse": { + "target": "com.amazonaws.bedrockagentruntime#RawResponse" + }, + "metadata": { + "target": "com.amazonaws.bedrockagentruntime#Metadata" } }, "traits": { @@ -5038,20 +5050,20 @@ "type": { "target": "com.amazonaws.bedrockagentruntime#RetrieveAndGenerateType", "traits": { - "smithy.api#documentation": "

The type of resource that is queried by the request.

", + "smithy.api#documentation": "

The type of resource that contains your data for retrieving information and generating responses.

\n

If you choose to use EXTERNAL_SOURCES, then currently only Claude 3 Sonnet models for knowledge bases are supported.
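As a rough illustration of how the type field pairs with the corresponding configuration member, here is a sketch using the generated Soto client for Bedrock Agent Runtime. The KNOWLEDGE_BASE variant is shown; the knowledgeBaseId and modelArn member names come from the service API rather than this excerpt, and the module, type, and ARN values are assumptions or placeholders.

```swift
import SotoBedrockAgentRuntime  // assumed module name

// Query a knowledge base (type KNOWLEDGE_BASE). For EXTERNAL_SOURCES, supply
// externalSourcesConfiguration instead and use a supported Claude 3 Sonnet model.
func askKnowledgeBase(
    _ runtime: BedrockAgentRuntime
) async throws -> BedrockAgentRuntime.RetrieveAndGenerateResponse {
    try await runtime.retrieveAndGenerate(
        .init(
            input: .init(text: "What is our refund policy?"),
            retrieveAndGenerateConfiguration: .init(
                knowledgeBaseConfiguration: .init(
                    knowledgeBaseId: "EXAMPLEKBID0",  // placeholder
                    modelArn: "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0"
                ),
                type: .knowledgeBase
            )
        )
    )
}
```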

", "smithy.api#required": {} } }, "knowledgeBaseConfiguration": { "target": "com.amazonaws.bedrockagentruntime#KnowledgeBaseRetrieveAndGenerateConfiguration", "traits": { - "smithy.api#documentation": "

Contains details about the resource being queried.

" + "smithy.api#documentation": "

Contains details about the knowledge base for retrieving information and generating responses.

" } }, "externalSourcesConfiguration": { "target": "com.amazonaws.bedrockagentruntime#ExternalSourcesRetrieveAndGenerateConfiguration", "traits": { - "smithy.api#documentation": "

The configuration used with the external source wrapper object in the retrieveAndGenerate function.

" + "smithy.api#documentation": "

The configuration for the external source wrapper object in the retrieveAndGenerate function.

" } } }, diff --git a/models/bedrock-agent.json b/models/bedrock-agent.json index 228cf0b79a..f8018ec572 100644 --- a/models/bedrock-agent.json +++ b/models/bedrock-agent.json @@ -2617,7 +2617,7 @@ "foundationModel": { "target": "com.amazonaws.bedrockagent#ModelIdentifier", "traits": { - "smithy.api#documentation": "

The foundation model to be used for orchestration by the agent you create.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create.

" } }, "description": { @@ -3319,7 +3319,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a knowledge base that contains data sources from which information can be queried and used by LLMs. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up your data for ingestion.

\n \n

If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base.

  • Provide the name and an optional description.
  • Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field.
  • Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object.
  • Provide the configuration for your vector store in the storageConfiguration object.
", + "smithy.api#documentation": "

Creates a knowledge base. A knowledge base contains your data sources so that Large Language Models (LLMs) can use your data. To create a knowledge base, you must first set up your data sources and configure a supported vector store. For more information, see Set up a knowledge base.

\n \n

If you prefer to let Amazon Bedrock create and manage a vector store for you in Amazon OpenSearch Service, use the console. For more information, see Create a knowledge base.

  • Provide the name and an optional description.
  • Provide the Amazon Resource Name (ARN) with permissions to create a knowledge base in the roleArn field.
  • Provide the embedding model to use in the embeddingModelArn field in the knowledgeBaseConfiguration object.
  • Provide the configuration for your vector store in the storageConfiguration object.
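A minimal sketch of those four steps through the generated Soto client follows. It assumes an OpenSearch Serverless vector store; the storage and field-mapping shape names, the module name, and all ARNs are taken from the service API or invented as placeholders, not from this excerpt, so treat the whole thing as illustrative only.

```swift
import SotoBedrockAgent  // assumed module name

func createExampleKnowledgeBase(_ bedrockAgent: BedrockAgent) async throws {
    let response = try await bedrockAgent.createKnowledgeBase(
        .init(
            description: "Product documentation",   // optional description
            knowledgeBaseConfiguration: .init(
                type: .vector,
                vectorKnowledgeBaseConfiguration: .init(
                    embeddingModelArn: "arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-embed-text-v2:0"
                )
            ),
            name: "product-docs",
            roleArn: "arn:aws:iam::111122223333:role/BedrockKnowledgeBaseRole",
            storageConfiguration: .init(
                opensearchServerlessConfiguration: .init(
                    collectionArn: "arn:aws:aoss:us-east-1:111122223333:collection/EXAMPLE",
                    fieldMapping: .init(metadataField: "metadata", textField: "text", vectorField: "vector"),
                    vectorIndexName: "product-docs-index"
                ),
                type: .opensearchServerless
            )
        )
    )
    print(response.knowledgeBase.knowledgeBaseId)
}
```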
", "smithy.api#http": { "code": 202, "method": "PUT", @@ -6691,7 +6691,7 @@ "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base that the data source was added to.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base for the data source.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7154,7 +7154,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets information about a ingestion job, in which a data source is added to a knowledge base.

", + "smithy.api#documentation": "

Gets information about a data ingestion job. Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data.

", "smithy.api#http": { "code": 200, "method": "GET", @@ -7172,7 +7172,7 @@ "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base for which the ingestion job applies.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base for the data ingestion job you want to get information on.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7180,7 +7180,7 @@ "dataSourceId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source in the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the data source for the data ingestion job you want to get information on.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7188,7 +7188,7 @@ "ingestionJobId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the data ingestion job you want to get information on.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7204,7 +7204,7 @@ "ingestionJob": { "target": "com.amazonaws.bedrockagent#IngestionJob", "traits": { - "smithy.api#documentation": "

Contains details about the ingestion job.

", + "smithy.api#documentation": "

Contains details about the data ingestion job.

", "smithy.api#required": {} } } @@ -7257,7 +7257,7 @@ "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base for which to get information.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base you want to get information on.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7564,66 +7564,66 @@ "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source is being added.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base for the data ingestion job.

", "smithy.api#required": {} } }, "dataSourceId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the ingested data source.

", + "smithy.api#documentation": "

The unique identifier of the data source for the data ingestion job.

", "smithy.api#required": {} } }, "ingestionJobId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the data ingestion job.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

The description of the ingestion job.

" + "smithy.api#documentation": "

The description of the data ingestion job.

" } }, "status": { "target": "com.amazonaws.bedrockagent#IngestionJobStatus", "traits": { - "smithy.api#documentation": "

The status of the ingestion job.

", + "smithy.api#documentation": "

The status of the data ingestion job.

", "smithy.api#required": {} } }, "statistics": { "target": "com.amazonaws.bedrockagent#IngestionJobStatistics", "traits": { - "smithy.api#documentation": "

Contains statistics about the ingestion job.

" + "smithy.api#documentation": "

Contains statistics about the data ingestion job.

" } }, "failureReasons": { "target": "com.amazonaws.bedrockagent#FailureReasons", "traits": { - "smithy.api#documentation": "

A list of reasons that the ingestion job failed.

" + "smithy.api#documentation": "

A list of reasons that the data ingestion job failed.

" } }, "startedAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the ingestion job started.

", + "smithy.api#documentation": "

The time the data ingestion job started.

\n

If you stop a data ingestion job, the startedAt time is the time the job was started before the job was stopped.

", "smithy.api#required": {} } }, "updatedAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the ingestion job was last updated.

", + "smithy.api#documentation": "

The time the data ingestion job was last updated.

\n

If you stop a data ingestion job, the updatedAt time is the time the job was stopped.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Contains details about an ingestion job, which converts a data source to embeddings for a vector store in knowledge base.

\n

This data type is used in the following API operations:

\n " + "smithy.api#documentation": "

Contains details about a data ingestion job. Data sources are ingested into a knowledge base so that Large Language Models (LLMs) can use your data.

\n

This data type is used in the following API operations:

\n " } }, "com.amazonaws.bedrockagent#IngestionJobFilter": { @@ -7632,27 +7632,27 @@ "attribute": { "target": "com.amazonaws.bedrockagent#IngestionJobFilterAttribute", "traits": { - "smithy.api#documentation": "

The attribute by which to filter the results.

", + "smithy.api#documentation": "

The name of the field or attribute to apply the filter to.

", "smithy.api#required": {} } }, "operator": { "target": "com.amazonaws.bedrockagent#IngestionJobFilterOperator", "traits": { - "smithy.api#documentation": "

The operation to carry out between the attribute and the values.

", + "smithy.api#documentation": "

The operation to apply to the field or attribute.

", "smithy.api#required": {} } }, "values": { "target": "com.amazonaws.bedrockagent#IngestionJobFilterValues", "traits": { - "smithy.api#documentation": "

A list of values for the attribute.

", + "smithy.api#documentation": "

A list of values that belong to the field or attribute.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Defines a filter by which to filter the results.

" + "smithy.api#documentation": "

The definition of a filter used to filter the data.

" } }, "com.amazonaws.bedrockagent#IngestionJobFilterAttribute": { @@ -7722,6 +7722,9 @@ }, { "target": "com.amazonaws.bedrockagent#StartIngestionJob" + }, + { + "target": "com.amazonaws.bedrockagent#StopIngestionJob" } ] }, @@ -7731,20 +7734,20 @@ "attribute": { "target": "com.amazonaws.bedrockagent#IngestionJobSortByAttribute", "traits": { - "smithy.api#documentation": "

The attribute by which to sort the results.

", + "smithy.api#documentation": "

The name of the field or attribute to sort the data by.

", "smithy.api#required": {} } }, "order": { "target": "com.amazonaws.bedrockagent#SortOrder", "traits": { - "smithy.api#documentation": "

The order by which to sort the results.

", + "smithy.api#documentation": "

The order for sorting the data.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Parameters by which to sort the results.

" + "smithy.api#documentation": "

The parameters for sorting the data.
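To make the filter and sort parameters concrete, here is a sketch of listing ingestion jobs through the generated Soto client. The STATUS, EQ, STARTED_AT, and DESCENDING values come from the service API rather than this excerpt, and the module name and IDs are assumptions or placeholders.

```swift
import SotoBedrockAgent  // assumed module name

// List up to 10 COMPLETE ingestion jobs for a data source, newest first.
func listCompletedJobs(_ bedrockAgent: BedrockAgent) async throws {
    let response = try await bedrockAgent.listIngestionJobs(
        .init(
            dataSourceId: "DATASOURCE1",        // placeholder IDs
            filters: [.init(attribute: .status, `operator`: .eq, values: ["COMPLETE"])],
            knowledgeBaseId: "KNOWLEDGEBASE1",
            maxResults: 10,
            sortBy: .init(attribute: .startedAt, order: .descending)
        )
    )
    for job in response.ingestionJobSummaries {
        print(job.ingestionJobId, job.status)
    }
}
```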

" } }, "com.amazonaws.bedrockagent#IngestionJobSortByAttribute": { @@ -7806,7 +7809,7 @@ "target": "smithy.api#PrimitiveLong", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of source documents that was deleted.

" + "smithy.api#documentation": "

The number of source documents that were deleted.

" } }, "numberOfDocumentsFailed": { @@ -7818,7 +7821,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the statistics for the ingestion job.

" + "smithy.api#documentation": "

Contains the statistics for the data ingestion job.

" } }, "com.amazonaws.bedrockagent#IngestionJobStatus": { @@ -7847,6 +7850,18 @@ "traits": { "smithy.api#enumValue": "FAILED" } + }, + "STOPPING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPING" + } + }, + "STOPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STOPPED" + } } } }, @@ -7862,60 +7877,60 @@ "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which the data source is added.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base for the data ingestion job.

", "smithy.api#required": {} } }, "dataSourceId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source in the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the data source for the data ingestion job.

", "smithy.api#required": {} } }, "ingestionJobId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the ingestion job.

", + "smithy.api#documentation": "

The unique identifier of the data ingestion job.

", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

The description of the ingestion job.

" + "smithy.api#documentation": "

The description of the data ingestion job.

" } }, "status": { "target": "com.amazonaws.bedrockagent#IngestionJobStatus", "traits": { - "smithy.api#documentation": "

The status of the ingestion job.

", + "smithy.api#documentation": "

The status of the data ingestion job.

", "smithy.api#required": {} } }, "startedAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the ingestion job was started.

", + "smithy.api#documentation": "

The time the data ingestion job started.

", "smithy.api#required": {} } }, "updatedAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the ingestion job was last updated.

", + "smithy.api#documentation": "

The time the data ingestion job was last updated.

", "smithy.api#required": {} } }, "statistics": { "target": "com.amazonaws.bedrockagent#IngestionJobStatistics", "traits": { - "smithy.api#documentation": "

Contains statistics for the ingestion job.

" + "smithy.api#documentation": "

Contains statistics for the data ingestion job.

" } } }, "traits": { - "smithy.api#documentation": "

Contains details about an ingestion job.

" + "smithy.api#documentation": "

Contains details about a data ingestion job.

" } }, "com.amazonaws.bedrockagent#InputFlowNodeConfiguration": { @@ -8041,14 +8056,14 @@ "createdAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the knowledge base was created.

", + "smithy.api#documentation": "

The time the knowledge base was created.

", "smithy.api#required": {} } }, "updatedAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the knowledge base was last updated.

", + "smithy.api#documentation": "

The time the knowledge base was last updated.

", "smithy.api#required": {} } }, @@ -8085,12 +8100,12 @@ "vectorKnowledgeBaseConfiguration": { "target": "com.amazonaws.bedrockagent#VectorKnowledgeBaseConfiguration", "traits": { - "smithy.api#documentation": "

Contains details about the embeddings model that'sused to convert the data source.

" + "smithy.api#documentation": "

Contains details about the model that's used to convert the data source into vector embeddings.

" } } }, "traits": { - "smithy.api#documentation": "

Contains details about the embeddings configuration of the knowledge base.

" + "smithy.api#documentation": "

Contains details about the vector embeddings configuration of the knowledge base.

" } }, "com.amazonaws.bedrockagent#KnowledgeBaseFlowNodeConfiguration": { @@ -8104,9 +8119,9 @@ } }, "modelId": { - "target": "com.amazonaws.bedrockagent#ModelIdentifier", + "target": "com.amazonaws.bedrockagent#KnowledgeBaseModelIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the model to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.

" + "smithy.api#documentation": "

The unique identifier of the model or inference profile to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array.

" } } }, @@ -8123,6 +8138,16 @@ "smithy.api#pattern": "^[0-9a-zA-Z]+$" } }, + "com.amazonaws.bedrockagent#KnowledgeBaseModelIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" + } + }, "com.amazonaws.bedrockagent#KnowledgeBaseResource": { "type": "resource", "operations": [ @@ -8299,7 +8324,7 @@ "updatedAt": { "target": "com.amazonaws.bedrockagent#DateTimestamp", "traits": { - "smithy.api#documentation": "

The time at which the knowledge base was last updated.

", + "smithy.api#documentation": "

The time the knowledge base was last updated.

", "smithy.api#required": {} } } @@ -9246,7 +9271,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the ingestion jobs for a data source and information about each of them.

", + "smithy.api#documentation": "

Lists the data ingestion jobs for a data source. The list also includes information about each job.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -9270,7 +9295,7 @@ "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base for which to return ingestion jobs.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base for the list of data ingestion jobs.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -9278,7 +9303,7 @@ "dataSourceId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source for which to return ingestion jobs.

", + "smithy.api#documentation": "

The unique identifier of the data source for the list of data ingestion jobs.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -9286,13 +9311,13 @@ "filters": { "target": "com.amazonaws.bedrockagent#IngestionJobFilters", "traits": { - "smithy.api#documentation": "

Contains a definition of a filter for which to filter the results.

" + "smithy.api#documentation": "

Contains information about the filters for filtering the data.

" } }, "sortBy": { "target": "com.amazonaws.bedrockagent#IngestionJobSortBy", "traits": { - "smithy.api#documentation": "

Contains details about how to sort the results.

" + "smithy.api#documentation": "

Contains details about how to sort the data.

" } }, "maxResults": { @@ -9318,7 +9343,7 @@ "ingestionJobSummaries": { "target": "com.amazonaws.bedrockagent#IngestionJobSummaries", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about an ingestion job.

", + "smithy.api#documentation": "

A list of data ingestion jobs with information about each job.

", "smithy.api#required": {} } }, @@ -9356,7 +9381,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the knowledge bases in an account and information about each of them.

", + "smithy.api#documentation": "

Lists the knowledge bases in an account. The list also includes information about each knowledge base.

", "smithy.api#http": { "code": 200, "method": "POST", @@ -9400,7 +9425,7 @@ "knowledgeBaseSummaries": { "target": "com.amazonaws.bedrockagent#KnowledgeBaseSummaries", "traits": { - "smithy.api#documentation": "

A list of objects, each of which contains information about a knowledge base.

", + "smithy.api#documentation": "

A list of knowledge bases with information about each knowledge base.

", "smithy.api#required": {} } }, @@ -10405,7 +10430,7 @@ "modelId": { "target": "com.amazonaws.bedrockagent#PromptModelIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the model to run inference with.

", + "smithy.api#documentation": "

The unique identifier of the model or inference profile to run inference with.

", "smithy.api#required": {} } }, @@ -10577,7 +10602,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" + "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)|(arn:aws(|-us-gov|-cn|-iso|-iso-b):bedrock:(|[0-9a-z-]{1,20}):(|[0-9]{12}):(model-gateway|inference-profile)/[a-zA-Z0-9-:.]+)|([a-zA-Z0-9-:.]+)$" } }, "com.amazonaws.bedrockagent#PromptModelInferenceConfiguration": { @@ -10637,7 +10662,7 @@ "overrideLambda": { "target": "com.amazonaws.bedrockagent#LambdaArn", "traits": { - "smithy.api#documentation": "

The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Agents for Amazon Bedrock.

" + "smithy.api#documentation": "

The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the promptConfigurations must contain a parserMode value that is set to OVERRIDDEN. For more information, see Parser Lambda function in Amazon Bedrock Agents.

" } } }, @@ -10843,7 +10868,7 @@ "modelId": { "target": "com.amazonaws.bedrockagent#PromptModelIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the model with which to run inference on the prompt.

" + "smithy.api#documentation": "

The unique identifier of the model or inference profile with which to run inference on the prompt.

" } }, "inferenceConfiguration": { @@ -11657,7 +11682,7 @@ } ], "traits": { - "smithy.api#documentation": "

Begins an ingestion job, in which a data source is added to a knowledge base.

", + "smithy.api#documentation": "

Begins a data ingestion job. Data sources are ingested into your knowledge base so that Large Language Models (LLMs) can use your data.
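A minimal sketch of kicking off a sync from Swift through the generated Soto client; the module name follows Soto's usual conventions and the IDs are placeholders.

```swift
import SotoBedrockAgent  // assumed module name

func startSync(_ bedrockAgent: BedrockAgent) async throws {
    let response = try await bedrockAgent.startIngestionJob(
        .init(
            dataSourceId: "DATASOURCE1",        // placeholder IDs
            description: "Nightly sync of the product docs bucket",
            knowledgeBaseId: "KNOWLEDGEBASE1"
        )
    )
    // The job can later be polled with getIngestionJob, or cancelled with the
    // new stopIngestionJob operation introduced in this change.
    print(response.ingestionJob.status)
}
```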

", "smithy.api#http": { "code": 202, "method": "PUT", @@ -11675,7 +11700,7 @@ "knowledgeBaseId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the knowledge base to which to add the data source.

", + "smithy.api#documentation": "

The unique identifier of the knowledge base for the data ingestion job.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -11683,7 +11708,7 @@ "dataSourceId": { "target": "com.amazonaws.bedrockagent#Id", "traits": { - "smithy.api#documentation": "

The unique identifier of the data source to ingest.

", + "smithy.api#documentation": "

The unique identifier of the data source you want to ingest into your knowledge base.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -11698,7 +11723,7 @@ "description": { "target": "com.amazonaws.bedrockagent#Description", "traits": { - "smithy.api#documentation": "

A description of the ingestion job.

" + "smithy.api#documentation": "

A description of the data ingestion job.

" } } }, @@ -11712,7 +11737,7 @@ "ingestionJob": { "target": "com.amazonaws.bedrockagent#IngestionJob", "traits": { - "smithy.api#documentation": "

An object containing information about the ingestion job.

", + "smithy.api#documentation": "

Contains information about the data ingestion job.

", "smithy.api#required": {} } } @@ -11732,6 +11757,94 @@ } } }, + "com.amazonaws.bedrockagent#StopIngestionJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.bedrockagent#StopIngestionJobRequest" + }, + "output": { + "target": "com.amazonaws.bedrockagent#StopIngestionJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.bedrockagent#AccessDeniedException" + }, + { + "target": "com.amazonaws.bedrockagent#ConflictException" + }, + { + "target": "com.amazonaws.bedrockagent#InternalServerException" + }, + { + "target": "com.amazonaws.bedrockagent#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.bedrockagent#ThrottlingException" + }, + { + "target": "com.amazonaws.bedrockagent#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Stops a currently running data ingestion job. You can send a StartIngestionJob request again to ingest the rest of your data when you are ready.
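And the corresponding stop call, sketched under the same naming assumptions (module name and IDs are placeholders):

```swift
import SotoBedrockAgent  // assumed module name

func stopSync(_ bedrockAgent: BedrockAgent) async throws {
    let response = try await bedrockAgent.stopIngestionJob(
        .init(
            dataSourceId: "DATASOURCE1",        // placeholder IDs
            ingestionJobId: "INGESTIONJOB1",
            knowledgeBaseId: "KNOWLEDGEBASE1"
        )
    )
    // Expect STOPPING, then STOPPED, per the new IngestionJobStatus values above.
    print(response.ingestionJob.status)
}
```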

", + "smithy.api#http": { + "code": 202, + "method": "POST", + "uri": "/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/{ingestionJobId}/stop" + }, + "smithy.api#idempotent": {}, + "smithy.api#tags": [ + "console" + ] + } + }, + "com.amazonaws.bedrockagent#StopIngestionJobRequest": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the knowledge base for the data ingestion job you want to stop.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "dataSourceId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the data source for the data ingestion job you want to stop.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ingestionJobId": { + "target": "com.amazonaws.bedrockagent#Id", + "traits": { + "smithy.api#documentation": "

The unique identifier of the data ingestion job you want to stop.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.bedrockagent#StopIngestionJobResponse": { + "type": "structure", + "members": { + "ingestionJob": { + "target": "com.amazonaws.bedrockagent#IngestionJob", + "traits": { + "smithy.api#documentation": "

Contains information about the stopped data ingestion job.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.bedrockagent#StopSequences": { "type": "list", "member": { diff --git a/models/bedrock-runtime.json b/models/bedrock-runtime.json index 8dd29f993f..c5ac829321 100644 --- a/models/bedrock-runtime.json +++ b/models/bedrock-runtime.json @@ -832,6 +832,12 @@ "smithy.api#documentation": "

The assessment details in the response from the guardrail.

", "smithy.api#required": {} } + }, + "guardrailCoverage": { + "target": "com.amazonaws.bedrockruntime#GuardrailCoverage", + "traits": { + "smithy.api#documentation": "

The guardrail coverage details in the apply guardrail response.

" + } } }, "traits": { @@ -1636,6 +1642,12 @@ "traits": { "smithy.api#documentation": "

The contextual grounding policy used for the guardrail assessment.

" } + }, + "invocationMetrics": { + "target": "com.amazonaws.bedrockruntime#GuardrailInvocationMetrics", + "traits": { + "smithy.api#documentation": "

The invocation metrics for the guardrail assessment.

" + } } }, "traits": { @@ -1732,6 +1744,12 @@ "smithy.api#required": {} } }, + "filterStrength": { + "target": "com.amazonaws.bedrockruntime#GuardrailContentFilterStrength", + "traits": { + "smithy.api#documentation": "

The filter strength setting for the guardrail content filter.

" + } + }, "action": { "target": "com.amazonaws.bedrockruntime#GuardrailContentPolicyAction", "traits": { @@ -1779,6 +1797,35 @@ "target": "com.amazonaws.bedrockruntime#GuardrailContentFilter" } }, + "com.amazonaws.bedrockruntime#GuardrailContentFilterStrength": { + "type": "enum", + "members": { + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + }, + "LOW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LOW" + } + }, + "MEDIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MEDIUM" + } + }, + "HIGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HIGH" + } + } + } + }, "com.amazonaws.bedrockruntime#GuardrailContentFilterType": { "type": "enum", "members": { @@ -2060,6 +2107,20 @@ "smithy.api#documentation": "

A text block that contains text that you want to assess with a guardrail. For more information, see GuardrailConverseContentBlock.

" } }, + "com.amazonaws.bedrockruntime#GuardrailCoverage": { + "type": "structure", + "members": { + "textCharacters": { + "target": "com.amazonaws.bedrockruntime#GuardrailTextCharactersCoverage", + "traits": { + "smithy.api#documentation": "

The text characters of the guardrail coverage details.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the guardrail coverage details.

" + } + }, "com.amazonaws.bedrockruntime#GuardrailCustomWord": { "type": "structure", "members": { @@ -2097,6 +2158,32 @@ "smithy.api#pattern": "^(([a-z0-9]+)|(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:[0-9]{12}:guardrail/[a-z0-9]+))$" } }, + "com.amazonaws.bedrockruntime#GuardrailInvocationMetrics": { + "type": "structure", + "members": { + "guardrailProcessingLatency": { + "target": "com.amazonaws.bedrockruntime#GuardrailProcessingLatency", + "traits": { + "smithy.api#documentation": "

The processing latency details for the guardrail invocation metrics.

" + } + }, + "usage": { + "target": "com.amazonaws.bedrockruntime#GuardrailUsage", + "traits": { + "smithy.api#documentation": "

The usage details for the guardrail invocation metrics.

" + } + }, + "guardrailCoverage": { + "target": "com.amazonaws.bedrockruntime#GuardrailCoverage", + "traits": { + "smithy.api#documentation": "

The coverage details for the guardrail invocation metrics.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The invocation metrics for the guardrail.

" + } + }, "com.amazonaws.bedrockruntime#GuardrailManagedWord": { "type": "structure", "members": { @@ -2392,6 +2479,9 @@ } } }, + "com.amazonaws.bedrockruntime#GuardrailProcessingLatency": { + "type": "long" + }, "com.amazonaws.bedrockruntime#GuardrailRegexFilter": { "type": "structure", "members": { @@ -2558,6 +2648,26 @@ "smithy.api#documentation": "

The text block to be evaluated by the guardrail.

" } }, + "com.amazonaws.bedrockruntime#GuardrailTextCharactersCoverage": { + "type": "structure", + "members": { + "guarded": { + "target": "com.amazonaws.bedrockruntime#TextCharactersGuarded", + "traits": { + "smithy.api#documentation": "

The text characters that were guarded by the guardrail coverage.

" + } + }, + "total": { + "target": "com.amazonaws.bedrockruntime#TextCharactersTotal", + "traits": { + "smithy.api#documentation": "

The total number of text characters within the guardrail coverage details.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The guardrail coverage for the text characters.
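To show how these coverage numbers might be consumed, here is a small helper that reads them from an ApplyGuardrail response via the generated Soto types. Only member names that appear in the model above are used; the module and response type names are assumptions.

```swift
import SotoBedrockRuntime  // assumed module name

// Reports what fraction of the input text the guardrail actually assessed.
func reportCoverage(_ response: BedrockRuntime.ApplyGuardrailResponse) {
    guard let characters = response.guardrailCoverage?.textCharacters,
          let guarded = characters.guarded,
          let total = characters.total,
          total > 0
    else {
        print("No guardrail coverage details returned")
        return
    }
    let percent = Double(guarded) / Double(total) * 100
    print("Guardrail assessed \(guarded) of \(total) characters (\(percent)%)")
}
```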

" + } + }, "com.amazonaws.bedrockruntime#GuardrailTopic": { "type": "structure", "members": { @@ -3569,6 +3679,12 @@ "target": "com.amazonaws.bedrockruntime#SystemContentBlock" } }, + "com.amazonaws.bedrockruntime#TextCharactersGuarded": { + "type": "integer" + }, + "com.amazonaws.bedrockruntime#TextCharactersTotal": { + "type": "integer" + }, "com.amazonaws.bedrockruntime#ThrottlingException": { "type": "structure", "members": { diff --git a/models/bedrock.json b/models/bedrock.json index 2afed5dcd2..3ab16bd12f 100644 --- a/models/bedrock.json +++ b/models/bedrock.json @@ -1051,7 +1051,7 @@ "clientRequestToken": { "target": "com.amazonaws.bedrock#IdempotencyToken", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request,\n Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency.

", "smithy.api#idempotencyToken": {} } }, @@ -1084,7 +1084,7 @@ "inferenceConfig": { "target": "com.amazonaws.bedrock#EvaluationInferenceConfig", "traits": { - "smithy.api#documentation": "

Specify the models you want to use in your model evaluation job. Automatic model evaluation jobs support a single model, and model evaluation job that use human workers support two models.

", + "smithy.api#documentation": "

Specify the models you want to use in your model evaluation job. Automatic model evaluation jobs support a single model or inference profile, and model evaluation jobs that use human workers support two models or inference profiles.

", "smithy.api#required": {} } }, @@ -1593,7 +1593,7 @@ "vpcConfig": { "target": "com.amazonaws.bedrock#VpcConfig", "traits": { - "smithy.api#documentation": "

VPC configuration (optional). Configuration parameters for the\n private Virtual Private Cloud (VPC) that contains the resources you are using for this job.

" + "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) that contains the resources that you're using for this job. For more information, see Protect your model customization jobs using a VPC.

" } } }, @@ -1827,6 +1827,12 @@ "smithy.api#required": {} } }, + "vpcConfig": { + "target": "com.amazonaws.bedrock#VpcConfig", + "traits": { + "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" + } + }, "timeoutDurationInHours": { "target": "com.amazonaws.bedrock#ModelInvocationJobTimeoutDurationInHours", "traits": { @@ -2380,7 +2386,7 @@ "modelIdentifier": { "target": "com.amazonaws.bedrock#EvaluationModelIdentifier", "traits": { - "smithy.api#documentation": "

The ARN of the Amazon Bedrock model specified.

", + "smithy.api#documentation": "

The ARN of the Amazon Bedrock model or inference profile specified.

", "smithy.api#required": {} } }, @@ -2393,7 +2399,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains the ARN of the Amazon Bedrock models specified in your model evaluation job. Each Amazon Bedrock model supports different inferenceParams. To learn more about supported inference parameters for Amazon Bedrock models, see Inference parameters for foundation models.

\n

The inferenceParams are specified using JSON. To successfully insert JSON as string make sure that all quotations are properly escaped. For example, \"temperature\":\"0.25\" key value pair would need to be formatted as \\\"temperature\\\":\\\"0.25\\\" to successfully accepted in the request.

" + "smithy.api#documentation": "

Contains the ARN of the Amazon Bedrock model or inference profile specified in your model evaluation job. Each Amazon Bedrock model supports different inferenceParams. To learn more about supported inference parameters for Amazon Bedrock models, see Inference parameters for foundation models.

\n

The inferenceParams are specified using JSON. To successfully insert JSON as a string, make sure that all quotations are properly escaped. For example, the \"temperature\":\"0.25\" key-value pair would need to be formatted as \\\"temperature\\\":\\\"0.25\\\" to be successfully accepted in the request.

" } }, "com.amazonaws.bedrock#EvaluationConfig": { @@ -2687,7 +2693,7 @@ "bedrockModel": { "target": "com.amazonaws.bedrock#EvaluationBedrockModel", "traits": { - "smithy.api#documentation": "

Defines the Amazon Bedrock model and inference parameters you want used.

" + "smithy.api#documentation": "

Defines the Amazon Bedrock model or inference profile and inference parameters you want used.

" } } }, @@ -2714,7 +2720,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}(([:][a-z0-9-]{1,63}){0,2})?/[a-z0-9]{12})|(:foundation-model/([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2})))|(([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.]?[a-z0-9-]{1,63})([:][a-z0-9-]{1,63}){0,2}))|(([0-9a-zA-Z][_-]?)+)$" + "smithy.api#pattern": "^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:((:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})|([0-9]{12}:inference-profile/(([a-z]{2}.)[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))))|(([a-z]{2}[.]{1})([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))$" } }, "com.amazonaws.bedrock#EvaluationModelIdentifiers": { @@ -4471,6 +4477,12 @@ "smithy.api#required": {} } }, + "vpcConfig": { + "target": "com.amazonaws.bedrock#VpcConfig", + "traits": { + "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" + } + }, "timeoutDurationInHours": { "target": "com.amazonaws.bedrock#ModelInvocationJobTimeoutDurationInHours", "traits": { @@ -8422,10 +8434,16 @@ "smithy.api#documentation": "

The S3 location of the input data.

", "smithy.api#required": {} } + }, + "s3BucketOwner": { + "target": "com.amazonaws.bedrock#AccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the S3 bucket containing the input data.

" + } } }, "traits": { - "smithy.api#documentation": "

Contains the configuration of the S3 location of the output data.

" + "smithy.api#documentation": "

Contains the configuration of the S3 location of the input data.

" } }, "com.amazonaws.bedrock#ModelInvocationJobS3OutputDataConfig": { @@ -8443,6 +8461,12 @@ "traits": { "smithy.api#documentation": "

The unique identifier of the key that encrypts the S3 location of the output data.

" } + }, + "s3BucketOwner": { + "target": "com.amazonaws.bedrock#AccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the S3 bucket containing the output data.

" + } } }, "traits": { @@ -8602,6 +8626,12 @@ "smithy.api#required": {} } }, + "vpcConfig": { + "target": "com.amazonaws.bedrock#VpcConfig", + "traits": { + "smithy.api#documentation": "

The configuration of the Virtual Private Cloud (VPC) for the data in the batch inference job. For more information, see Protect batch inference jobs using a VPC.

" + } + }, "timeoutDurationInHours": { "target": "com.amazonaws.bedrock#ModelInvocationJobTimeoutDurationInHours", "traits": { @@ -9940,20 +9970,20 @@ "subnetIds": { "target": "com.amazonaws.bedrock#SubnetIds", "traits": { - "smithy.api#documentation": "

VPC configuration subnets.

", + "smithy.api#documentation": "

An array of IDs for each subnet in the VPC to use.

", "smithy.api#required": {} } }, "securityGroupIds": { "target": "com.amazonaws.bedrock#SecurityGroupIds", "traits": { - "smithy.api#documentation": "

VPC configuration security group Ids.

", + "smithy.api#documentation": "

An array of IDs for each security group in the VPC to use.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

VPC configuration.

" + "smithy.api#documentation": "

The configuration of a virtual private cloud (VPC). For more information, see Protect your data using Amazon Virtual Private Cloud and Amazon Web Services PrivateLink.
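For reference, the VPC configuration attached to batch inference jobs in this change is just two ID lists. A hedged Soto sketch follows; the module name and IDs are assumptions or placeholders.

```swift
import SotoBedrock  // assumed module name

// Scope the batch inference job's data access to two private subnets and a
// security group; these IDs are placeholders.
let vpcConfig = Bedrock.VpcConfig(
    securityGroupIds: ["sg-0123456789abcdef0"],
    subnetIds: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"]
)
// vpcConfig can then be passed as the new `vpcConfig` member when creating a
// model invocation (batch inference) job, and is echoed back when you get it.
```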

" } } } diff --git a/models/budgets.json b/models/budgets.json index 00dc803101..0fea2c0505 100644 --- a/models/budgets.json +++ b/models/budgets.json @@ -340,6 +340,108 @@ }, "type": "endpoint" }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-e" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://budgets.global.cloud.adc-e.uk", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-f" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://budgets.global.csp.hci.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isof-south-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { @@ -858,6 +960,50 @@ "UseDualStack": false } }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://budgets.global.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://budgets.global.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { @@ -3861,7 +4007,7 @@ "min": 25, "max": 684 }, - "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov|us-iso-east-1|us-isob-east-1):iam::(\\d{12}|aws):policy(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$" + "smithy.api#pattern": "^arn:aws(-cn|-us-gov|-iso|-iso-[a-z]{1})?:iam::(\\d{12}|aws):policy(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$" } }, "com.amazonaws.budgets#PolicyId": { @@ -3978,7 +4124,7 @@ "min": 32, "max": 618 }, - "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov|us-iso-east-1|us-isob-east-1):iam::\\d{12}:role(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$" + "smithy.api#pattern": "^arn:aws(-cn|-us-gov|-iso|-iso-[a-z]{1})?:iam::\\d{12}:role(\\u002F[\\u0021-\\u007F]+\\u002F|\\u002F)[\\w+=,.@-]+$" } }, "com.amazonaws.budgets#Roles": { diff --git a/models/chatbot.json b/models/chatbot.json index 1940cb5dca..6b634fabe6 100644 --- a/models/chatbot.json +++ b/models/chatbot.json @@ -112,6 +112,18 @@ "traits": { "smithy.api#documentation": "

A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs.

" } + }, + "State": { + "target": "com.amazonaws.chatbot#ResourceState", + "traits": { + "smithy.api#documentation": "

Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration.\n\t For example, if Amazon Chime is disabled.

" + } + }, + "StateReason": { + "target": "com.amazonaws.chatbot#String", + "traits": { + "smithy.api#documentation": "

Provided if State is DISABLED. Provides context as to why the resource is disabled.

" + } } }, "traits": { @@ -177,6 +189,18 @@ "traits": { "smithy.api#documentation": "

The name of the Microsoft Teams Team.

" } + }, + "State": { + "target": "com.amazonaws.chatbot#ResourceState", + "traits": { + "smithy.api#documentation": "

Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration.\n\t For example, if Amazon Chime is disabled.

" + } + }, + "StateReason": { + "target": "com.amazonaws.chatbot#String", + "traits": { + "smithy.api#documentation": "

Provided if State is DISABLED. Provides context as to why the resource is disabled.

" + } } }, "traits": { @@ -2030,6 +2054,12 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.chatbot#ResourceState": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(ENABLED|DISABLED)$" + } + }, "com.amazonaws.chatbot#ServiceUnavailableException": { "type": "structure", "members": { @@ -2124,6 +2154,18 @@ "traits": { "smithy.api#documentation": "

A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs.

" } + }, + "State": { + "target": "com.amazonaws.chatbot#ResourceState", + "traits": { + "smithy.api#documentation": "

Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration.\n\t For example, if Amazon Chime is disabled.

" + } + }, + "StateReason": { + "target": "com.amazonaws.chatbot#String", + "traits": { + "smithy.api#documentation": "

Provided if State is DISABLED. Provides context as to why the resource is disabled.

" + } } }, "traits": { @@ -2249,6 +2291,18 @@ "smithy.api#documentation": "

The name of the Slack workspace.

", "smithy.api#required": {} } + }, + "State": { + "target": "com.amazonaws.chatbot#ResourceState", + "traits": { + "smithy.api#documentation": "

Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration.\n\t For example, if Amazon Chime is disabled.

" + } + }, + "StateReason": { + "target": "com.amazonaws.chatbot#String", + "traits": { + "smithy.api#documentation": "

Provided if State is DISABLED. Provides context as to why the resource is disabled.

" + } } }, "traits": { @@ -2267,6 +2321,9 @@ "target": "com.amazonaws.chatbot#Arn" } }, + "com.amazonaws.chatbot#String": { + "type": "string" + }, "com.amazonaws.chatbot#Tag": { "type": "structure", "members": { @@ -2500,6 +2557,18 @@ "traits": { "smithy.api#documentation": "

A map of tags assigned to a resource. A tag is a string-to-string map of key-value pairs.

" } + }, + "State": { + "target": "com.amazonaws.chatbot#ResourceState", + "traits": { + "smithy.api#documentation": "

Either ENABLED or DISABLED. The resource returns DISABLED if the organization's AWS Chatbot policy has explicitly denied that configuration.\n\t For example, if Amazon Chime is disabled.

" + } + }, + "StateReason": { + "target": "com.amazonaws.chatbot#String", + "traits": { + "smithy.api#documentation": "

Provided if State is DISABLED. Provides context as to why the resource is disabled.
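A small sketch of how a caller might react to the new State and StateReason members, assuming the generated Soto Chatbot types. The SlackChannelConfiguration name and the lowerCamelCase property names are assumptions; the same pattern applies to the other configuration types that gained these members.

```swift
import SotoChatbot  // assumed module name

// Skip configurations that the organization's AWS Chatbot policy has disabled.
func usableConfigurations(
    _ configurations: [Chatbot.SlackChannelConfiguration]
) -> [Chatbot.SlackChannelConfiguration] {
    configurations.filter { configuration in
        guard configuration.state == "DISABLED" else { return true }
        print("Skipping \(configuration.chatConfigurationArn): \(configuration.stateReason ?? "no reason provided")")
        return false
    }
}
```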

" + } } }, "traits": { diff --git a/models/clouddirectory.json b/models/clouddirectory.json index a7fbd382ac..a214b41c12 100644 --- a/models/clouddirectory.json +++ b/models/clouddirectory.json @@ -81,6 +81,24 @@ ], "traits": { "smithy.api#documentation": "

Adds a new Facet to an object. An object can have more than one facet applied on it.

", + "smithy.api#examples": [ + { + "title": "To add a facet to an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "SchemaFacet": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1", + "FacetName": "node1" + }, + "ObjectAttributeList": [], + "ObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWmspn1fxfQmSQaaVKSbvEiQ" + } + }, + "output": {} + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/object/facets", @@ -390,7 +408,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -433,7 +450,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -446,7 +464,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -460,7 +477,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -483,7 +499,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -518,7 +533,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -529,14 +543,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -550,14 +566,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -566,18 +580,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -586,7 +599,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -606,14 +620,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -627,7 +643,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -647,7 +662,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -658,14 +672,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -676,9 +692,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -1172,6 +1190,20 @@ ], "traits": { "smithy.api#documentation": "

Copies the input published schema, at the specified version, into the Directory with the same\n name and version as that of the published schema.

", + "smithy.api#examples": [ + { + "title": "To apply a schema", + "documentation": "", + "input": { + "PublishedSchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:schema/published/org/1", + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AfMr4qym1kZTvwqOafAYfqI" + }, + "output": { + "AppliedSchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AfMr4qym1kZTvwqOafAYfqI/schema/org/1", + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AfMr4qym1kZTvwqOafAYfqI" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/schema/apply", @@ -1276,6 +1308,25 @@ ], "traits": { "smithy.api#documentation": "

Attaches an existing object to another object. An object can be accessed in two ways:
  1. Using the path
  2. Using ObjectIdentifier
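Once attached, the child is reachable both by path (the parent's path plus the link name) and by the returned ObjectIdentifier. A hedged Soto sketch, with member names assumed from the generated client and identifiers taken from the model example:

```swift
import SotoCloudDirectory

let client = AWSClient()
let cloudDirectory = CloudDirectory(client: client, region: .uswest2)

// Attach a child object under the parent with link name "link2".
let attached = try await cloudDirectory.attachObject(.init(
    childReference: .init(selector: "$AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA"),
    directoryArn: "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY",
    linkName: "link2",
    parentReference: .init(selector: "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw")
))
// The child is now addressable by path (.../link2) or by the returned ObjectIdentifier.
print(attached)
try await client.shutdown()
```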
", + "smithy.api#examples": [ + { + "title": "To attach an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "ParentReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + }, + "ChildReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + }, + "LinkName": "link2" + }, + "output": { + "AttachedObjectIdentifier": "AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/object/attach", @@ -1373,6 +1424,22 @@ ], "traits": { "smithy.api#documentation": "

Attaches a policy object to a regular object. An object can have a limited number of attached\n policies.

", + "smithy.api#examples": [ + { + "title": "To attach a policy to an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "PolicyReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWgcBsTVmcQEWs6jlygfhuew" + }, + "ObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWQoovm1s3Ts2v0NKrzdVnPw" + } + }, + "output": {} + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/policy/attach", @@ -1465,6 +1532,24 @@ ], "traits": { "smithy.api#documentation": "

Attaches the specified object to the specified index.

", + "smithy.api#examples": [ + { + "title": "To attach a index to an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "IndexReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TW45F26R1HTY2z-stwKBte_Q" + }, + "TargetReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + } + }, + "output": { + "AttachedObjectIdentifier": "AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/index/attach", @@ -1558,6 +1643,55 @@ ], "traits": { "smithy.api#documentation": "

Attaches a typed link to a specified source and target object. For more information, see Typed Links.

", + "smithy.api#examples": [ + { + "title": "To attach a typed link to an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "SourceObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + }, + "TargetObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + }, + "TypedLinkFacet": { + "TypedLinkName": "exampletypedlink8", + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + }, + "Attributes": [ + { + "AttributeName": "22", + "Value": { + "BinaryValue": "c3Ry" + } + } + ] + }, + "output": { + "TypedLinkSpecifier": { + "SourceObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + }, + "IdentityAttributeValues": [ + { + "AttributeName": "22", + "Value": { + "BinaryValue": "c3Ry" + } + } + ], + "TargetObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + }, + "TypedLinkFacet": { + "TypedLinkName": "exampletypedlink8", + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + } + } + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/typedlink/attach", @@ -2889,6 +3023,20 @@ ], "traits": { "smithy.api#documentation": "

Performs all the read operations in a batch.

", + "smithy.api#examples": [ + { + "title": "To run a batch read command", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "Operations": [], + "ConsistencyLevel": "EVENTUAL" + }, + "output": { + "Responses": [] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/batchread", @@ -3392,6 +3540,19 @@ ], "traits": { "smithy.api#documentation": "

Performs all the write operations in a batch. Either all the operations succeed or\n none.
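A hedged Soto sketch of the all-or-nothing semantics: two attach operations submitted in one BatchWrite, so either both links are created or neither is. The selectors are hypothetical object identifiers and the shapes are assumed from Soto's generated `BatchWriteOperation`.

```swift
import SotoCloudDirectory

let client = AWSClient()
let cloudDirectory = CloudDirectory(client: client, region: .uswest2)

// Two attach operations in one transaction; hypothetical selectors.
let operations: [CloudDirectory.BatchWriteOperation] = [
    .init(attachObject: .init(
        childReference: .init(selector: "$childObjectId1"),
        linkName: "link1",
        parentReference: .init(selector: "$parentObjectId")
    )),
    .init(attachObject: .init(
        childReference: .init(selector: "$childObjectId2"),
        linkName: "link2",
        parentReference: .init(selector: "$parentObjectId")
    )),
]
let result = try await cloudDirectory.batchWrite(.init(
    directoryArn: "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY",
    operations: operations
))
print(result.responses?.count ?? 0, "operations applied")
try await client.shutdown()
```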

", + "smithy.api#examples": [ + { + "title": "To run a batch write command", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "Operations": [] + }, + "output": { + "Responses": [] + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/batchwrite", @@ -3856,6 +4017,22 @@ ], "traits": { "smithy.api#documentation": "

Creates a Directory by copying the published schema into the\n directory. A directory cannot be created without a schema.

\n

You can also quickly create a directory using a managed schema, called the\n QuickStartSchema. For more information, see Managed Schema in the Amazon Cloud Directory Developer Guide.
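A minimal Soto sketch of creating a directory from a published schema. Member names are assumed from the generated client; the schema ARN is the placeholder from the model example.

```swift
import SotoCloudDirectory

let client = AWSClient()
let cloudDirectory = CloudDirectory(client: client, region: .uswest2)

// Create a directory from a previously published schema; the response includes
// the directory ARN plus the ARN of the schema as applied to this directory.
let created = try await cloudDirectory.createDirectory(.init(
    name: "ExampleCD",
    schemaArn: "arn:aws:clouddirectory:us-west-2:45132example:schema/published/person/1"
))
print(created)
try await client.shutdown()
```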

", + "smithy.api#examples": [ + { + "title": "To create a new Cloud Directory", + "documentation": "", + "input": { + "Name": "ExampleCD", + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:schema/published/person/1" + }, + "output": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AfMr4qym1kZTvwqOafAYfqI", + "Name": "ExampleCD", + "ObjectIdentifier": "AQHzK-KsptZGU78KjmnwGH6i-4guCM3uQFOTA9_NjeHDrg", + "AppliedSchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AfMr4qym1kZTvwqOafAYfqI/schema/person/1" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/directory/create", @@ -3964,6 +4141,18 @@ ], "traits": { "smithy.api#documentation": "

Creates a new Facet in a schema. Facet creation is allowed only\n in development or applied schemas.

", + "smithy.api#examples": [ + { + "title": "To create a facet", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1", + "Name": "node1", + "ObjectType": "NODE" + }, + "output": {} + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/facet/create", @@ -4064,6 +4253,22 @@ ], "traits": { "smithy.api#documentation": "

Creates an index object. See Indexing and search for more information.

", + "smithy.api#examples": [ + { + "title": "To create an index", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8", + "OrderedIndexedAttributeList": [], + "IsUnique": true, + "ParentReference": {}, + "LinkName": "Examplelink" + }, + "output": { + "ObjectIdentifier": "AQF0Fw173YJDlpLUV1eB50WvYsWFtVoUSmOzZjz_BLULIA" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/index", @@ -4173,6 +4378,24 @@ ], "traits": { "smithy.api#documentation": "

Creates an object in a Directory. Additionally attaches the object to\n a parent, if a parent reference and LinkName is specified. An object is simply a\n collection of Facet attributes. You can also use this API call to create a\n policy object, if the facet from which you create the object is a policy facet.
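A hedged Soto sketch of creating an object from a single facet; passing `parentReference` and `linkName` as well would attach it to a parent at creation time. Shapes are assumed from the generated client and the ARNs come from the model example.

```swift
import SotoCloudDirectory

let client = AWSClient()
let cloudDirectory = CloudDirectory(client: client, region: .uswest2)

// Create a detached object carrying the Organization_Person facet.
let created = try await cloudDirectory.createObject(.init(
    directoryArn: "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8",
    schemaFacets: [
        .init(
            facetName: "Organization_Person",
            schemaArn: "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8/schema/ExampleOrgPersonSchema/1"
        )
    ]
))
print(created)
try await client.shutdown()
```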

", + "smithy.api#examples": [ + { + "title": "To create an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8", + "SchemaFacets": [ + { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8/schema/ExampleOrgPersonSchema/1", + "FacetName": "Organization_Person" + } + ] + }, + "output": { + "ObjectIdentifier": "AQF0Fw173YJDlpLUV1eB50WvScvjsYXcS3K2nP1HwDuuYQ" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/object", @@ -4268,6 +4491,18 @@ ], "traits": { "smithy.api#documentation": "

Creates a new schema in a development state. A schema can exist in three phases:
  • Development: This is a mutable phase of the schema. All new schemas are in the development phase. Once the schema is finalized, it can be published.
  • Published: Published schemas are immutable and have a version associated with them.
  • Applied: Applied schemas are mutable in a way that allows you to add new schema facets. You can also add new, nonrequired attributes to existing schema facets. You can apply only published schemas to directories.
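A sketch of the development-to-published flow with the Soto client. `publishSchema` and the member names are assumed from Soto's conventions; the development schema ARN and version string are illustrative.

```swift
import SotoCloudDirectory

let client = AWSClient()
let cloudDirectory = CloudDirectory(client: client, region: .uswest2)

// 1. Create a schema in the development phase.
let development = try await cloudDirectory.createSchema(.init(name: "Customers"))
print(development)

// 2. ...add facets and attributes to the development schema here...

// 3. Publish it. Published schemas are immutable and versioned, and only
//    published schemas can be applied to a directory.
let published = try await cloudDirectory.publishSchema(.init(
    developmentSchemaArn: "arn:aws:clouddirectory:us-west-2:45132example:schema/development/Customers",
    version: "1"
))
print(published)
try await client.shutdown()
```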
", + "smithy.api#examples": [ + { + "title": "To create a schema", + "documentation": "", + "input": { + "Name": "Customers" + }, + "output": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:schema/development/Customers" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/schema/create", @@ -4346,6 +4581,29 @@ ], "traits": { "smithy.api#documentation": "

Creates a TypedLinkFacet. For more information, see Typed Links.

", + "smithy.api#examples": [ + { + "title": "To create a typed link facet", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:schema/development/typedlinkschema", + "Facet": { + "Name": "FacetExample", + "Attributes": [ + { + "Name": "1", + "Type": "BINARY", + "RequiredBehavior": "REQUIRED_ALWAYS" + } + ], + "IdentityAttributeOrder": [ + "1" + ] + } + }, + "output": {} + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/typedlink/facet/create", @@ -4428,6 +4686,18 @@ ], "traits": { "smithy.api#documentation": "

Deletes a directory. Only disabled directories can be deleted. Deleting a directory cannot be undone. Exercise extreme caution when deleting directories.

", + "smithy.api#examples": [ + { + "title": "To delete a directory", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8" + }, + "output": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/directory", @@ -4505,6 +4775,17 @@ ], "traits": { "smithy.api#documentation": "

Deletes a given Facet. All attributes and Rules that are associated with the facet will be deleted. Only development schema facets can be deleted.

", + "smithy.api#examples": [ + { + "title": "To delete a facet", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:schema/development/exampleorgtest", + "Name": "Organization" + }, + "output": {} + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/facet/delete", @@ -4581,6 +4862,19 @@ ], "traits": { "smithy.api#documentation": "

Deletes an object and its associated attributes. Only objects with no children and no\n parents can be deleted. The maximum number of attributes that can be deleted during an object deletion is 30. For more information, see Amazon Cloud Directory Limits.

", + "smithy.api#examples": [ + { + "title": "To delete an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AfMr4qym1kZTvwqOafAYfqI", + "ObjectReference": { + "Selector": "$AQHzK-KsptZGU78KjmnwGH6i8H-voMZDSNCqfx-fRUcBFg" + } + }, + "output": {} + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/object/delete", @@ -4654,6 +4948,18 @@ ], "traits": { "smithy.api#documentation": "

Deletes a given schema. Only schemas in the development or published state can be deleted.

", + "smithy.api#examples": [ + { + "title": "To delete a schema", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:schema/development/exampleorgtest" + }, + "output": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:schema/development/exampleorgtest" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/schema", @@ -4727,6 +5033,17 @@ ], "traits": { "smithy.api#documentation": "

Deletes a TypedLinkFacet. For more information, see Typed Links.

", + "smithy.api#examples": [ + { + "title": "To delete a typed link facet", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:schema/development/typedlinkschematest", + "Name": "ExampleFacet" + }, + "output": {} + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/typedlink/facet/delete", @@ -4806,6 +5123,24 @@ ], "traits": { "smithy.api#documentation": "

Detaches the specified object from the specified index.

", + "smithy.api#examples": [ + { + "title": "To detach an object from an index", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "IndexReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TW45F26R1HTY2z-stwKBte_Q" + }, + "TargetReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + } + }, + "output": { + "DetachedObjectIdentifier": "AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/index/detach", @@ -4896,6 +5231,22 @@ ], "traits": { "smithy.api#documentation": "

Detaches a given object from the parent object. The object that is to be detached from the\n parent is specified by the link name.

", + "smithy.api#examples": [ + { + "title": "To detach an object from its parent object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "ParentReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + }, + "LinkName": "link2" + }, + "output": { + "DetachedObjectIdentifier": "AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/object/detach", @@ -4986,6 +5337,22 @@ ], "traits": { "smithy.api#documentation": "

Detaches a policy from an object.

", + "smithy.api#examples": [ + { + "title": "To detach a policy from an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "PolicyReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWgcBsTVmcQEWs6jlygfhuew" + }, + "ObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWQoovm1s3Ts2v0NKrzdVnPw" + } + }, + "output": {} + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/policy/detach", @@ -5069,6 +5436,35 @@ ], "traits": { "smithy.api#documentation": "

Detaches a typed link from a specified source and target object. For more information, see Typed Links.

", + "smithy.api#examples": [ + { + "title": "To detach a typed link from an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "TypedLinkSpecifier": { + "SourceObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + }, + "IdentityAttributeValues": [ + { + "AttributeName": "22", + "Value": { + "BinaryValue": "c3Ry" + } + } + ], + "TargetObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + }, + "TypedLinkFacet": { + "TypedLinkName": "exampletypedlink8", + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + } + } + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/typedlink/detach", @@ -5261,6 +5657,18 @@ ], "traits": { "smithy.api#documentation": "

Disables the specified directory. Disabled directories cannot be read or written to.\n Only enabled directories can be disabled. Disabled directories may be reenabled.

", + "smithy.api#examples": [ + { + "title": "To disable a directory", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8" + }, + "output": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/directory/disable", @@ -5335,6 +5743,18 @@ ], "traits": { "smithy.api#documentation": "

Enables the specified directory. Only disabled directories can be enabled. Once\n enabled, the directory can then be read and written to.

", + "smithy.api#examples": [ + { + "title": "To enable a disabled directory", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8" + }, + "output": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/amazonclouddirectory/2017-01-11/directory/enable", @@ -5748,6 +6168,23 @@ ], "traits": { "smithy.api#documentation": "

Retrieves metadata about a directory.

", + "smithy.api#examples": [ + { + "title": "To get information about a directory", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY" + }, + "output": { + "Directory": { + "CreationDateTime": 1.506115781186E9, + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "Name": "ExampleCD", + "State": "ENABLED" + } + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/directory/get", @@ -5822,6 +6259,22 @@ ], "traits": { "smithy.api#documentation": "

Gets details of the Facet, such as facet name, attributes, Rules, or ObjectType. You can call this on all kinds of schema\n facets -- published, development, or applied.

", + "smithy.api#examples": [ + { + "title": "To get information about a facet", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1", + "Name": "node2" + }, + "output": { + "Facet": { + "Name": "node2", + "ObjectType": "NODE" + } + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/facet", @@ -6102,6 +6555,28 @@ ], "traits": { "smithy.api#documentation": "

Retrieves metadata about an object.

", + "smithy.api#examples": [ + { + "title": "To get information about an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "ObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWmspn1fxfQmSQaaVKSbvEiQ" + }, + "ConsistencyLevel": "SERIALIZABLE" + }, + "output": { + "SchemaFacets": [ + { + "FacetName": "node2", + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + } + ], + "ObjectIdentifier": "AQGG_ADlfNZBzYHY_JgDt3TWmspn1fxfQmSQaaVKSbvEiQ" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/object/information", @@ -6192,6 +6667,19 @@ ], "traits": { "smithy.api#documentation": "

Retrieves a JSON representation of the schema. See JSON Schema Format for more information.

", + "smithy.api#examples": [ + { + "title": "To get schema information and display it in JSON format", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + }, + "output": { + "Document": "{\"sourceSchemaArn\":\"arn:aws:clouddirectory:us-west-2:45132example:schema/published/org/1\",\"facets\":{\"node2\":{\"facetAttributes\":{},\"objectType\":\"NODE\"},\"Organization\":{\"facetAttributes\":{\"account_id\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"account_name\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"telephone_number\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"description\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_country\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_state\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_street2\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_street1\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"web_site\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"email\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_city\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"organization_status\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailin
g_address_postal_code\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"}},\"objectType\":\"LEAF_NODE\"},\"nodex\":{\"facetAttributes\":{},\"objectType\":\"NODE\"},\"Legal_Entity\":{\"facetAttributes\":{\"industry_vertical\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"registered_company_name\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"billing_currency\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_country\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_state\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_street2\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_street1\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"tax_id\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_city\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"},\"mailing_address_postal_code\":{\"attributeDefinition\":{\"attributeType\":\"STRING\",\"isImmutable\":false,\"attributeRules\":{\"nameLength\":{\"parameters\":{\"min\":\"1\",\"max\":\"1024\"},\"ruleType\":\"STRING_LENGTH\"}}},\"requiredBehavior\":\"NOT_REQUIRED\"}},\"objectType\":\"LEAF_NODE\"},\"policyfacet\":{\"facetAttributes\":{},\"objectType\":\"POLICY\"},\"node1\":{\"facetAttributes\":{},\"objectType\":\"NODE\"}},\"typedLinkFacets\":{\"exampletypedlink\":{\"facetAttributes\":{\"1\":{\"attributeDefinition\":{\"attributeType\":\"BINARY\",\"isImmutable\":false,\"attributeRules\":{}},\"requiredBehavior\":\"REQUIRED_ALWAYS\"}},\"identityAttributeOrder\":[\"1\"]},\"exampletypedlink8\":{\"facetAttributes\":{\"22\":{\"attributeDefinition\":{\"attributeType\":\"BINARY\",\"isImmutable\":false,\"attributeRules\":{}},\"requiredBehavior\":
\"REQUIRED_ALWAYS\"}},\"identityAttributeOrder\":[\"22\"]}}}", + "Name": "org" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/schema/json", @@ -6274,6 +6762,21 @@ ], "traits": { "smithy.api#documentation": "

Returns the identity attribute order for a specific TypedLinkFacet. For more information, see Typed Links.

", + "smithy.api#examples": [ + { + "title": "To get information about a typed link facet", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1", + "Name": "exampletypedlink8" + }, + "output": { + "IdentityAttributeOrder": [ + "22" + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/typedlink/facet/get", @@ -6601,6 +7104,20 @@ ], "traits": { "smithy.api#documentation": "

Lists schema major versions applied to a directory. If SchemaArn is provided, lists the minor version.

", + "smithy.api#examples": [ + { + "title": "To list applied schema ARNs for a specified directory", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY" + }, + "output": { + "SchemaArns": [ + "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/schema/applied", @@ -6702,6 +7219,26 @@ ], "traits": { "smithy.api#documentation": "

Lists indices attached to the specified object.

", + "smithy.api#examples": [ + { + "title": "To list the indices attached to an object", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "TargetReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + } + }, + "output": { + "IndexAttachments": [ + { + "IndexedAttributes": [], + "ObjectIdentifier": "AQGG_ADlfNZBzYHY_JgDt3TW45F26R1HTY2z-stwKBte_Q" + } + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/object/indices", @@ -6812,6 +7349,22 @@ ], "traits": { "smithy.api#documentation": "

Retrieves each Amazon Resource Name (ARN) of schemas in the development\n state.

", + "smithy.api#examples": [ + { + "title": "To list all development schema arns in your AWS account", + "documentation": "", + "output": { + "SchemaArns": [ + "arn:aws:clouddirectory:us-west-2:45132example:schema/development/typedlinkschematest", + "arn:aws:clouddirectory:us-west-2:45132example:schema/development/testCDschema", + "arn:aws:clouddirectory:us-west-2:45132example:schema/development/Customers", + "arn:aws:clouddirectory:us-west-2:45132example:schema/development/CourseCatalog", + "arn:aws:clouddirectory:us-west-2:45132example:schema/development/Consumers", + "arn:aws:clouddirectory:us-west-2:45132example:schema/development/exampleorg" + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/schema/development", @@ -6897,6 +7450,46 @@ ], "traits": { "smithy.api#documentation": "

Lists directories created within an account.

", + "smithy.api#examples": [ + { + "title": "To list all directories in your AWS account", + "documentation": "", + "output": { + "Directories": [ + { + "State": "ENABLED", + "CreationDateTime": 1.506121791167E9, + "Name": "ExampleCD4", + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/Ae89hOKmw0bRpvYgW8EAsus" + }, + { + "State": "DELETED", + "CreationDateTime": 1.485473189746E9, + "Name": "testCD", + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AXQXDXvdgkOWktRXV4HnRa8" + }, + { + "State": "ENABLED", + "CreationDateTime": 1.506115781186E9, + "Name": "ExampleCD", + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY" + }, + { + "State": "ENABLED", + "CreationDateTime": 1.506118003859E9, + "Name": "ExampleCD2", + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AfMr4qym1kZTvwqOafAYfqI" + }, + { + "State": "DELETED", + "CreationDateTime": 1.485477107925E9, + "Name": "testCD2", + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AWeI1yjiB0SylWVTvQklCD0" + } + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/directory/list", @@ -6995,6 +7588,191 @@ ], "traits": { "smithy.api#documentation": "

Retrieves attributes attached to the facet.

", + "smithy.api#examples": [ + { + "title": "To list facet attributes", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1", + "Name": "Organization" + }, + "output": { + "Attributes": [ + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "account_id" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "account_name" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "description" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "email" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "mailing_address_city" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "mailing_address_country" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "mailing_address_postal_code" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "mailing_address_state" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "mailing_address_street1" + }, + { + "AttributeDefinition": { + "Rules": { + "nameLength": { + "Type": "STRING_LENGTH", + "Parameters": { + "max": "1024", + "min": "1" + } + } + }, + "Type": "STRING", + "IsImmutable": false + }, + "RequiredBehavior": "NOT_REQUIRED", + "Name": "mailing_address_street2" + } + ], + "NextToken": "V0b3JnYW5pemF0aW9uX3N0YXR1cw==" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/facet/attributes", @@ -7098,6 +7876,25 @@ ], "traits": { "smithy.api#documentation": "

Retrieves the names of facets that exist in a schema.

", + "smithy.api#examples": [ + { + "title": "To list facet names", + "documentation": "", + "input": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + }, + "output": { + "FacetNames": [ + "Legal_Entity", + "Organization", + "node1", + "node2", + "nodex", + "policyfacet" + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/facet/list", @@ -7200,6 +7997,83 @@ ], "traits": { "smithy.api#documentation": "

Returns a paginated list of all the incoming TypedLinkSpecifier\n information for an object. It also supports filtering by typed link facet and identity\n attributes. For more information, see Typed Links.
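A hedged Soto sketch of paging through the incoming typed links manually via `nextToken` (a generated paginator may also exist, but is not assumed here). Member names follow Soto's conventions and the identifiers come from the model example.

```swift
import SotoCloudDirectory

let client = AWSClient()
let cloudDirectory = CloudDirectory(client: client, region: .uswest2)

let directoryArn = "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY"
let target = CloudDirectory.ObjectReference(
    selector: "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw"
)

// Page through every incoming typed link for the target object.
var nextToken: String?
repeat {
    let page = try await cloudDirectory.listIncomingTypedLinks(.init(
        directoryArn: directoryArn,
        nextToken: nextToken,
        objectReference: target
    ))
    for specifier in page.linkSpecifiers ?? [] {
        print(specifier)
    }
    nextToken = page.nextToken
} while nextToken?.isEmpty == false   // stop on a missing or empty token

try await client.shutdown()
```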

", + "smithy.api#examples": [ + { + "title": "To list incoming typed links", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "ObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + } + }, + "output": { + "LinkSpecifiers": [ + { + "SourceObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + }, + "IdentityAttributeValues": [ + { + "AttributeName": "22", + "Value": { + "BinaryValue": "" + } + } + ], + "TargetObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + }, + "TypedLinkFacet": { + "TypedLinkName": "exampletypedlink8", + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + } + }, + { + "SourceObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + }, + "IdentityAttributeValues": [ + { + "AttributeName": "22", + "Value": { + "BinaryValue": "MA==" + } + } + ], + "TargetObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + }, + "TypedLinkFacet": { + "TypedLinkName": "exampletypedlink8", + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + } + }, + { + "SourceObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + }, + "IdentityAttributeValues": [ + { + "AttributeName": "22", + "Value": { + "BinaryValue": "c3Ry" + } + } + ], + "TargetObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + }, + "TypedLinkFacet": { + "TypedLinkName": "exampletypedlink8", + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1" + } + } + ], + "NextToken": "" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/typedlink/incoming", @@ -7325,6 +8199,35 @@ ], "traits": { "smithy.api#documentation": "

Lists objects attached to the specified index.

", + "smithy.api#examples": [ + { + "title": "To list an index", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "IndexReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TW45F26R1HTY2z-stwKBte_Q" + } + }, + "output": { + "IndexAttachments": [ + { + "ObjectIdentifier": "AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw", + "IndexedAttributes": [ + { + "Value": {}, + "Key": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1", + "FacetName": "Organization", + "Name": "description" + } + } + ] + } + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/index/targets", @@ -7535,6 +8438,42 @@ ], "traits": { "smithy.api#documentation": "

Lists all attributes that are associated with an object.\n

", + "smithy.api#examples": [ + { + "title": "To list object attributes", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "ObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TW45F26R1HTY2z-stwKBte_Q" + } + }, + "output": { + "Attributes": [ + { + "Value": { + "BooleanValue": true + }, + "Key": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/CloudDirectory/1.0", + "FacetName": "INDEX", + "Name": "index_is_unique" + } + }, + { + "Value": { + "StringValue": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/org/1*Organization*description" + }, + "Key": { + "SchemaArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY/schema/CloudDirectory/1.0", + "FacetName": "INDEX", + "Name": "ordered_indexed_attributes" + } + } + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/object/attributes", @@ -7657,6 +8596,23 @@ ], "traits": { "smithy.api#documentation": "

Returns a paginated list of child objects that are associated with a given\n object.

", + "smithy.api#examples": [ + { + "title": "To list an objects children", + "documentation": "", + "input": { + "DirectoryArn": "arn:aws:clouddirectory:us-west-2:45132example:directory/AYb8AOV81kHNgdj8mAO3dNY", + "ObjectReference": { + "Selector": "$AQGG_ADlfNZBzYHY_JgDt3TWcU7IARvOTeaR09zme1sVsw" + } + }, + "output": { + "Children": { + "link2": "AQGG_ADlfNZBzYHY_JgDt3TWSvfuEnDqTdmeCuTs6YBNUA" + } + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/amazonclouddirectory/2017-01-11/object/children", diff --git a/models/cloudformation.json b/models/cloudformation.json index a2160e7f7e..9334d3409f 100644 --- a/models/cloudformation.json +++ b/models/cloudformation.json @@ -119,7 +119,7 @@ "Name": { "target": "com.amazonaws.cloudformation#LimitName", "traits": { - "smithy.api#documentation": "

The name of the account limit.

\n

Values: ConcurrentResourcesLimit | StackLimit | StackOutputsLimit\n

" + "smithy.api#documentation": "

The name of the account limit.

\n

Values: ConcurrentResourcesLimit | StackLimit |\n StackOutputsLimit\n

" } }, "Value": { @@ -130,7 +130,7 @@ } }, "traits": { - "smithy.api#documentation": "

The AccountLimit data type.

CloudFormation has the following limits per account:
  • Number of concurrent resources
  • Number of stacks
  • Number of stack outputs

For more information about these account limits, and other CloudFormation limits, see CloudFormation quotas in the CloudFormation User Guide.

" + "smithy.api#documentation": "

The AccountLimit data type.

CloudFormation has the following limits per account:
  • Number of concurrent resources
  • Number of stacks
  • Number of stack outputs

For more information about these account limits, and other CloudFormation limits, see Understand CloudFormation quotas in the CloudFormation User Guide.
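For Soto users, these limits are surfaced by `DescribeAccountLimits`; a minimal sketch, with member names assumed from the generated CloudFormation client:

```swift
import SotoCloudFormation

let client = AWSClient()
let cloudFormation = CloudFormation(client: client, region: .useast1)

// List the per-account CloudFormation limits (concurrent resources, stacks, stack outputs).
let page = try await cloudFormation.describeAccountLimits(.init())
for limit in page.accountLimits ?? [] {
    print(limit)
}
try await client.shutdown()
```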

" } }, "com.amazonaws.cloudformation#AccountLimitList": { @@ -172,7 +172,7 @@ } ], "traits": { - "smithy.api#documentation": "

Activate trusted access with Organizations. With trusted access between StackSets and Organizations\n activated, the management account has permissions to create and manage StackSets for your\n organization.

" + "smithy.api#documentation": "

Activate trusted access with Organizations. With trusted access between StackSets\n and Organizations activated, the management account has permissions to create\n and manage StackSets for your organization.

" } }, "com.amazonaws.cloudformation#ActivateOrganizationsAccessInput": { @@ -206,7 +206,7 @@ } ], "traits": { - "smithy.api#documentation": "

Activates a public third-party extension, making it available for use in stack templates. For more information,\n see Using public\n extensions in the CloudFormation User Guide.

\n

Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For more information, see\n Configuring extensions at\n the account level in the CloudFormation User Guide.

", + "smithy.api#documentation": "

Activates a public third-party extension, making it available for use in stack templates.\n Once you have activated a public third-party extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For\n more information, see Using public\n extensions in the CloudFormation User Guide.
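A hedged Soto sketch of activating a public extension under a local alias and then supplying its account-level configuration with SetTypeConfiguration. The publisher ID, public type name, and configuration JSON are hypothetical placeholders; member names are assumed from the generated client.

```swift
import SotoCloudFormation

let client = AWSClient()
let cloudFormation = CloudFormation(client: client, region: .useast1)

// Activate a public third-party resource type under a local alias.
// Publisher ID and type name below are hypothetical placeholders.
let activation = try await cloudFormation.activateType(.init(
    publisherId: "0123456789abcdef0123456789abcdef",
    type: .resource,
    typeName: "Example::Monitoring::Alarm",
    typeNameAlias: "MyOrg::Monitoring::Alarm"
))

// Then provide account-level configuration for the activated extension.
_ = try await cloudFormation.setTypeConfiguration(.init(
    configuration: #"{"ApiKey": "replace-me"}"#,   // hypothetical configuration payload
    typeArn: activation.arn
))
try await client.shutdown()
```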

", "smithy.api#idempotent": {} } }, @@ -216,37 +216,37 @@ "Type": { "target": "com.amazonaws.cloudformation#ThirdPartyType", "traits": { - "smithy.api#documentation": "

The extension type.

\n

Conditional: You must specify PublicTypeArn, or TypeName, Type, and\n PublisherId.

" + "smithy.api#documentation": "

The extension type.

\n

Conditional: You must specify PublicTypeArn, or TypeName,\n Type, and PublisherId.

" } }, "PublicTypeArn": { "target": "com.amazonaws.cloudformation#ThirdPartyTypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the public extension.

\n

Conditional: You must specify PublicTypeArn, or TypeName, Type, and\n PublisherId.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the public extension.

\n

Conditional: You must specify PublicTypeArn, or TypeName,\n Type, and PublisherId.

" } }, "PublisherId": { "target": "com.amazonaws.cloudformation#PublisherId", "traits": { - "smithy.api#documentation": "

The ID of the extension publisher.

\n

Conditional: You must specify PublicTypeArn, or TypeName, Type, and\n PublisherId.

" + "smithy.api#documentation": "

The ID of the extension publisher.

\n

Conditional: You must specify PublicTypeArn, or TypeName,\n Type, and PublisherId.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify PublicTypeArn, or TypeName, Type, and\n PublisherId.

" + "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify PublicTypeArn, or TypeName,\n Type, and PublisherId.

" } }, "TypeNameAlias": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

An alias to assign to the public extension, in this account and Region. If you specify an alias for the\n extension, CloudFormation treats the alias as the extension type name within this account and Region. You must use the\n alias to refer to the extension in your templates, API calls, and CloudFormation console.

\n

An extension alias must be unique within a given account and Region. You can activate the same public resource\n multiple times in the same account and Region, using different type name aliases.

" + "smithy.api#documentation": "

An alias to assign to the public extension, in this account and Region. If you specify an\n alias for the extension, CloudFormation treats the alias as the extension type name within this\n account and Region. You must use the alias to refer to the extension in your templates, API\n calls, and CloudFormation console.

\n

An extension alias must be unique within a given account and Region. You can activate the\n same public resource multiple times in the same account and Region, using different type name\n aliases.

" } }, "AutoUpdate": { "target": "com.amazonaws.cloudformation#AutoUpdate", "traits": { - "smithy.api#documentation": "

Whether to automatically update the extension in this account and Region when a new minor\n version is published by the extension publisher. Major versions released by the publisher must be manually\n updated.

\n

The default is true.

" + "smithy.api#documentation": "

Whether to automatically update the extension in this account and Region when a new\n minor version is published by the extension publisher. Major versions\n released by the publisher must be manually updated.

\n

The default is true.

" } }, "LoggingConfig": { @@ -264,13 +264,13 @@ "VersionBump": { "target": "com.amazonaws.cloudformation#VersionBump", "traits": { - "smithy.api#documentation": "

Manually updates a previously-activated type to a new major or minor version, if available. You can also use\n this parameter to update the value of AutoUpdate.

  • MAJOR: CloudFormation updates the extension to the newest major version, if one is available.
  • MINOR: CloudFormation updates the extension to the newest minor version, if one is available.
" + "smithy.api#documentation": "

Manually updates a previously-activated type to a new major or minor version, if\n available. You can also use this parameter to update the value of\n AutoUpdate.

  • MAJOR: CloudFormation updates the extension to the newest major version, if one is available.
  • MINOR: CloudFormation updates the extension to the newest minor version, if one is available.
" } }, "MajorVersion": { "target": "com.amazonaws.cloudformation#MajorVersion", "traits": { - "smithy.api#documentation": "

The major version of this extension you want to activate, if multiple major versions are available. The default\n is the latest major version. CloudFormation uses the latest available minor version of the major\n version selected.

\n

You can specify MajorVersion or VersionBump, but not both.

" + "smithy.api#documentation": "

The major version of this extension you want to activate, if multiple major versions are\n available. The default is the latest major version. CloudFormation uses the latest available\n minor version of the major version selected.

\n

You can specify MajorVersion or VersionBump, but not\n both.

" } } }, @@ -284,7 +284,7 @@ "Arn": { "target": "com.amazonaws.cloudformation#PrivateTypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the activated extension, in this account and Region.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the activated extension, in this account and\n Region.

" } } }, @@ -393,7 +393,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns configuration data for the specified CloudFormation extensions, from the CloudFormation registry\n for the account and Region.

\n

For more information, see Configuring extensions at\n the account level in the CloudFormation User Guide.

" + "smithy.api#documentation": "

Returns configuration data for the specified CloudFormation extensions, from the CloudFormation\n registry for the account and Region.

\n

For more information, see Edit configuration\n data for extensions in your account in the\n CloudFormation User Guide.

" } }, "com.amazonaws.cloudformation#BatchDescribeTypeConfigurationsError": { @@ -419,7 +419,7 @@ } }, "traits": { - "smithy.api#documentation": "

Detailed information concerning an error generated during the setting of configuration data for a CloudFormation extension.

" + "smithy.api#documentation": "

Detailed information concerning an error generated during the setting of configuration data\n for a CloudFormation extension.

" } }, "com.amazonaws.cloudformation#BatchDescribeTypeConfigurationsErrors": { @@ -450,19 +450,19 @@ "Errors": { "target": "com.amazonaws.cloudformation#BatchDescribeTypeConfigurationsErrors", "traits": { - "smithy.api#documentation": "

A list of information concerning any errors generated during the setting of the specified configurations.

" + "smithy.api#documentation": "

A list of information concerning any errors generated during the setting of the specified\n configurations.

" } }, "UnprocessedTypeConfigurations": { "target": "com.amazonaws.cloudformation#UnprocessedTypeConfigurations", "traits": { - "smithy.api#documentation": "

A list of any of the specified extension configurations that CloudFormation could not process for any reason.

" + "smithy.api#documentation": "

A list of any of the specified extension configurations that CloudFormation could not process\n for any reason.

" } }, "TypeConfigurations": { "target": "com.amazonaws.cloudformation#TypeConfigurationDetailsList", "traits": { - "smithy.api#documentation": "

A list of any of the specified extension configurations from the CloudFormation registry.

" + "smithy.api#documentation": "

A list of any of the specified extension configurations from the CloudFormation\n registry.

" } } }, @@ -494,7 +494,7 @@ "Message": { "target": "com.amazonaws.cloudformation#ErrorMessage", "traits": { - "smithy.api#documentation": "

An message with details about the error that occurred.

" + "smithy.api#documentation": "

A message with details about the error that occurred.

" } } }, @@ -539,7 +539,7 @@ } ], "traits": { - "smithy.api#documentation": "

Cancels an update on the specified stack. If the call completes successfully, the stack rolls back the update\n and reverts to the previous stack configuration.

\n \n

You can cancel only stacks that are in the UPDATE_IN_PROGRESS state.

\n
" + "smithy.api#documentation": "

Cancels an update on the specified stack. If the call completes successfully, the stack\n rolls back the update and reverts to the previous stack configuration.

\n \n

You can cancel only stacks that are in the UPDATE_IN_PROGRESS state.
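A minimal Soto sketch of the call; the stack name and request token are hypothetical and the shapes are assumed from the generated client.

```swift
import SotoCloudFormation

let client = AWSClient()
let cloudFormation = CloudFormation(client: client, region: .useast1)

// Cancel an in-progress update; the stack rolls back to its previous configuration.
// The token lets retried cancel requests be recognised as the same attempt.
try await cloudFormation.cancelUpdateStack(.init(
    clientRequestToken: "cancel-attempt-1",
    stackName: "my-example-stack"   // hypothetical stack name
))
try await client.shutdown()
```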

\n
" } }, "com.amazonaws.cloudformation#CancelUpdateStackInput": { @@ -549,14 +549,14 @@ "target": "com.amazonaws.cloudformation#StackName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "\n

If you don't pass a parameter to StackName, the API returns a response that describes all\n resources in the account.

\n

The IAM policy below can be added to IAM policies when you want to limit resource-level permissions and\n avoid returning a response when no parameter is sent in the request:

\n

\n { \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\": \"cloudformation:DescribeStacks\",\n \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }] }\n

\n
\n

The name or the unique stack ID that's associated with the stack.

", + "smithy.api#documentation": "\n

If you don't pass a parameter to StackName, the API returns a response that\n describes all resources in the account.

\n

The IAM policy below can be added to IAM policies when you want to limit\n resource-level permissions and avoid returning a response when no parameter is sent in the\n request:

\n

\n { \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\":\n \"cloudformation:DescribeStacks\", \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }]\n }\n

\n
\n

The name or the unique stack ID that's associated with the stack.

", "smithy.api#required": {} } }, "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

A unique identifier for this CancelUpdateStack request. Specify this token if you plan to retry\n requests so that CloudFormation knows that you're not attempting to cancel an update on a stack with the same name. You might\n retry CancelUpdateStack requests to ensure that CloudFormation successfully received them.

" + "smithy.api#documentation": "

A unique identifier for this CancelUpdateStack request. Specify this token if\n you plan to retry requests so that CloudFormation knows that you're not attempting to cancel an\n update on a stack with the same name. You might retry CancelUpdateStack requests\n to ensure that CloudFormation successfully received them.

" } } }, @@ -641,18 +641,18 @@ "HookInvocationCount": { "target": "com.amazonaws.cloudformation#HookInvocationCount", "traits": { - "smithy.api#documentation": "

Is either null, if no hooks invoke for the resource, or contains the number of hooks that will\n invoke for the resource.

" + "smithy.api#documentation": "

Is either null, if no hooks invoke for the resource, or contains the number of\n hooks that will invoke for the resource.

" } }, "ResourceChange": { "target": "com.amazonaws.cloudformation#ResourceChange", "traits": { - "smithy.api#documentation": "

A ResourceChange structure that describes the resource and action that CloudFormation will perform.

" + "smithy.api#documentation": "

A ResourceChange structure that describes the resource and action that\n CloudFormation will perform.

" } } }, "traits": { - "smithy.api#documentation": "

The Change structure describes the changes CloudFormation will perform if you execute the change set.

" + "smithy.api#documentation": "

The Change structure describes the changes CloudFormation will perform if you\n execute the change set.

" } }, "com.amazonaws.cloudformation#ChangeAction": { @@ -708,7 +708,7 @@ "TypeName": { "target": "com.amazonaws.cloudformation#HookTypeName", "traits": { - "smithy.api#documentation": "

The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of\n Organization::Service::Hook.

\n \n

The following organization namespaces are reserved and can't be used in your hook type names:

  • Alexa
  • AMZN
  • Amazon
  • ASK
  • AWS
  • Custom
  • Dev
" + "smithy.api#documentation": "

The unique name for your hook. Specifies a three-part namespace for your hook, with a\n recommended pattern of Organization::Service::Hook.

\n \n

The following organization namespaces are reserved and can't be used in your hook type\n names:

\n
    \n
  • \n

    \n Alexa\n

    \n
  • \n
  • \n

    \n AMZN\n

    \n
  • \n
  • \n

    \n Amazon\n

    \n
  • \n
  • \n

    \n ASK\n

    \n
  • \n
  • \n

    \n AWS\n

    \n
  • \n
  • \n

    \n Custom\n

    \n
  • \n
  • \n

    \n Dev\n

    \n
  • \n
\n
" } }, "TypeVersionId": { @@ -947,19 +947,19 @@ "ExecutionStatus": { "target": "com.amazonaws.cloudformation#ExecutionStatus", "traits": { - "smithy.api#documentation": "

            "smithy.api#documentation": "If the change set execution status is AVAILABLE, you can execute the change set. If you can't execute the change set, the status indicates why. For example, a change set might be in an UNAVAILABLE state because CloudFormation is still creating it or in an OBSOLETE state because the stack was already updated."
        } },
        "Status": { "target": "com.amazonaws.cloudformation#ChangeSetStatus", "traits": {
            "smithy.api#documentation": "The state of the change set, such as CREATE_IN_PROGRESS, CREATE_COMPLETE, or FAILED."
        } },
        "StatusReason": { "target": "com.amazonaws.cloudformation#ChangeSetStatusReason", "traits": {
            "smithy.api#documentation": "A description of the change set's status. For example, if your change set is in the FAILED state, CloudFormation shows the error message."
        } },
        "CreationTime": {
@@ -1000,7 +1000,7 @@
      } },
      "traits": {
          "smithy.api#documentation": "The ChangeSetSummary structure describes a change set, its status, and the stack with which it's associated."
      }
    },
    "com.amazonaws.cloudformation#ChangeSetType": {
@@ -1360,7 +1360,7 @@
        "name": "cloudformation"
      },
      "aws.protocols#awsQuery": {},
      "smithy.api#documentation": "CloudFormation
-         CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure.
+         CloudFormation allows you to create and manage Amazon Web Services infrastructure deployments predictably and repeatedly. You can use CloudFormation to leverage Amazon Web Services products, such as Amazon Elastic Compute Cloud, Amazon Elastic Block Store, Amazon Simple Notification Service, Elastic Load Balancing, and Amazon EC2 Auto Scaling to build highly reliable, highly scalable, cost-effective applications without creating or configuring the underlying Amazon Web Services infrastructure.
          With CloudFormation, you declare all your resources and dependencies in a template file. The template defines a collection of resources as a single unit called a stack. CloudFormation creates and deletes all member resources of the stack together and manages all dependencies between the resources for you.
          For more information about CloudFormation, see the CloudFormation product page.
          CloudFormation makes use of other Amazon Web Services products. If you need additional technical information about a specific Amazon Web Services product, you can find the product's technical documentation at docs.aws.amazon.com.",
      "smithy.api#title": "AWS CloudFormation",
      "smithy.api#xmlNamespace": { "uri": "http://cloudformation.amazonaws.com/doc/2010-05-15/"
@@ -2452,7 +2452,7 @@
      } ],
      "traits": {
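Since this is the top-level service documentation for the regenerated SotoCloudFormation module, a minimal wiring sketch may help orient readers; the credential setup, region, and client construction below are assumptions that depend on the application and on the Soto version in use.

    import SotoCloudFormation

    // Minimal sketch: construct a client, call one CloudFormation operation, shut down.
    func listStackNames() async throws {
        let awsClient = AWSClient()   // default credential chain; adjust to your Soto version and setup
        let cloudFormation = CloudFormation(client: awsClient, region: .useast1)
        let response = try await cloudFormation.describeStacks(.init())
        print(response.stacks?.map(\.stackName) ?? [])
        try await awsClient.shutdown()
    }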
          "smithy.api#documentation": "For a specified stack that's in the UPDATE_ROLLBACK_FAILED state, continues rolling it back to the UPDATE_ROLLBACK_COMPLETE state. Depending on the cause of the failure, you can manually fix the error and continue the rollback. By continuing the rollback, you can return your stack to a working state (the UPDATE_ROLLBACK_COMPLETE state), and then try to update the stack again.
           A stack goes into the UPDATE_ROLLBACK_FAILED state when CloudFormation can't roll back all changes after a failed stack update. For example, you might have a stack that's rolling back to an old database instance that was deleted outside of CloudFormation. Because CloudFormation doesn't know the database was deleted, it assumes that the database instance still exists and attempts to roll back to it, causing the update rollback to fail."
      }
    },
    "com.amazonaws.cloudformation#ContinueUpdateRollbackInput": {
@@ -2462,26 +2462,26 @@
          "target": "com.amazonaws.cloudformation#StackNameOrId",
          "traits": {
            "smithy.api#clientOptional": {},
            "smithy.api#documentation": "The name or the unique ID of the stack that you want to continue rolling back. Don't specify the name of a nested stack (a stack that was created by using the AWS::CloudFormation::Stack resource). Instead, use this operation on the parent stack (the stack that contains the AWS::CloudFormation::Stack resource).",
            "smithy.api#required": {}
          }
        },
        "RoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": {
-           "smithy.api#documentation": "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to roll back the stack.
+           "smithy.api#documentation": "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to roll back the stack.
             CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission.
             If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that's generated from your user credentials."
        } },
        "ResourcesToSkip": { "target": "com.amazonaws.cloudformation#ResourcesToSkip", "traits": {
            "smithy.api#documentation": "A list of the logical IDs of the resources that CloudFormation skips during the continue update rollback operation. You can specify only resources that are in the UPDATE_FAILED state because a rollback failed. You can't specify resources that are in the UPDATE_FAILED state for other reasons, for example, because an update was canceled. To check why a resource update failed, use the DescribeStackResources action, and view the resource status reason.
             Specify this property to skip rolling back resources that CloudFormation can't successfully roll back. We recommend that you troubleshoot resources before skipping them. CloudFormation sets the status of the specified resources to UPDATE_COMPLETE and continues to roll back the stack. After the rollback is complete, the state of the skipped resources will be inconsistent with the state of the resources in the stack template. Before performing another stack update, you must update the stack or resources to be consistent with each other. If you don't, subsequent stack updates might fail, and the stack will become unrecoverable.
             Specify the minimum number of resources required to successfully roll back your stack. For example, a failed resource update might cause dependent resources to fail. In this case, it might not be necessary to skip the dependent resources.
             To skip resources that are part of nested stacks, use the following format: NestedStackName.ResourceLogicalID. If you want to specify the logical ID of a stack resource (Type: AWS::CloudFormation::Stack) in the ResourcesToSkip list, then its corresponding embedded stack must be in one of the following states: DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED.
-            Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Using ResourcesToSkip to recover a nested stacks hierarchy."
+            Don't confuse a child stack's name with its corresponding logical ID defined in the parent stack. For an example of a continue update rollback operation with nested stacks, see Continue rolling back from failed nested stack updates."
        } },
        "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": {
            "smithy.api#documentation": "A unique identifier for this ContinueUpdateRollback request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to continue the rollback to a stack with the same name. You might retry ContinueUpdateRollback requests to ensure that CloudFormation successfully received them."
        } }
      }
    },
@@ -2518,7 +2518,7 @@
      } ],
      "traits": {
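The ContinueUpdateRollback parameters documented above translate into a call such as the following sketch; the stack, resource, and role names are invented for illustration, and resourcesToSkip is only valid for resources in the UPDATE_FAILED state.

    import SotoCloudFormation

    // Sketch of continuing a failed update rollback while skipping a single failed resource.
    func continueRollback(using cloudFormation: CloudFormation) async throws {
        _ = try await cloudFormation.continueUpdateRollback(
            .init(
                clientRequestToken: "continue-rollback-attempt-1",
                resourcesToSkip: ["MyDatabaseInstance"],
                roleARN: "arn:aws:iam::123456789012:role/CloudFormationServiceRole",
                stackName: "my-example-stack"
            )
        )
    }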
          "smithy.api#documentation": "Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that CloudFormation will create. If you create a change set for an existing stack, CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack.
           To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE. To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. To create a change set for an import operation, specify IMPORT for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action.
           When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. CloudFormation doesn't make changes until you execute the change set.
           To create a change set for the entire stack hierarchy, set IncludeNestedStacks to True."
      }
    },
    "com.amazonaws.cloudformation#CreateChangeSetInput": {
@@ -2528,82 +2528,82 @@
          "target": "com.amazonaws.cloudformation#StackNameOrId",
          "traits": {
            "smithy.api#clientOptional": {},
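The review-before-execute flow described above looks roughly like the following sketch in Soto; the change set name, stack name, and template are illustrative assumptions.

    import SotoCloudFormation

    // Create a change set for an existing stack, then inspect it with DescribeChangeSet.
    func previewChanges(using cloudFormation: CloudFormation, templateBody: String) async throws {
        let created = try await cloudFormation.createChangeSet(
            .init(
                changeSetName: "preview-changes",
                changeSetType: .update,              // UPDATE for an existing stack, CREATE for a new one
                stackName: "my-example-stack",
                templateBody: templateBody
            )
        )
        let details = try await cloudFormation.describeChangeSet(
            .init(changeSetName: created.id ?? "preview-changes", stackName: "my-example-stack")
        )
        print(String(describing: details.status), details.changes?.count ?? 0)
    }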
            "smithy.api#documentation": "The name or the unique ID of the stack for which you are creating a change set. CloudFormation generates the change set by comparing this stack's information with the information that you submit, such as a modified template or different parameter input values.",
            "smithy.api#required": {}
          }
        },
        "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": {
            "smithy.api#documentation": "A structure that contains the body of the revised template, with a minimum length of 1 byte and a maximum length of 51,200 bytes. CloudFormation generates the change set by comparing this template with the template of the stack that you specified. Conditional: You must specify only TemplateBody or TemplateURL."
        } },
        "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": {
            "smithy.api#documentation": "The location of the file that contains the revised template. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. CloudFormation generates the change set by comparing this template with the stack that you specified. The location for an Amazon S3 bucket must start with https://. Conditional: You must specify only TemplateBody or TemplateURL."
        } },
        "UsePreviousTemplate": { "target": "com.amazonaws.cloudformation#UsePreviousTemplate", "traits": {
            "smithy.api#documentation": "Whether to reuse the template that's associated with the stack to create the change set."
        } },
        "Parameters": { "target": "com.amazonaws.cloudformation#Parameters", "traits": {
            "smithy.api#documentation": "A list of Parameter structures that specify input parameters for the change set. For more information, see the Parameter data type."
        } },
        "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": {
            "smithy.api#documentation": "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
              • CAPABILITY_IAM and CAPABILITY_NAMED_IAM
-               Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
+               Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
                The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
                  • If you have IAM resources, you can specify either capability.
                  • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
                  • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
                If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.
                For more information, see Acknowledging IAM resources in CloudFormation templates.
              • CAPABILITY_AUTO_EXPAND
                Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
                This capacity doesn't apply to creating change sets, and specifying it when creating change sets has no effect.
                If you want to create a stack from a stack template that contains macros and nested stacks, you must create or update the stack directly from the template using the CreateStack or UpdateStack action, and specifying this capability.
-               For more information about macros, see Using CloudFormation macros to perform custom processing on templates.
+               For more information about macros, see Perform custom processing on CloudFormation templates with template macros.
             Only one of the Capabilities and ResourceType parameters can be specified."
        } },
        "ResourceTypes": { "target": "com.amazonaws.cloudformation#ResourceTypes", "traits": {
            "smithy.api#documentation": "The template resource types that you have permissions to work with if you execute this change set, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.
-            If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Controlling access with Identity and Access Management in the CloudFormation User Guide.
+            If the list of resource types doesn't include a resource type that you're updating, the stack update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for condition keys in IAM policies for CloudFormation. For more information, see Control access with Identity and Access Management in the CloudFormation User Guide.
             Only one of the Capabilities and ResourceType parameters can be specified."
        } },
        "RoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": {
-           "smithy.api#documentation": "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes when executing the change set.
+           "smithy.api#documentation": "The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes when executing the change set.
             CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least permission.
             If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role is available, CloudFormation uses a temporary session that is generated from your user credentials."
        } },
        "RollbackConfiguration": { "target": "com.amazonaws.cloudformation#RollbackConfiguration", "traits": {
            "smithy.api#documentation": "The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards."
        } },
        "NotificationARNs": { "target": "com.amazonaws.cloudformation#NotificationARNs", "traits": {
-           "smithy.api#documentation": "The Amazon Resource Names (ARNs) of Amazon Simple Notification Service (Amazon SNS) topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list."
+           "smithy.api#documentation": "The Amazon Resource Names (ARNs) of Amazon SNS topics that CloudFormation associates with the stack. To remove all associated notification topics, specify an empty list."
        } },
        "Tags": { "target": "com.amazonaws.cloudformation#Tags", "traits": {
            "smithy.api#documentation": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to resources in the stack. You can specify a maximum of 50 tags."
        } },
        "ChangeSetName": { "target": "com.amazonaws.cloudformation#ChangeSetName", "traits": {
            "smithy.api#clientOptional": {},
            "smithy.api#documentation": "The name of the change set. The name must be unique among all change sets that are associated with the specified stack. A change set name can contain only alphanumeric, case sensitive characters, and hyphens. It must start with an alphabetical character and can't exceed 128 characters.",
            "smithy.api#required": {}
        } },
        "ClientToken": { "target": "com.amazonaws.cloudformation#ClientToken", "traits": {
            "smithy.api#documentation": "A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that CloudFormation successfully received them."
        } },
        "Description": {
@@ -2615,7 +2615,7 @@
        "ChangeSetType": { "target": "com.amazonaws.cloudformation#ChangeSetType", "traits": {
            "smithy.api#documentation": "The type of change set operation. To create a change set for a new stack, specify CREATE. To create a change set for an existing stack, specify UPDATE. To create a change set for an import operation, specify IMPORT.
             If you create a change set for a new stack, CloudFormation creates a stack with a unique stack ID, but no template or resources. The stack will be in the REVIEW_IN_PROGRESS state until you execute the change set.
             By default, CloudFormation specifies UPDATE. You can't use the UPDATE type to create a change set for a new stack or the CREATE type to create a change set for an existing stack."
        } },
        "ResourcesToImport": {
@@ -2627,19 +2627,19 @@
        "IncludeNestedStacks": { "target": "com.amazonaws.cloudformation#IncludeNestedStacks", "traits": {
            "smithy.api#documentation": "Creates a change set for the all nested stacks specified in the template. The default behavior of this action is set to False. To include nested sets in a change set, specify True."
        } },
        "OnStackFailure": { "target": "com.amazonaws.cloudformation#OnStackFailure", "traits": {
            "smithy.api#documentation": "Determines what action will be taken if stack creation fails. If this parameter is specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these values:
              • DELETE - Deletes the change set if the stack creation fails. This is only valid when the ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status of the stack is DELETE_FAILED.
              • DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying true for the DisableRollback parameter to the ExecuteChangeSet API operation.
              • ROLLBACK - if the stack creation fails, roll back the stack. This is equivalent to specifying false for the DisableRollback parameter to the ExecuteChangeSet API operation.
             For nested stacks, when the OnStackFailure parameter is set to DELETE for the change set for the parent stack, any failure in a child stack will cause the parent stack creation to fail and all stacks to be deleted."
        } },
        "ImportExistingResources": { "target": "com.amazonaws.cloudformation#ImportExistingResources", "traits": {
            "smithy.api#documentation": "Indicates if the change set imports resources that already exist.
-            This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Bringing existing resources into CloudFormation management in the CloudFormation User Guide.
+            This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Import Amazon Web Services resources into a CloudFormation stack with a resource import in the CloudFormation User Guide."
        } }
      }
    },
@@ -2689,7 +2689,7 @@
      } ],
      "traits": {
          "smithy.api#documentation": "Creates a template from existing resources that are not already managed with CloudFormation. You can check the status of the template generation using the DescribeGeneratedTemplate API action.",
          "smithy.api#examples": [ { "title": "To create a generated template",
@@ -2724,7 +2724,7 @@
        "Resources": { "target": "com.amazonaws.cloudformation#ResourceDefinitions", "traits": {
            "smithy.api#documentation": "An optional list of resources to be included in the generated template. If no resources are specified,the template will be created without any resources. Resources can be added to the template using the UpdateGeneratedTemplate API action."
        } },
        "GeneratedTemplateName": {
@@ -2738,13 +2738,13 @@
        "StackName": { "target": "com.amazonaws.cloudformation#StackName", "traits": {
            "smithy.api#documentation": "An optional name or ARN of a stack to use as the base stack for the generated template."
        } },
        "TemplateConfiguration": { "target": "com.amazonaws.cloudformation#TemplateConfiguration", "traits": {
            "smithy.api#documentation": "The configuration details of the generated template, including the DeletionPolicy and UpdateReplacePolicy."
        } }
      }
    },
@@ -2789,7 +2789,7 @@
      } ],
      "traits": {
          "smithy.api#documentation": "Creates a stack as specified in the template. After the call completes successfully, the stack creation starts. You can check the status of the stack through the DescribeStacks operation.
+           For more information about creating a stack and monitoring stack progress, see Managing Amazon Web Services resources as a single unit with CloudFormation stacks in the CloudFormation User Guide."
      }
    },
    "com.amazonaws.cloudformation#CreateStackInput": {
@@ -2799,110 +2799,110 @@
          "target": "com.amazonaws.cloudformation#StackName",
          "traits": {
            "smithy.api#clientOptional": {},
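In Soto, the CreateStack behaviour documented above looks roughly like the following sketch; the stack name, tags, and timeout are illustrative, and the template body is supplied by the caller.

    import SotoCloudFormation

    // Minimal CreateStack sketch followed by a DescribeStacks status check.
    func createExampleStack(using cloudFormation: CloudFormation, templateBody: String) async throws {
        let created = try await cloudFormation.createStack(
            .init(
                onFailure: .rollback,                    // the documented default, shown explicitly
                stackName: "my-example-stack",
                tags: [.init(key: "project", value: "demo")],
                templateBody: templateBody,
                timeoutInMinutes: 30
            )
        )
        print("StackId:", created.stackId ?? "unknown")
        let current = try await cloudFormation.describeStacks(.init(stackName: "my-example-stack"))
        print(String(describing: current.stacks?.first?.stackStatus))
    }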
            "smithy.api#documentation": "The name that's associated with the stack. The name must be unique in the Region in which you are creating the stack. A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetical character and can't be longer than 128 characters.",
            "smithy.api#required": {}
          }
        },
        "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": {
-           "smithy.api#documentation": "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to Template anatomy in the CloudFormation User Guide.
+           "smithy.api#documentation": "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.
             Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both."
        } },
        "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": {
-           "smithy.api#documentation": "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to the Template anatomy in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://.
+           "smithy.api#documentation": "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must start with https://.
             Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both."
        } },
        "Parameters": { "target": "com.amazonaws.cloudformation#Parameters", "traits": {
            "smithy.api#documentation": "A list of Parameter structures that specify input parameters for the stack. For more information, see the Parameter data type."
        } },
        "DisableRollback": { "target": "com.amazonaws.cloudformation#DisableRollback", "traits": {
            "smithy.api#documentation": "Set to true to disable rollback of the stack if stack creation failed. You can specify either DisableRollback or OnFailure, but not both. Default: false"
        } },
        "RollbackConfiguration": { "target": "com.amazonaws.cloudformation#RollbackConfiguration", "traits": {
            "smithy.api#documentation": "The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards."
        } },
        "TimeoutInMinutes": { "target": "com.amazonaws.cloudformation#TimeoutMinutes", "traits": {
            "smithy.api#documentation": "The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false, the stack will be rolled back."
        } },
        "NotificationARNs": { "target": "com.amazonaws.cloudformation#NotificationARNs", "traits": {
-           "smithy.api#documentation": "The Amazon Simple Notification Service (Amazon SNS) topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI)."
+           "smithy.api#documentation": "The Amazon SNS topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI)."
        } },
        "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": {
            "smithy.api#documentation": "In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to create the stack.
              • CAPABILITY_IAM and CAPABILITY_NAMED_IAM
-               Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
+               Some stack templates might include resources that can affect permissions in your Amazon Web Services account; for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.
                The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.
                  • If you have IAM resources, you can specify either capability.
                  • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.
                  • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.
                If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.
-               For more information, see Acknowledging IAM Resources in CloudFormation Templates.
+               For more information, see Acknowledging IAM resources in CloudFormation templates.
              • CAPABILITY_AUTO_EXPAND
                Some template contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually creating the stack. If your stack template contains one or more macros, and you choose to create a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.
                If you want to create a stack from a stack template that contains macros and nested stacks, you must create the stack directly from the template using this capability.
                You should only create stacks directly from a stack template that contains macros if you know what processing the macro performs.
                Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.
-               For more information, see Using CloudFormation macros to perform custom processing on templates.
+               For more information, see Perform custom processing on CloudFormation templates with template macros.
             Only one of the Capabilities and ResourceType parameters can be specified."
        } },
        "ResourceTypes": { "target": "com.amazonaws.cloudformation#ResourceTypes", "traits": {
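The capability acknowledgement described above is passed as a request parameter; the sketch below shows the idea in Soto, with an invented stack name and a caller-supplied template.

    import SotoCloudFormation

    // Without the acknowledgement, templates that create IAM resources fail with
    // an InsufficientCapabilities error.
    func createStackWithIAMResources(using cloudFormation: CloudFormation, templateBody: String) async throws {
        _ = try await cloudFormation.createStack(
            .init(
                capabilities: [.capabilityNamedIam],   // or .capabilityIam / .capabilityAutoExpand as the template requires
                stackName: "my-iam-stack",
                templateBody: templateBody
            )
        )
    }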
            "smithy.api#documentation": "The template resource types that you have permissions to work with for this create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. Use the following syntax to describe template resource types: AWS::* (for all Amazon Web Services resources), Custom::* (for all custom resources), Custom::logical_ID (for a specific custom resource), AWS::service_name::* (for all resources of a particular Amazon Web Services service), and AWS::service_name::resource_logical_ID (for a specific Amazon Web Services resource).
-            If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management.
+            If the list of resource types doesn't include a resource that you're creating, the stack creation fails. By default, CloudFormation grants permissions to all resource types. IAM uses this parameter for CloudFormation-specific condition keys in IAM policies. For more information, see Control access with Identity and Access Management.
             Only one of the Capabilities and ResourceType parameters can be specified."
        } },
        "RoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": {
The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to\n create the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role\n for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses\n this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

\n

If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role\n is available, CloudFormation uses a temporary session that's generated from your user credentials.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to create the\n stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always\n uses this role for all future operations on the stack. Provided that users have permission to\n operate on the stack, CloudFormation uses this role even if the users don't have permission to\n pass it. Ensure that the role grants least privilege.

\n

If you don't specify a value, CloudFormation uses the role that was previously associated with\n the stack. If no role is available, CloudFormation uses a temporary session that's generated from\n your user credentials.

" } }, "OnFailure": { "target": "com.amazonaws.cloudformation#OnFailure", "traits": { - "smithy.api#documentation": "

Determines what action will be taken if stack creation fails. This must be one of: DO_NOTHING,\n ROLLBACK, or DELETE. You can specify either OnFailure or\n DisableRollback, but not both.

\n

Default: ROLLBACK\n

" + "smithy.api#documentation": "

Determines what action will be taken if stack creation fails. This must be one of:\n DO_NOTHING, ROLLBACK, or DELETE. You can specify\n either OnFailure or DisableRollback, but not both.

\n

Default: ROLLBACK\n

" } }, "StackPolicyBody": { "target": "com.amazonaws.cloudformation#StackPolicyBody", "traits": { - "smithy.api#documentation": "

Structure containing the stack policy body. For more information, go to Prevent Updates to Stack Resources in\n the CloudFormation User Guide. You can specify either the StackPolicyBody or the\n StackPolicyURL parameter, but not both.

" + "smithy.api#documentation": "

Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide.\n You can specify either the StackPolicyBody or the StackPolicyURL\n parameter, but not both.

" } }, "StackPolicyURL": { "target": "com.amazonaws.cloudformation#StackPolicyURL", "traits": { - "smithy.api#documentation": "

Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in\n an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with https://.\n You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not\n both.

" + "smithy.api#documentation": "

Location of a file containing the stack policy. The URL must point to a policy (maximum\n size: 16 KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3\n bucket must start with https://. You can specify either the\n StackPolicyBody or the StackPolicyURL parameter, but not\n both.

" } }, "Tags": { "target": "com.amazonaws.cloudformation#Tags", "traits": { - "smithy.api#documentation": "

Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the resources created in\n the stack. A maximum number of 50 tags can be specified.

" + "smithy.api#documentation": "

Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the\n resources created in the stack. A maximum number of 50 tags can be specified.

" } }, "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

A unique identifier for this CreateStack request. Specify this token if you plan to retry requests\n so that CloudFormation knows that you're not attempting to create a stack with the same name. You might retry\n CreateStack requests to ensure that CloudFormation successfully received them.

\n

All events initiated by a given stack operation are assigned the same client request token, which you can use to\n track operations. For example, if you execute a CreateStack operation with the token\n token1, then all the StackEvents generated by that operation will have\n ClientRequestToken set as token1.

\n

In the console, stack operations display the client request token on the Events tab. Stack operations that are\n initiated from the console use the token format Console-StackOperation-ID, which helps you\n easily identify the stack operation . For example, if you create a stack using the console, each stack event would be\n assigned the same token in the following format:\n Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + "smithy.api#documentation": "

A unique identifier for this CreateStack request. Specify this token if you\n plan to retry requests so that CloudFormation knows that you're not attempting to create a stack\n with the same name. You might retry CreateStack requests to ensure that\n CloudFormation successfully received them.

\n

All events initiated by a given stack operation are assigned the same client request\n token, which you can use to track operations. For example, if you execute a\n CreateStack operation with the token token1, then all the\n StackEvents generated by that operation will have\n ClientRequestToken set as token1.

\n

In the console, stack operations display the client request token on the Events tab. Stack\n operations that are initiated from the console use the token format\n Console-StackOperation-ID, which helps you easily identify the stack\n operation. For example, if you create a stack using the console, each stack event would be\n assigned the same token in the following format:\n Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "EnableTerminationProtection": { "target": "com.amazonaws.cloudformation#EnableTerminationProtection", "traits": { - "smithy.api#documentation": "

Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with\n termination protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From\n Being Deleted in the CloudFormation User Guide. Termination protection is deactivated on stacks\n by default.

\n

For nested\n stacks, termination protection is set on the root stack and can't be changed directly on the nested\n stack.

" + "smithy.api#documentation": "

Whether to enable termination protection on the specified stack. If a user attempts to\n delete a stack with termination protection enabled, the operation fails and the stack remains\n unchanged. For more information, see Protect CloudFormation\n stacks from being deleted in the CloudFormation User Guide. Termination\n protection is deactivated on stacks by default.

\n

For nested stacks,\n termination protection is set on the root stack and can't be changed directly on the nested\n stack.

" } }, "RetainExceptOnCreate": { "target": "com.amazonaws.cloudformation#RetainExceptOnCreate", "traits": { - "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation rolls back. This includes\n newly created resources marked with a deletion policy of Retain.

\n

Default: false\n

" + "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation\n rolls back. This includes newly created resources marked with a deletion policy of\n Retain.

\n

Default: false\n

" } } }, @@ -2940,7 +2940,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack\n instance refers to a stack in a specific account and Region. You must specify at least one value for either\n Accounts or DeploymentTargets, and you must specify at least one value for\n Regions.

" + "smithy.api#documentation": "

Creates stack instances for the specified accounts, within the specified Amazon Web Services Regions. A\n stack instance refers to a stack in a specific account and Region. You must specify at least\n one value for either Accounts or DeploymentTargets, and you must\n specify at least one value for Regions.

" } }, "com.amazonaws.cloudformation#CreateStackInstancesInput": { @@ -2950,34 +2950,34 @@ "target": "com.amazonaws.cloudformation#StackSetName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or unique ID of the stack set that you want to create stack instances from.

", + "smithy.api#documentation": "

The name or unique ID of the stack set that you want to create stack instances\n from.

", "smithy.api#required": {} } }, "Accounts": { "target": "com.amazonaws.cloudformation#AccountList", "traits": { - "smithy.api#documentation": "

[Self-managed permissions] The names of one or more Amazon Web Services accounts that you want to create stack\n instances in the specified Region(s) for.

\n

You can specify Accounts or DeploymentTargets, but not both.

" + "smithy.api#documentation": "

[Self-managed permissions] The names of one or more Amazon Web Services accounts that you want to create\n stack instances in the specified Region(s) for.

\n

You can specify Accounts or DeploymentTargets, but not\n both.

" } }, "DeploymentTargets": { "target": "com.amazonaws.cloudformation#DeploymentTargets", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] The Organizations accounts for which to create stack instances in the\n specified Amazon Web Services Regions.

\n

You can specify Accounts or DeploymentTargets, but not both.

" + "smithy.api#documentation": "

[Service-managed permissions] The Organizations accounts for which to create stack\n instances in the specified Amazon Web Services Regions.

\n

You can specify Accounts or DeploymentTargets, but not\n both.

" } }, "Regions": { "target": "com.amazonaws.cloudformation#RegionList", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The names of one or more Amazon Web Services Regions where you want to create stack instances using the specified\n Amazon Web Services accounts.

", + "smithy.api#documentation": "

The names of one or more Amazon Web Services Regions where you want to create stack instances using the\n specified Amazon Web Services accounts.

", "smithy.api#required": {} } }, "ParameterOverrides": { "target": "com.amazonaws.cloudformation#Parameters", "traits": { - "smithy.api#documentation": "

A list of stack set parameters whose values you want to override in the selected stack instances.

\n

Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values\n during stack instance operations:

\n
    \n
  • \n

    To override the current value for a parameter, include the parameter and specify its value.

    \n
  • \n
  • \n

    To leave an overridden parameter set to its present value, include the parameter and specify\n UsePreviousValue as true. (You can't specify both a value and set\n UsePreviousValue to true.)

    \n
  • \n
  • \n

    To set an overridden parameter back to the value specified in the stack set, specify a parameter list but\n don't include the parameter in the list.

    \n
  • \n
  • \n

    To leave all parameters set to their present values, don't specify this property at all.

    \n
  • \n
\n

During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their\n overridden value.

\n

You can only override the parameter values that are specified in the stack set; to add or\n delete a parameter itself, use UpdateStackSet to update the stack set\n template.

" + "smithy.api#documentation": "

A list of stack set parameters whose values you want to override in the selected stack\n instances.

\n

Any overridden parameter values will be applied to all stack instances in the specified\n accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how\n CloudFormation sets parameter values during stack instance operations:

\n
    \n
  • \n

    To override the current value for a parameter, include the parameter and specify its\n value.

    \n
  • \n
  • \n

    To leave an overridden parameter set to its present value, include the parameter and\n specify UsePreviousValue as true. (You can't specify both a\n value and set UsePreviousValue to true.)

    \n
  • \n
  • \n

    To set an overridden parameter back to the value specified in the stack set, specify a\n parameter list but don't include the parameter in the list.

    \n
  • \n
  • \n

    To leave all parameters set to their present values, don't specify this property at\n all.

    \n
  • \n
\n

During stack set updates, any parameter values overridden for a stack instance aren't\n updated, but retain their overridden value.

\n

You can only override the parameter values that are specified in the\n stack set; to add or delete a parameter itself, use UpdateStackSet\n to update the stack set template.

" } }, "OperationPreferences": { @@ -2989,14 +2989,14 @@ "OperationId": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

The unique identifier for this stack set operation.

\n

The operation ID also functions as an idempotency token, to ensure that CloudFormation performs the stack set\n operation only once, even if you retry the request multiple times. You might retry stack set operation requests to\n ensure that CloudFormation successfully received them.

\n

If you don't specify an operation ID, the SDK generates one automatically.

\n

Repeating this stack set operation with a new operation ID retries all stack instances whose status is\n OUTDATED.

", + "smithy.api#documentation": "

The unique identifier for this stack set operation.

\n

The operation ID also functions as an idempotency token, to ensure that CloudFormation\n performs the stack set operation only once, even if you retry the request multiple times. You\n might retry stack set operation requests to ensure that CloudFormation successfully received\n them.

\n

If you don't specify an operation ID, the SDK generates one\n automatically.

\n

Repeating this stack set operation with a new operation ID retries all stack instances\n whose status is OUTDATED.

", "smithy.api#idempotencyToken": {} } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -3063,32 +3063,32 @@ "target": "com.amazonaws.cloudformation#StackSetName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name to associate with the stack set. The name must be unique in the Region where you create your stack\n set.

\n \n

A stack name can contain only alphanumeric characters (case-sensitive) and hyphens. It must start with an\n alphabetic character and can't be longer than 128 characters.

\n
", + "smithy.api#documentation": "

The name to associate with the stack set. The name must be unique in the Region where you\n create your stack set.

\n \n

A stack name can contain only alphanumeric characters (case-sensitive) and hyphens. It\n must start with an alphabetic character and can't be longer than 128 characters.

\n
", "smithy.api#required": {} } }, "Description": { "target": "com.amazonaws.cloudformation#Description", "traits": { - "smithy.api#documentation": "

A description of the stack set. You can use the description to identify the stack set's purpose or other\n important information.

" + "smithy.api#documentation": "

A description of the stack set. You can use the description to identify the stack set's\n purpose or other important information.

" } }, "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": { - "smithy.api#documentation": "

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200\n bytes. For more information, see Template Anatomy in the\n CloudFormation User Guide.

\n

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + "smithy.api#documentation": "

The structure that contains the template body, with a minimum length of 1 byte and a\n maximum length of 51,200 bytes.

\n

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but\n not both.

" } }, "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": { - "smithy.api#documentation": "

The location of the file that contains the template body. The URL must point to a template (maximum size:\n 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. For more information, see\n Template Anatomy\n in the CloudFormation User Guide.

\n

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but not both.

" + "smithy.api#documentation": "

The location of the file that contains the template body. The URL must point to a template\n (maximum size: 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager\n document.

\n

Conditional: You must specify either the TemplateBody or the TemplateURL parameter, but\n not both.

" } }, "StackId": { "target": "com.amazonaws.cloudformation#StackId", "traits": { - "smithy.api#documentation": "

The stack ID you are importing into a new stack set. Specify the Amazon Resource Name (ARN) of the stack.

" + "smithy.api#documentation": "

The stack ID you are importing into a new stack set. Specify the Amazon Resource Name\n (ARN) of the stack.

" } }, "Parameters": { @@ -3100,56 +3100,56 @@ "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "

In some cases, you must explicitly acknowledge that your stack set template contains certain capabilities in\n order for CloudFormation to create the stack set and related stack instances.

\n
    \n
  • \n

    \n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n

    \n

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account;\n for example, by creating new Identity and Access Management (IAM) users. For those stack sets, you must explicitly\n acknowledge this by specifying one of these capabilities.

    \n

    The following IAM resources require you to specify either the CAPABILITY_IAM or\n CAPABILITY_NAMED_IAM capability.

    \n
      \n
    • \n

      If you have IAM resources, you can specify either capability.

      \n
    • \n
    • \n

      If you have IAM resources with custom names, you must specify\n CAPABILITY_NAMED_IAM.

      \n
    • \n
    • \n

      If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.

      \n
    • \n
    \n

    If your stack template contains these resources, we recommend that you review all permissions associated with\n them and edit their permissions if necessary.

    \n \n

    For more information, see Acknowledging IAM Resources in\n CloudFormation Templates.

    \n
  • \n
  • \n

    \n CAPABILITY_AUTO_EXPAND\n

    \n

    Some templates reference macros. If your stack set template references one or more macros, you must create the\n stack set directly from the processed template, without first reviewing the resulting changes in a change set. To\n create the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to\n Perform Custom Processing on Templates.

    \n \n

    Stack sets with service-managed permissions don't currently support the use of macros in templates. (This\n includes the AWS::Include and AWS::Serverless transforms, which\n are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions,\n if you reference a macro in your template the stack set operation will fail.

    \n
    \n
  • \n
" + "smithy.api#documentation": "

In some cases, you must explicitly acknowledge that your stack set template contains\n certain capabilities in order for CloudFormation to create the stack set and related stack\n instances.

\n
    \n
  • \n

    \n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n

    \n

    Some stack templates might include resources that can affect permissions in your\n Amazon Web Services account; for example, by creating new IAM users. For those stack sets, you must\n explicitly acknowledge this by specifying one of these capabilities.

    \n

    The following IAM resources require you to specify either the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

    \n
      \n
    • \n

      If you have IAM resources, you can specify either capability.

      \n
    • \n
    • \n

      If you have IAM resources with custom names, you must\n specify CAPABILITY_NAMED_IAM.

      \n
    • \n
    • \n

      If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.

      \n
    • \n
    \n

    If your stack template contains these resources, we recommend that you review all\n permissions associated with them and edit their permissions if necessary.

    \n \n

    For more information, see Acknowledging IAM resources in CloudFormation templates.

    \n
  • \n
  • \n

    \n CAPABILITY_AUTO_EXPAND\n

    \n

    Some templates reference macros. If your stack set template references one or more\n macros, you must create the stack set directly from the processed template, without first\n reviewing the resulting changes in a change set. To create the stack set directly, you\n must acknowledge this capability. For more information, see Using CloudFormation Macros to\n Perform Custom Processing on Templates.

    \n \n

    Stack sets with service-managed permissions don't currently support the use of\n macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if\n you specify this capability for a stack set with service-managed permissions, if you\n reference a macro in your template the stack set operation will fail.

    \n
    \n
  • \n
" } }, "Tags": { "target": "com.amazonaws.cloudformation#Tags", "traits": { - "smithy.api#documentation": "

The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates\n these tags to supported resources that are created in the stacks. A maximum number of 50 tags can be\n specified.

\n

If you specify tags as part of a CreateStackSet action, CloudFormation checks to see if you have the\n required IAM permission to tag resources. If you don't, the entire CreateStackSet action fails with an\n access denied error, and the stack set is not created.

" + "smithy.api#documentation": "

The key-value pairs to associate with this stack set and the stacks created from it.\n CloudFormation also propagates these tags to supported resources that are created in the stacks. A\n maximum number of 50 tags can be specified.

\n

If you specify tags as part of a CreateStackSet action, CloudFormation checks to\n see if you have the required IAM permission to tag resources. If you don't, the entire\n CreateStackSet action fails with an access denied error, and the\n stack set is not created.

" } }, "AdministrationRoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.

\n

Specify an IAM role only if you are using customized administrator roles to control which users or groups can\n manage specific stack sets within the same administrator account. For more information, see Prerequisites: Granting\n Permissions for Stack Set Operations in the CloudFormation User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role to use to create this stack set.

\n

Specify an IAM role only if you are using customized administrator roles to control\n which users or groups can manage specific stack sets within the same administrator account.\n For more information, see Prerequisites: Granting\n Permissions for Stack Set Operations in the\n CloudFormation User Guide.

" } }, "ExecutionRoleName": { "target": "com.amazonaws.cloudformation#ExecutionRoleName", "traits": { - "smithy.api#documentation": "

The name of the IAM execution role to use to create the stack set. If you do not specify an execution role,\n CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.

\n

Specify an IAM role only if you are using customized execution roles to control which stack resources users\n and groups can include in their stack sets.

" + "smithy.api#documentation": "

The name of the IAM execution role to use to create the stack set. If you do not specify\n an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole\n role for the stack set operation.

\n

Specify an IAM role only if you are using customized execution roles to control which\n stack resources users and groups can include in their stack sets.

" } }, "PermissionModel": { "target": "com.amazonaws.cloudformation#PermissionModels", "traits": { - "smithy.api#documentation": "

Describes how the IAM roles required for stack set operations are created. By default,\n SELF-MANAGED is specified.

\n " + "smithy.api#documentation": "

Describes how the IAM roles required for stack set operations are created. By default,\n SELF-MANAGED is specified.

\n " } }, "AutoDeployment": { "target": "com.amazonaws.cloudformation#AutoDeployment", "traits": { - "smithy.api#documentation": "

Describes whether StackSets automatically deploys to Organizations accounts that are added to the target\n organization or organizational unit (OU). Specify only if PermissionModel is\n SERVICE_MANAGED.

" + "smithy.api#documentation": "

Describes whether StackSets automatically deploys to Organizations accounts that\n are added to the target organization or organizational unit (OU). Specify only if\n PermissionModel is SERVICE_MANAGED.

" } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    To create a stack set with service-managed permissions while signed in to the management account,\n specify SELF.

    \n
  • \n
  • \n

    To create a stack set with service-managed permissions while signed in to a delegated administrator account,\n specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated admin in the management account.\n For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
\n

Stack sets with service-managed permissions are created in the management account, including stack\n sets that are created by delegated administrators.

" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    To create a stack set with service-managed permissions while signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    To create a stack set with service-managed permissions while signed in to a delegated\n administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated admin in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
\n

Stack sets with service-managed permissions are created in the management account, including stack sets that are created by delegated\n administrators.

" } }, "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

A unique identifier for this CreateStackSet request. Specify this token if you plan to retry\n requests so that CloudFormation knows that you're not attempting to create another stack set with the same name. You might\n retry CreateStackSet requests to ensure that CloudFormation successfully received them.

\n

If you don't specify an operation ID, the SDK generates one automatically.

", + "smithy.api#documentation": "

A unique identifier for this CreateStackSet request. Specify this token if\n you plan to retry requests so that CloudFormation knows that you're not attempting to create\n another stack set with the same name. You might retry CreateStackSet requests to\n ensure that CloudFormation successfully received them.

\n

If you don't specify an operation ID, the SDK generates one\n automatically.

", "smithy.api#idempotencyToken": {} } }, "ManagedExecution": { "target": "com.amazonaws.cloudformation#ManagedExecution", "traits": { - "smithy.api#documentation": "

Describes whether StackSets performs non-conflicting operations concurrently and queues conflicting\n operations.

" + "smithy.api#documentation": "

Describes whether StackSets performs non-conflicting operations concurrently and queues\n conflicting operations.

" } } }, @@ -3208,7 +3208,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deactivates trusted access with Organizations. If trusted access is deactivated, the management account does not have permissions to create and manage service-managed StackSets for your\n organization.

" + "smithy.api#documentation": "

Deactivates trusted access with Organizations. If trusted access is deactivated,\n the management account does not have permissions to create and manage\n service-managed StackSets for your organization.

" } }, "com.amazonaws.cloudformation#DeactivateOrganizationsAccessInput": { @@ -3242,7 +3242,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deactivates a public extension that was previously activated in this account and Region.

\n

Once deactivated, an extension can't be used in any CloudFormation operation. This includes stack update operations\n where the stack template includes the extension, even if no updates are being made to the extension. In addition,\n deactivated extensions aren't automatically updated if a new version of the extension is released.

", + "smithy.api#documentation": "

Deactivates a public extension that was previously activated in this account and\n Region.

\n

Once deactivated, an extension can't be used in any CloudFormation operation. This includes\n stack update operations where the stack template includes the extension, even if no updates\n are being made to the extension. In addition, deactivated extensions aren't automatically\n updated if a new version of the extension is released.

", "smithy.api#idempotent": {} } }, @@ -3252,19 +3252,19 @@ "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The type name of the extension, in this account and Region. If you specified a type name alias when enabling the\n extension, use the type name alias.

\n

Conditional: You must specify either Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The type name of the extension, in this account and Region. If you specified a type name\n alias when enabling the extension, use the type name alias.

\n

Conditional: You must specify either Arn, or TypeName and\n Type.

" } }, "Type": { "target": "com.amazonaws.cloudformation#ThirdPartyType", "traits": { - "smithy.api#documentation": "

The extension type.

\n

Conditional: You must specify either Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The extension type.

\n

Conditional: You must specify either Arn, or TypeName and\n Type.

" } }, "Arn": { "target": "com.amazonaws.cloudformation#PrivateTypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

\n

Conditional: You must specify either Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

\n

Conditional: You must specify either Arn, or TypeName and\n Type.

" } } }, @@ -3293,7 +3293,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.

\n

If the call successfully completes, CloudFormation successfully deleted the change set.

\n

If IncludeNestedStacks specifies True during the creation of the nested change set,\n then DeleteChangeSet will delete all change sets that belong to the stacks hierarchy and will also\n delete all change sets for nested stacks with the status of REVIEW_IN_PROGRESS.

" + "smithy.api#documentation": "

Deletes the specified change set. Deleting change sets ensures that no one executes the\n wrong change set.

\n

If the call successfully completes, CloudFormation successfully deleted the change set.

\n

If IncludeNestedStacks specifies True during the creation of the\n nested change set, then DeleteChangeSet will delete all change sets that belong\n to the stacks hierarchy and will also delete all change sets for nested stacks with the status\n of REVIEW_IN_PROGRESS.

" } }, "com.amazonaws.cloudformation#DeleteChangeSetInput": { @@ -3310,7 +3310,7 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackNameOrId", "traits": { - "smithy.api#documentation": "

If you specified the name of a change set to delete, specify the stack name or Amazon Resource Name (ARN) that's\n associated with it.

" + "smithy.api#documentation": "

If you specified the name of a change set to delete, specify the stack name or Amazon\n Resource Name (ARN) that's associated with it.

" } } }, @@ -3386,7 +3386,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks don't\n show up in the DescribeStacks operation if the deletion has been completed successfully.

" + "smithy.api#documentation": "

Deletes a specified stack. Once the call completes successfully, stack deletion starts.\n Deleted stacks don't show up in the DescribeStacks operation if the deletion\n has been completed successfully.

\n

For more information about deleting a stack, see Delete a stack from\n the CloudFormation console in the CloudFormation User Guide.

" } }, "com.amazonaws.cloudformation#DeleteStackInput": { @@ -3403,25 +3403,25 @@ "RetainResources": { "target": "com.amazonaws.cloudformation#RetainResources", "traits": { - "smithy.api#documentation": "

For stacks in the DELETE_FAILED state, a list of resource logical IDs that are associated with the\n resources you want to retain. During deletion, CloudFormation deletes the stack but doesn't delete the retained\n resources.

\n

Retaining resources is useful when you can't delete a resource, such as a non-empty S3 bucket, but you want to\n delete the stack.

" + "smithy.api#documentation": "

For stacks in the DELETE_FAILED state, a list of resource logical IDs that\n are associated with the resources you want to retain. During deletion, CloudFormation deletes the\n stack but doesn't delete the retained resources.

\n

Retaining resources is useful when you can't delete a resource, such as a non-empty S3\n bucket, but you want to delete the stack.

" } }, "RoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to\n delete the stack. CloudFormation uses the role's credentials to make calls on your behalf.

\n

If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role\n is available, CloudFormation uses a temporary session that's generated from your user credentials.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to delete the\n stack. CloudFormation uses the role's credentials to make calls on your behalf.

\n

If you don't specify a value, CloudFormation uses the role that was previously associated with\n the stack. If no role is available, CloudFormation uses a temporary session that's generated from\n your user credentials.

" } }, "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

A unique identifier for this DeleteStack request. Specify this token if you plan to retry requests\n so that CloudFormation knows that you're not attempting to delete a stack with the same name. You might retry\n DeleteStack requests to ensure that CloudFormation successfully received them.

\n

All events initiated by a given stack operation are assigned the same client request token, which you can use to\n track operations. For example, if you execute a CreateStack operation with the token\n token1, then all the StackEvents generated by that operation will have\n ClientRequestToken set as token1.

\n

In the console, stack operations display the client request token on the Events tab. Stack operations that are\n initiated from the console use the token format Console-StackOperation-ID, which helps you\n easily identify the stack operation . For example, if you create a stack using the console, each stack event would be\n assigned the same token in the following format:\n Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + "smithy.api#documentation": "

A unique identifier for this DeleteStack request. Specify this token if you\n plan to retry requests so that CloudFormation knows that you're not attempting to delete a stack\n with the same name. You might retry DeleteStack requests to ensure that\n CloudFormation successfully received them.

\n

All events initiated by a given stack operation are assigned the same client request\n token, which you can use to track operations. For example, if you execute a\n CreateStack operation with the token token1, then all the\n StackEvents generated by that operation will have\n ClientRequestToken set as token1.

\n

In the console, stack operations display the client request token on the Events tab. Stack\n operations that are initiated from the console use the token format\n Console-StackOperation-ID, which helps you easily identify the stack\n operation. For example, if you create a stack using the console, each stack event would be\n assigned the same token in the following format:\n Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "DeletionMode": { "target": "com.amazonaws.cloudformation#DeletionMode", "traits": { - "smithy.api#documentation": "

Specifies the deletion mode for the stack. Possible values are:

\n
    \n
  • \n

    \n STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this\n parameter.

    \n
  • \n
  • \n

    \n FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to\n resource deletion failure.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies the deletion mode for the stack. Possible values are:

\n
    \n
  • \n

    \n STANDARD - Use the standard behavior. Specifying this value is the same\n as not specifying this parameter.

    \n
  • \n
  • \n

    \n FORCE_DELETE_STACK - Delete the stack if it's stuck in a\n DELETE_FAILED state due to resource deletion failure.

    \n
  • \n
" } } }, @@ -3473,13 +3473,13 @@ "Accounts": { "target": "com.amazonaws.cloudformation#AccountList", "traits": { - "smithy.api#documentation": "

[Self-managed permissions] The names of the Amazon Web Services accounts that you want to delete stack instances\n for.

\n

You can specify Accounts or DeploymentTargets, but not both.

" + "smithy.api#documentation": "

[Self-managed permissions] The names of the Amazon Web Services accounts that you want to delete stack\n instances for.

\n

You can specify Accounts or DeploymentTargets, but not\n both.

" } }, "DeploymentTargets": { "target": "com.amazonaws.cloudformation#DeploymentTargets", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] The Organizations accounts from which to delete stack instances.

\n

You can specify Accounts or DeploymentTargets, but not both.

" + "smithy.api#documentation": "

[Service-managed permissions] The Organizations accounts from which to delete\n stack instances.

\n

You can specify Accounts or DeploymentTargets, but not\n both.

" } }, "Regions": { @@ -3500,21 +3500,21 @@ "target": "com.amazonaws.cloudformation#RetainStacks", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Removes the stack instances from the specified stack set, but doesn't delete the stacks. You can't reassociate a\n retained stack or add an existing, saved stack to a new stack set.

\n

For more information, see Stack set operation\n options.

", + "smithy.api#documentation": "

Removes the stack instances from the specified stack set, but doesn't delete the stacks.\n You can't reassociate a retained stack or add an existing, saved stack to a new stack\n set.

\n

For more information, see Stack set operation options.

", "smithy.api#required": {} } }, "OperationId": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

The unique identifier for this stack set operation.

\n

If you don't specify an operation ID, the SDK generates one automatically.

\n

The operation ID also functions as an idempotency token, to ensure that CloudFormation performs the stack set\n operation only once, even if you retry the request multiple times. You can retry stack set operation requests to\n ensure that CloudFormation successfully received them.

\n

Repeating this stack set operation with a new operation ID retries all stack instances whose status is\n OUTDATED.

", + "smithy.api#documentation": "

The unique identifier for this stack set operation.

\n

If you don't specify an operation ID, the SDK generates one\n automatically.

\n

The operation ID also functions as an idempotency token, to ensure that CloudFormation\n performs the stack set operation only once, even if you retry the request multiple times. You\n can retry stack set operation requests to ensure that CloudFormation successfully received\n them.

\n

Repeating this stack set operation with a new operation ID retries all stack instances\n whose status is OUTDATED.

", "smithy.api#idempotencyToken": {} } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -3553,7 +3553,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a stack set. Before you can delete a stack set, all its member stack instances must be deleted. For more\n information about how to complete this, see DeleteStackInstances.

" + "smithy.api#documentation": "

Deletes a stack set. Before you can delete a stack set, all its member stack instances\n must be deleted. For more information about how to complete this, see DeleteStackInstances.

" } }, "com.amazonaws.cloudformation#DeleteStackSetInput": { @@ -3563,14 +3563,14 @@ "target": "com.amazonaws.cloudformation#StackSetName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or unique ID of the stack set that you're deleting. You can obtain this value by running ListStackSets.

", + "smithy.api#documentation": "

The name or unique ID of the stack set that you're deleting. You can obtain this value by\n running ListStackSets.

", "smithy.api#required": {} } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -3671,7 +3671,7 @@ } ], "traits": { - "smithy.api#documentation": "

Marks an extension or extension version as DEPRECATED in the CloudFormation registry, removing it from\n active use. Deprecated extensions or extension versions cannot be used in CloudFormation operations.

\n

To deregister an entire extension, you must individually deregister all active versions of that extension. If an\n extension has only a single active version, deregistering that version results in the extension itself being\n deregistered and marked as deprecated in the registry.

\n

You can't deregister the default version of an extension if there are other active version of that extension. If\n you do deregister the default version of an extension, the extension type itself is deregistered as well and marked\n as deprecated.

\n

To view the deprecation status of an extension or extension version, use DescribeType.

", + "smithy.api#documentation": "

Marks an extension or extension version as DEPRECATED in the CloudFormation\n registry, removing it from active use. Deprecated extensions or extension versions cannot be\n used in CloudFormation operations.

\n

To deregister an entire extension, you must individually deregister all active versions of\n that extension. If an extension has only a single active version, deregistering that version\n results in the extension itself being deregistered and marked as deprecated in the\n registry.

\n

You can't deregister the default version of an extension if there are other active versions\n of that extension. If you do deregister the default version of an extension, the extension\n type itself is deregistered as well and marked as deprecated.

\n

To view the deprecation status of an extension or extension version, use DescribeType.

", "smithy.api#idempotent": {} } }, @@ -3681,25 +3681,25 @@ "Arn": { "target": "com.amazonaws.cloudformation#PrivateTypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "Type": { "target": "com.amazonaws.cloudformation#RegistryType", "traits": { - "smithy.api#documentation": "

The kind of extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The kind of extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "VersionId": { "target": "com.amazonaws.cloudformation#TypeVersionId", "traits": { - "smithy.api#documentation": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource\n Name (ARN) assigned to the extension version when it is registered.

" + "smithy.api#documentation": "

The ID of a specific version of the extension. The version ID is the value at the end of\n the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

" } } }, @@ -3723,7 +3723,7 @@ "target": "com.amazonaws.cloudformation#DescribeAccountLimitsOutput" }, "traits": { - "smithy.api#documentation": "

Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you can create in your account.\n For more information about account limits, see CloudFormation Quotas in the\n CloudFormation User Guide.

", + "smithy.api#documentation": "

Retrieves your account's CloudFormation limits, such as the maximum number of stacks that you\n can create in your account. For more information about account limits, see Understand CloudFormation quotas in the CloudFormation User Guide.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3752,13 +3752,13 @@ "AccountLimits": { "target": "com.amazonaws.cloudformation#AccountLimitList", "traits": { - "smithy.api#documentation": "

An account limit structure that contain a list of CloudFormation account limits and their values.

" + "smithy.api#documentation": "

An account limit structure that contains a list of CloudFormation account limits and their\n values.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the output exceeds 1 MB in size, a string that identifies the next page of limits. If no additional page\n exists, this value is null.

" + "smithy.api#documentation": "

If the output exceeds 1 MB in size, a string that identifies the next page of limits. If\n no additional page exists, this value is null.

" } } }, @@ -3781,7 +3781,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the inputs for the change set and a list of changes that CloudFormation will make if you execute the change set.\n For more information, see Updating Stacks Using Change\n Sets in the CloudFormation User Guide.

", + "smithy.api#documentation": "

Returns the inputs for the change set and a list of changes that CloudFormation will make if\n you execute the change set. For more information, see Update\n CloudFormation stacks using change sets in the\n CloudFormation User Guide.

", "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], @@ -3835,7 +3835,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns hook-related information for the change set and a list of changes that CloudFormation makes when you run the\n change set.

" + "smithy.api#documentation": "

Returns hook-related information for the change set and a list of changes that CloudFormation\n makes when you run the change set.

" } }, "com.amazonaws.cloudformation#DescribeChangeSetHooksInput": { @@ -3852,19 +3852,19 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackNameOrId", "traits": { - "smithy.api#documentation": "

If you specified the name of a change set, specify the stack name or stack ID (ARN) of the change set you want\n to describe.

" + "smithy.api#documentation": "

If you specified the name of a change set, specify the stack name or stack ID (ARN) of the\n change set you want to describe.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

A string, provided by the DescribeChangeSetHooks response output, that identifies the next page of\n information that you want to retrieve.

" + "smithy.api#documentation": "

A string, provided by the DescribeChangeSetHooks response output, that\n identifies the next page of information that you want to retrieve.

" } }, "LogicalResourceId": { "target": "com.amazonaws.cloudformation#LogicalResourceId", "traits": { - "smithy.api#documentation": "

If specified, lists only the hooks related to the specified LogicalResourceId.

" + "smithy.api#documentation": "

If specified, lists only the hooks related to the specified\n LogicalResourceId.

" } } }, @@ -3936,19 +3936,19 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackNameOrId", "traits": { - "smithy.api#documentation": "

If you specified the name of a change set, specify the stack name or ID (ARN) of the change set you want to\n describe.

" + "smithy.api#documentation": "

If you specified the name of a change set, specify the stack name or ID (ARN) of the\n change set you want to describe.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

A string (provided by the DescribeChangeSet response output) that identifies the next page of\n information that you want to retrieve.

" + "smithy.api#documentation": "

A string (provided by the DescribeChangeSet response output) that\n identifies the next page of information that you want to retrieve.

" } }, "IncludePropertyValues": { "target": "com.amazonaws.cloudformation#IncludePropertyValues", "traits": { - "smithy.api#documentation": "

If true, the returned changes include detailed changes in the property values.

" + "smithy.api#documentation": "

If true, the returned changes include detailed changes in the property\n values.

" } } }, @@ -3993,7 +3993,7 @@ "Parameters": { "target": "com.amazonaws.cloudformation#Parameters", "traits": { - "smithy.api#documentation": "

A list of Parameter structures that describes the input parameters and their values used to create\n the change set. For more information, see the Parameter data type.

" + "smithy.api#documentation": "

A list of Parameter structures that describes the input parameters and their\n values used to create the change set. For more information, see the Parameter data type.

" } }, "CreationTime": { @@ -4005,37 +4005,37 @@ "ExecutionStatus": { "target": "com.amazonaws.cloudformation#ExecutionStatus", "traits": { - "smithy.api#documentation": "

If the change set execution status is AVAILABLE, you can execute the change set. If you can't\n execute the change set, the status indicates why. For example, a change set might be in an UNAVAILABLE\n state because CloudFormation is still creating it or in an OBSOLETE state because the stack was already\n updated.

" + "smithy.api#documentation": "

If the change set execution status is AVAILABLE, you can execute the change\n set. If you can't execute the change set, the status indicates why. For example, a change set\n might be in an UNAVAILABLE state because CloudFormation is still creating it or in an\n OBSOLETE state because the stack was already updated.

" } }, "Status": { "target": "com.amazonaws.cloudformation#ChangeSetStatus", "traits": { - "smithy.api#documentation": "

The current status of the change set, such as CREATE_IN_PROGRESS, CREATE_COMPLETE, or\n FAILED.

" + "smithy.api#documentation": "

The current status of the change set, such as CREATE_IN_PROGRESS,\n CREATE_COMPLETE, or FAILED.

" } }, "StatusReason": { "target": "com.amazonaws.cloudformation#ChangeSetStatusReason", "traits": { - "smithy.api#documentation": "

A description of the change set's status. For example, if your attempt to create a change set failed, CloudFormation shows the error message.

" + "smithy.api#documentation": "

A description of the change set's status. For example, if your attempt to create a change\n set failed, CloudFormation shows the error message.

" } }, "NotificationARNs": { "target": "com.amazonaws.cloudformation#NotificationARNs", "traits": { - "smithy.api#documentation": "

The ARNs of the Amazon Simple Notification Service (Amazon SNS) topics that will be associated with the stack if you\n execute the change set.

" + "smithy.api#documentation": "

The ARNs of the Amazon SNS topics that will be associated with the stack if you execute the\n change set.

" } }, "RollbackConfiguration": { "target": "com.amazonaws.cloudformation#RollbackConfiguration", "traits": { - "smithy.api#documentation": "

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified\n monitoring period afterwards.

" + "smithy.api#documentation": "

The rollback triggers for CloudFormation to monitor during stack creation and updating\n operations, and for the specified monitoring period afterwards.

" } }, "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "

If you execute the change set, the list of capabilities that were explicitly acknowledged when the change set\n was created.

" + "smithy.api#documentation": "

If you execute the change set, the list of capabilities that were explicitly acknowledged\n when the change set was created.

" } }, "Tags": { @@ -4047,13 +4047,13 @@ "Changes": { "target": "com.amazonaws.cloudformation#Changes", "traits": { - "smithy.api#documentation": "

A list of Change structures that describes the resources CloudFormation changes if you execute the change\n set.

" + "smithy.api#documentation": "

A list of Change structures that describes the resources CloudFormation changes\n if you execute the change set.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the output exceeds 1 MB, a string that identifies the next page of changes. If there is no additional page,\n this value is null.

" + "smithy.api#documentation": "

If the output exceeds 1 MB, a string that identifies the next page of changes. If there is\n no additional page, this value is null.

" } }, "IncludeNestedStacks": { @@ -4065,25 +4065,25 @@ "ParentChangeSetId": { "target": "com.amazonaws.cloudformation#ChangeSetId", "traits": { - "smithy.api#documentation": "

Specifies the change set ID of the parent change set in the current nested change set hierarchy.

" + "smithy.api#documentation": "

Specifies the change set ID of the parent change set in the current nested change set\n hierarchy.

" } }, "RootChangeSetId": { "target": "com.amazonaws.cloudformation#ChangeSetId", "traits": { - "smithy.api#documentation": "

Specifies the change set ID of the root change set in the current nested change set hierarchy.

" + "smithy.api#documentation": "

Specifies the change set ID of the root change set in the current nested change set\n hierarchy.

" } }, "OnStackFailure": { "target": "com.amazonaws.cloudformation#OnStackFailure", "traits": { - "smithy.api#documentation": "

Determines what action will be taken if stack creation fails. When this parameter is specified, the\n DisableRollback parameter to the ExecuteChangeSet API operation must\n not be specified. This must be one of these values:

\n
    \n
  • \n

    \n DELETE - Deletes the change set if the stack creation fails. This is only valid when the\n ChangeSetType parameter is set to CREATE. If the deletion of the stack fails, the status\n of the stack is DELETE_FAILED.

    \n
  • \n
  • \n

    \n DO_NOTHING - if the stack creation fails, do nothing. This is equivalent to specifying\n true for the DisableRollback parameter to the ExecuteChangeSet API\n operation.

    \n
  • \n
  • \n

    \n ROLLBACK - if the stack creation fails, roll back the stack. This is equivalent to specifying\n false for the DisableRollback parameter to the ExecuteChangeSet API\n operation.

    \n
  • \n
" + "smithy.api#documentation": "

Determines what action will be taken if stack creation fails. When this parameter is\n specified, the DisableRollback parameter to the ExecuteChangeSet API operation must not be specified. This must be one of these\n values:

\n
    \n
  • \n

    \n DELETE - Deletes the change set if the stack creation fails. This is only\n valid when the ChangeSetType parameter is set to CREATE. If the\n deletion of the stack fails, the status of the stack is DELETE_FAILED.

    \n
  • \n
  • \n

    \n DO_NOTHING - if the stack creation fails, do nothing. This is equivalent\n to specifying true for the DisableRollback parameter to the\n ExecuteChangeSet API operation.

    \n
  • \n
  • \n

    \n ROLLBACK - if the stack creation fails, roll back the stack. This is\n equivalent to specifying false for the DisableRollback parameter\n to the ExecuteChangeSet API operation.

    \n
  • \n
" } }, "ImportExistingResources": { "target": "com.amazonaws.cloudformation#ImportExistingResources", "traits": { - "smithy.api#documentation": "

Indicates if the change set imports resources that already exist.

\n \n

This parameter can only import resources that have custom names in templates. To import\n resources that do not accept custom names, such as EC2 instances, use the resource import feature instead.

\n
" + "smithy.api#documentation": "

Indicates if the change set imports resources that already exist.

\n \n

This parameter can only import resources that have custom names in\n templates. To import resources that do not accept custom names, such as EC2 instances, use\n the resource import\n feature instead.

\n
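Editorial aside for Soto users: a minimal Swift sketch of how the DescribeChangeSet documentation updated above surfaces through the generated `SotoCloudFormation` client. The stack and change-set names are placeholders, and the `AWSClient()` setup, `shutdown()` call, and member labels assume Soto's usual code-generation conventions and current soto-core defaults, so they may differ slightly between releases.

```swift
import SotoCloudFormation

// Placeholder names; AWSClient() relies on the default credential and HTTP configuration.
let client = AWSClient()
let cloudFormation = CloudFormation(client: client, region: .useast1)

let changeSet = try await cloudFormation.describeChangeSet(
    .init(
        changeSetName: "sample-change-set",   // name or ARN of the change set
        includePropertyValues: true,          // ask for property-level change details
        stackName: "sample-stack"             // needed when a change set name (not an ARN) is given
    )
)
print(changeSet.executionStatus?.rawValue ?? "-", changeSet.status?.rawValue ?? "-")

// NextToken paging over the Changes list, as described in the documentation above.
var next = changeSet.nextToken
while let token = next {
    let page = try await cloudFormation.describeChangeSet(
        .init(changeSetName: "sample-change-set", nextToken: token, stackName: "sample-stack")
    )
    next = page.nextToken
}

try await client.shutdown()   // shut the client down when finished (exact API may vary by soto-core version)
```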
" } } }, @@ -4106,7 +4106,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a generated template. The output includes details about the progress of the creation of a generated\n template started by a CreateGeneratedTemplate API action or the update of a generated template started\n with an UpdateGeneratedTemplate API action.

" + "smithy.api#documentation": "

Describes a generated template. The output includes details about the progress of the\n creation of a generated template started by a CreateGeneratedTemplate API action\n or the update of a generated template started with an UpdateGeneratedTemplate API\n action.

" } }, "com.amazonaws.cloudformation#DescribeGeneratedTemplateInput": { @@ -4131,7 +4131,7 @@ "GeneratedTemplateId": { "target": "com.amazonaws.cloudformation#GeneratedTemplateId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the generated template. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For example,\n arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc\n .

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the generated template. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}.\n For example,\n arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc\n .

" } }, "GeneratedTemplateName": { @@ -4143,7 +4143,7 @@ "Resources": { "target": "com.amazonaws.cloudformation#ResourceDetails", "traits": { - "smithy.api#documentation": "

A list of objects describing the details of the resources in the template generation.

" + "smithy.api#documentation": "

A list of objects describing the details of the resources in the template\n generation.

" } }, "Status": { @@ -4155,7 +4155,7 @@ "StatusReason": { "target": "com.amazonaws.cloudformation#TemplateStatusReason", "traits": { - "smithy.api#documentation": "

The reason for the current template generation status. This will provide more details if a failure\n happened.

" + "smithy.api#documentation": "

The reason for the current template generation status. This will provide more details if a\n failure happened.

" } }, "CreationTime": { @@ -4179,19 +4179,19 @@ "StackId": { "target": "com.amazonaws.cloudformation#StackId", "traits": { - "smithy.api#documentation": "

The stack ARN of the base stack if a base stack was provided when generating the template.

" + "smithy.api#documentation": "

The stack ARN of the base stack if a base stack was provided when generating the\n template.

" } }, "TemplateConfiguration": { "target": "com.amazonaws.cloudformation#TemplateConfiguration", "traits": { - "smithy.api#documentation": "

The configuration details of the generated template, including the DeletionPolicy and\n UpdateReplacePolicy.

" + "smithy.api#documentation": "

The configuration details of the generated template, including the\n DeletionPolicy and UpdateReplacePolicy.

" } }, "TotalWarnings": { "target": "com.amazonaws.cloudformation#TotalWarnings", "traits": { - "smithy.api#documentation": "

The number of warnings generated for this template. The warnings are found in the details of each of the\n resources in the template.

" + "smithy.api#documentation": "

The number of warnings generated for this template. The warnings are found in the details\n of each of the resources in the template.
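A hedged sketch of the corresponding Soto call for the DescribeGeneratedTemplate documentation above; `cloudFormation` is assumed to be an already-configured client, and the member labels follow Soto's usual lowerCamelCase mapping of the model members.

```swift
import SotoCloudFormation

/// Sketch: report the progress of a generated template by name or ARN
/// (arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}).
func reportGeneratedTemplate(_ cloudFormation: CloudFormation, name: String) async throws {
    let detail = try await cloudFormation.describeGeneratedTemplate(
        .init(generatedTemplateName: name)
    )
    print("status:", detail.status?.rawValue ?? "UNKNOWN")
    print("warnings:", detail.totalWarnings ?? 0)   // warnings live in each resource's details
    if let reason = detail.statusReason {
        print("reason:", reason)                    // more detail when generation fails
    }
}
```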

" } } }, @@ -4216,7 +4216,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about the account's OrganizationAccess status. This API can be called either\n by the management account or the delegated administrator by using the CallAs parameter. This API can\n also be called without the CallAs parameter by the management account.

" + "smithy.api#documentation": "

Retrieves information about the account's OrganizationAccess status. This API\n can be called either by the management account or the delegated administrator by using the\n CallAs parameter. This API can also be called without the CallAs\n parameter by the management account.

" } }, "com.amazonaws.cloudformation#DescribeOrganizationsAccessInput": { @@ -4225,7 +4225,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -4261,7 +4261,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about a CloudFormation extension publisher.

\n

If you don't supply a PublisherId, and you have registered as an extension publisher,\n DescribePublisher returns information about your own publisher account.

\n

For more information about registering as a publisher, see:

\n ", + "smithy.api#documentation": "

Returns information about a CloudFormation extension publisher.

\n

If you don't supply a PublisherId, and you have registered as an extension\n publisher, DescribePublisher returns information about your own publisher\n account.

\n

For more information about registering as a publisher, see:

\n ", "smithy.api#idempotent": {} } }, @@ -4271,7 +4271,7 @@ "PublisherId": { "target": "com.amazonaws.cloudformation#PublisherId", "traits": { - "smithy.api#documentation": "

The ID of the extension publisher.

\n

If you don't supply a PublisherId, and you have registered as an extension publisher,\n DescribePublisher returns information about your own publisher account.

" + "smithy.api#documentation": "

The ID of the extension publisher.

\n

If you don't supply a PublisherId, and you have registered as an extension\n publisher, DescribePublisher returns information about your own publisher\n account.

" } } }, @@ -4291,13 +4291,13 @@ "PublisherStatus": { "target": "com.amazonaws.cloudformation#PublisherStatus", "traits": { - "smithy.api#documentation": "

Whether the publisher is verified. Currently, all registered publishers are verified.

" + "smithy.api#documentation": "

Whether the publisher is verified. Currently, all registered publishers are\n verified.

" } }, "IdentityProvider": { "target": "com.amazonaws.cloudformation#IdentityProvider", "traits": { - "smithy.api#documentation": "

The type of account used as the identity provider when registering this publisher with CloudFormation.

" + "smithy.api#documentation": "

The type of account used as the identity provider when registering this publisher with\n CloudFormation.
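The DescribePublisher behaviour described above (omit `PublisherId` to describe your own publisher account) looks roughly like this through the generated client; this is a sketch, not the shipped example code.

```swift
import SotoCloudFormation

/// Sketch: with no PublisherId, DescribePublisher describes your own publisher
/// account; pass an ID to look up a third-party publisher instead.
func describeMyPublisher(_ cloudFormation: CloudFormation) async throws {
    let me = try await cloudFormation.describePublisher(.init())
    print("publisher:", me.publisherId ?? "-")
    print("status:", me.publisherStatus?.rawValue ?? "-")
    print("identity provider:", me.identityProvider?.rawValue ?? "-")
}
```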

" } }, "PublisherProfile": { @@ -4350,19 +4350,19 @@ "ResourceScanId": { "target": "com.amazonaws.cloudformation#ResourceScanId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource scan. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:resourceScan/${Id}. An example is\n arn:aws:cloudformation:us-east-1:123456789012:resourceScan/f5b490f7-7ed4-428a-aa06-31ff25db0772\n .

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource scan. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:resourceScan/${Id}. An\n example is\n arn:aws:cloudformation:us-east-1:123456789012:resourceScan/f5b490f7-7ed4-428a-aa06-31ff25db0772\n .

" } }, "Status": { "target": "com.amazonaws.cloudformation#ResourceScanStatus", "traits": { - "smithy.api#documentation": "

Status of the resource scan.

\n
\n
INPROGRESS
\n
\n

The resource scan is still in progress.

\n
\n
COMPLETE
\n
\n

The resource scan is complete.

\n
\n
EXPIRED
\n
\n

The resource scan has expired.

\n
\n
FAILED
\n
\n

The resource scan has failed.

\n
\n
" + "smithy.api#documentation": "

Status of the resource scan.

\n
\n
\n \n INPROGRESS\n \n
\n
\n

The resource scan is still in progress.

\n
\n
\n \n COMPLETE\n \n
\n
\n

The resource scan is complete.

\n
\n
\n \n EXPIRED\n \n
\n
\n

The resource scan has expired.

\n
\n
\n \n FAILED\n \n
\n
\n

The resource scan has failed.

\n
\n
" } }, "StatusReason": { "target": "com.amazonaws.cloudformation#ResourceScanStatusReason", "traits": { - "smithy.api#documentation": "

The reason for the resource scan status, providing more information if a failure happened.

" + "smithy.api#documentation": "

The reason for the resource scan status, providing more information if a failure\n happened.

" } }, "StartTime": { @@ -4386,19 +4386,19 @@ "ResourceTypes": { "target": "com.amazonaws.cloudformation#ResourceTypes", "traits": { - "smithy.api#documentation": "

The list of resource types for the specified scan. Resource types are only available for scans with a\n Status set to COMPLETE or FAILED .

" + "smithy.api#documentation": "

The list of resource types for the specified scan. Resource types are only available for\n scans with a Status set to COMPLETE or FAILED .

" } }, "ResourcesScanned": { "target": "com.amazonaws.cloudformation#ResourcesScanned", "traits": { - "smithy.api#documentation": "

The number of resources that were listed. This is only available for scans with a Status set to\n COMPLETE, EXPIRED, or FAILED .

" + "smithy.api#documentation": "

The number of resources that were listed. This is only available for scans with a\n Status set to COMPLETE, EXPIRED, or FAILED\n .

" } }, "ResourcesRead": { "target": "com.amazonaws.cloudformation#ResourcesRead", "traits": { - "smithy.api#documentation": "

The number of resources that were read. This is only available for scans with a Status set to\n COMPLETE, EXPIRED, or FAILED .

\n \n

This field may be 0 if the resource scan failed with a ResourceScanLimitExceededException.

\n
" + "smithy.api#documentation": "

The number of resources that were read. This is only available for scans with a\n Status set to COMPLETE, EXPIRED, or FAILED\n .

\n \n

This field may be 0 if the resource scan failed with a\n ResourceScanLimitExceededException.

\n
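For the DescribeResourceScan fields documented above, a hedged Swift sketch; the scan ARN is a placeholder and the output member names assume Soto's standard mapping of the model members shown here.

```swift
import SotoCloudFormation

/// Sketch: summarize a resource scan given its ARN
/// (arn:${Partition}:cloudformation:${Region}:${Account}:resourceScan/${Id}).
func summarizeResourceScan(_ cloudFormation: CloudFormation, scanArn: String) async throws {
    let scan = try await cloudFormation.describeResourceScan(.init(resourceScanId: scanArn))
    print("status:", scan.status?.rawValue ?? "UNKNOWN")   // INPROGRESS | COMPLETE | EXPIRED | FAILED
    if let reason = scan.statusReason { print("reason:", reason) }
    // ResourcesScanned / ResourcesRead are only populated once the scan is past INPROGRESS.
    print("listed:", scan.resourcesScanned ?? 0, "read:", scan.resourcesRead ?? 0)
}
```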
" } } }, @@ -4415,7 +4415,7 @@ "target": "com.amazonaws.cloudformation#DescribeStackDriftDetectionStatusOutput" }, "traits": { - "smithy.api#documentation": "

Returns information about a stack drift detection operation. A stack drift detection operation detects whether a\n stack's actual configuration differs, or has drifted, from its expected configuration, as\n defined in the stack template and any values specified as template parameters. A stack is considered to have drifted\n if one or more of its resources have drifted. For more information about stack and resource drift, see Detecting Unregulated\n Configuration Changes to Stacks and Resources.

\n

Use DetectStackDrift to initiate a stack drift detection operation.\n DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the\n operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use\n DescribeStackResourceDrifts to return drift information about the stack and its resources.

" + "smithy.api#documentation": "

Returns information about a stack drift detection operation. A stack drift detection\n operation detects whether a stack's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack\n template and any values specified as template parameters. A stack is considered to have\n drifted if one or more of its resources have drifted. For more information about stack and\n resource drift, see Detect unmanaged\n configuration changes to stacks and resources with drift detection.

\n

Use DetectStackDrift to initiate a stack drift detection operation.\n DetectStackDrift returns a StackDriftDetectionId you can use to\n monitor the progress of the operation using DescribeStackDriftDetectionStatus.\n Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its\n resources.

" } }, "com.amazonaws.cloudformation#DescribeStackDriftDetectionStatusInput": { @@ -4425,7 +4425,7 @@ "target": "com.amazonaws.cloudformation#StackDriftDetectionId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the drift detection results of this operation.

\n

CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number\n of drift results CloudFormation retains for any given stack, and for how long, may vary.

", + "smithy.api#documentation": "

The ID of the drift detection results of this operation.

\n

CloudFormation generates new results, with a new drift detection ID, each time this operation\n is run. However, the number of drift results CloudFormation retains for any given stack, and for\n how long, may vary.

", "smithy.api#required": {} } } @@ -4449,21 +4449,21 @@ "target": "com.amazonaws.cloudformation#StackDriftDetectionId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the drift detection results of this operation.

\n

CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number\n of reports CloudFormation retains for any given stack, and for how long, may vary.

", + "smithy.api#documentation": "

The ID of the drift detection results of this operation.

\n

CloudFormation generates new results, with a new drift detection ID, each time this operation\n is run. However, the number of reports CloudFormation retains for any given stack, and for how\n long, may vary.

", "smithy.api#required": {} } }, "StackDriftStatus": { "target": "com.amazonaws.cloudformation#StackDriftStatus", "traits": { - "smithy.api#documentation": "

Status of the stack's actual configuration compared to its expected configuration.

\n
    \n
  • \n

    \n DRIFTED: The stack differs from its expected template configuration. A stack is considered to\n have drifted if one or more of its resources have drifted.

    \n
  • \n
  • \n

    \n NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template\n configuration.

    \n
  • \n
  • \n

    \n IN_SYNC: The stack's actual configuration matches its expected template configuration.

    \n
  • \n
  • \n

    \n UNKNOWN: This value is reserved for future use.

    \n
  • \n
" + "smithy.api#documentation": "

Status of the stack's actual configuration compared to its expected configuration.

\n
    \n
  • \n

    \n DRIFTED: The stack differs from its expected template configuration. A\n stack is considered to have drifted if one or more of its resources have drifted.

    \n
  • \n
  • \n

    \n NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its\n expected template configuration.

    \n
  • \n
  • \n

    \n IN_SYNC: The stack's actual configuration matches its expected template\n configuration.

    \n
  • \n
  • \n

    \n UNKNOWN: This value is reserved for future use.

    \n
  • \n
" } }, "DetectionStatus": { "target": "com.amazonaws.cloudformation#StackDriftDetectionStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The status of the stack drift detection operation.

\n
    \n
  • \n

    \n DETECTION_COMPLETE: The stack drift detection operation has successfully completed for all\n resources in the stack that support drift detection. (Resources that don't currently support stack detection remain\n unchecked.)

    \n

    If you specified logical resource IDs for CloudFormation to use as a filter for the stack drift detection operation,\n only the resources with those logical IDs are checked for drift.

    \n
  • \n
  • \n

    \n DETECTION_FAILED: The stack drift detection operation has failed for at least one resource in the\n stack. Results will be available for resources on which CloudFormation successfully completed drift detection.

    \n
  • \n
  • \n

    \n DETECTION_IN_PROGRESS: The stack drift detection operation is currently in progress.

    \n
  • \n
", + "smithy.api#documentation": "

The status of the stack drift detection operation.

\n
    \n
  • \n

    \n DETECTION_COMPLETE: The stack drift detection operation has successfully\n completed for all resources in the stack that support drift detection. (Resources that\n don't currently support stack detection remain unchecked.)

    \n

    If you specified logical resource IDs for CloudFormation to use as a filter for the stack\n drift detection operation, only the resources with those logical IDs are checked for\n drift.

    \n
  • \n
  • \n

    \n DETECTION_FAILED: The stack drift detection operation has failed for at\n least one resource in the stack. Results will be available for resources on which\n CloudFormation successfully completed drift detection.

    \n
  • \n
  • \n

    \n DETECTION_IN_PROGRESS: The stack drift detection operation is currently\n in progress.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -4476,7 +4476,7 @@ "DriftedStackResourceCount": { "target": "com.amazonaws.cloudformation#BoxedInteger", "traits": { - "smithy.api#documentation": "

Total number of stack resources that have drifted. This is NULL until the drift detection operation reaches a\n status of DETECTION_COMPLETE. This value will be 0 for stacks whose drift status is\n IN_SYNC.

" + "smithy.api#documentation": "

Total number of stack resources that have drifted. This is NULL until the drift detection\n operation reaches a status of DETECTION_COMPLETE. This value will be 0 for stacks\n whose drift status is IN_SYNC.
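The drift-detection flow described above (DetectStackDrift returns a detection ID that DescribeStackDriftDetectionStatus is polled with) can be sketched as follows. The enum case names assume Soto's usual lowerCamelCase conversion of the model values (for example `DETECTION_IN_PROGRESS` → `.detectionInProgress`), and the 5-second poll interval is arbitrary.

```swift
import SotoCloudFormation

/// Sketch: start drift detection on a stack, poll until it finishes, then
/// report how many resources drifted.
func detectDrift(_ cloudFormation: CloudFormation, stackName: String) async throws {
    let detection = try await cloudFormation.detectStackDrift(.init(stackName: stackName))
    let detectionId = detection.stackDriftDetectionId

    while true {
        let status = try await cloudFormation.describeStackDriftDetectionStatus(
            .init(stackDriftDetectionId: detectionId)
        )
        guard status.detectionStatus == .detectionInProgress else {
            print("detection:", status.detectionStatus.rawValue,
                  "stack drift:", status.stackDriftStatus?.rawValue ?? "UNKNOWN",
                  "drifted resources:", status.driftedStackResourceCount ?? 0)
            return
        }
        try await Task.sleep(nanoseconds: 5_000_000_000)   // poll every 5 seconds
    }
}
```

Once detection completes, DescribeStackResourceDrifts (sketched further below) returns the per-resource drift records.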

" } }, "Timestamp": { @@ -4501,7 +4501,7 @@ "target": "com.amazonaws.cloudformation#DescribeStackEventsOutput" }, "traits": { - "smithy.api#documentation": "

Returns all stack related events for a specified stack in reverse chronological order. For more information\n about a stack's event history, see CloudFormation stack creation\n events in the CloudFormation User Guide.

\n \n

You can list events for stacks that have failed to create or have been deleted by specifying the unique stack\n identifier (stack ID).

\n
", + "smithy.api#documentation": "

Returns all stack related events for a specified stack in reverse chronological order. For\n more information about a stack's event history, see Understand CloudFormation stack creation events in the\n CloudFormation User Guide.

\n \n

You can list events for stacks that have failed to create or have been deleted by\n specifying the unique stack identifier (stack ID).

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -4515,7 +4515,7 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackName", "traits": { - "smithy.api#documentation": "

The name or the unique stack ID that's associated with the stack, which aren't always interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

" + "smithy.api#documentation": "

The name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

" } }, "NextToken": { @@ -4542,7 +4542,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the output exceeds 1 MB in size, a string that identifies the next page of events. If no additional page\n exists, this value is null.

" + "smithy.api#documentation": "

If the output exceeds 1 MB in size, a string that identifies the next page of events. If\n no additional page exists, this value is null.
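A short sketch of the NextToken paging this documentation describes, using the generated DescribeStackEvents call; `cloudFormation` is assumed to be an already-configured client.

```swift
import SotoCloudFormation

/// Sketch: walk a stack's event history (newest first) page by page. Works with
/// a stack name or, for deleted stacks, the unique stack ID.
func printStackEvents(_ cloudFormation: CloudFormation, stackName: String) async throws {
    var nextToken: String? = nil
    repeat {
        let page = try await cloudFormation.describeStackEvents(
            .init(nextToken: nextToken, stackName: stackName)
        )
        for event in page.stackEvents ?? [] {
            print(event.timestamp, event.logicalResourceId ?? "-", event.resourceStatus?.rawValue ?? "-")
        }
        nextToken = page.nextToken
    } while nextToken != nil
}
```

Since the operation carries the `smithy.api#paginated` trait shown in this hunk, Soto should also generate a paginator variant that wraps this loop; the manual loop above is shown only to make the NextToken semantics explicit.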

" } } }, @@ -4568,7 +4568,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the stack instance that's associated with the specified StackSet, Amazon Web Services account, and\n Amazon Web Services Region.

\n

For a list of stack instances that are associated with a specific StackSet, use ListStackInstances.

" + "smithy.api#documentation": "

Returns the stack instance that's associated with the specified StackSet, Amazon Web Services account,\n and Amazon Web Services Region.

\n

For a list of stack instances that are associated with a specific StackSet, use ListStackInstances.

" } }, "com.amazonaws.cloudformation#DescribeStackInstanceInput": { @@ -4578,7 +4578,7 @@ "target": "com.amazonaws.cloudformation#StackSetName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or the unique stack ID of the stack set that you want to get stack instance information for.

", + "smithy.api#documentation": "

The name or the unique stack ID of the stack set that you want to get stack instance\n information for.

", "smithy.api#required": {} } }, @@ -4601,7 +4601,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
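The CallAs options spelled out above map onto the generated client roughly as in this sketch for DescribeStackInstance; the member labels and the `.delegatedAdmin` case name assume Soto's standard code generation.

```swift
import SotoCloudFormation

/// Sketch: look up one stack instance of a stack set. `callAs: .delegatedAdmin`
/// corresponds to DELEGATED_ADMIN above; omit it (SELF is the default) when
/// calling from the management account or for self-managed stack sets.
func describeInstance(
    _ cloudFormation: CloudFormation,
    stackSet: String, account: String, region: String
) async throws {
    let result = try await cloudFormation.describeStackInstance(
        .init(
            callAs: .delegatedAdmin,
            stackInstanceAccount: account,
            stackInstanceRegion: region,
            stackSetName: stackSet
        )
    )
    print(result.stackInstance?.status?.rawValue ?? "UNKNOWN")
}
```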
" } } }, @@ -4632,7 +4632,7 @@ "target": "com.amazonaws.cloudformation#DescribeStackResourceOutput" }, "traits": { - "smithy.api#documentation": "

Returns a description of the specified resource in the specified stack.

\n

For deleted stacks, DescribeStackResource returns resource information for up to 90 days after the stack has\n been deleted.

" + "smithy.api#documentation": "

Returns a description of the specified resource in the specified stack.

\n

For deleted stacks, DescribeStackResource returns resource information for up to 90 days\n after the stack has been deleted.

" } }, "com.amazonaws.cloudformation#DescribeStackResourceDrifts": { @@ -4644,7 +4644,7 @@ "target": "com.amazonaws.cloudformation#DescribeStackResourceDriftsOutput" }, "traits": { - "smithy.api#documentation": "

Returns drift information for the resources that have been checked for drift in the specified stack. This\n includes actual and expected configuration values for resources where CloudFormation detects configuration drift.

\n

For a given stack, there will be one StackResourceDrift for each stack resource that has been\n checked for drift. Resources that haven't yet been checked for drift aren't included. Resources that don't currently\n support drift detection aren't checked, and so not included. For a list of resources that support drift detection,\n see Resources that Support Drift Detection.

\n

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all supported resources for a given stack.

", + "smithy.api#documentation": "

Returns drift information for the resources that have been checked for drift in the\n specified stack. This includes actual and expected configuration values for resources where\n CloudFormation detects configuration drift.

\n

For a given stack, there will be one StackResourceDrift for each stack\n resource that has been checked for drift. Resources that haven't yet been checked for drift\n aren't included. Resources that don't currently support drift detection aren't checked, and so\n not included. For a list of resources that support drift detection, see Resource\n type support for imports and drift detection.

\n

Use DetectStackResourceDrift to detect drift on individual resources, or\n DetectStackDrift to detect drift on all supported resources for a given\n stack.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -4666,7 +4666,7 @@ "StackResourceDriftStatusFilters": { "target": "com.amazonaws.cloudformation#StackResourceDriftStatusFilters", "traits": { - "smithy.api#documentation": "

The resource drift status values to use as filters for the resource drift results returned.

\n
    \n
  • \n

    \n DELETED: The resource differs from its expected template configuration in that the resource has\n been deleted.

    \n
  • \n
  • \n

    \n MODIFIED: One or more resource properties differ from their expected template values.

    \n
  • \n
  • \n

    \n IN_SYNC: The resource's actual configuration matches its expected template configuration.

    \n
  • \n
  • \n

    \n NOT_CHECKED: CloudFormation doesn't currently return this value.

    \n
  • \n
" + "smithy.api#documentation": "

The resource drift status values to use as filters for the resource drift results\n returned.

\n
    \n
  • \n

    \n DELETED: The resource differs from its expected template configuration in\n that the resource has been deleted.

    \n
  • \n
  • \n

    \n MODIFIED: One or more resource properties differ from their expected\n template values.

    \n
  • \n
  • \n

    \n IN_SYNC: The resource's actual configuration matches its expected\n template configuration.

    \n
  • \n
  • \n

    \n NOT_CHECKED: CloudFormation doesn't currently return this value.

    \n
  • \n
" } }, "NextToken": { @@ -4678,7 +4678,7 @@ "MaxResults": { "target": "com.amazonaws.cloudformation#BoxedMaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } } }, @@ -4693,14 +4693,14 @@ "target": "com.amazonaws.cloudformation#StackResourceDrifts", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Drift information for the resources that have been checked for drift in the specified stack. This includes\n actual and expected configuration values for resources where CloudFormation detects drift.

\n

For a given stack, there will be one StackResourceDrift for each stack resource that has been\n checked for drift. Resources that haven't yet been checked for drift aren't included. Resources that do not currently\n support drift detection aren't checked, and so not included. For a list of resources that support drift detection,\n see Resources that Support Drift Detection.

", + "smithy.api#documentation": "

Drift information for the resources that have been checked for drift in the specified\n stack. This includes actual and expected configuration values for resources where CloudFormation\n detects drift.

\n

For a given stack, there will be one StackResourceDrift for each stack\n resource that has been checked for drift. Resources that haven't yet been checked for drift\n aren't included. Resources that do not currently support drift detection aren't checked, and\n so not included. For a list of resources that support drift detection, see Resource\n type support for imports and drift detection.

", "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call DescribeStackResourceDrifts again and assign that token to the request\n object's NextToken parameter. If the request returns all results, NextToken is set to\n null.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call DescribeStackResourceDrifts\n again and assign that token to the request object's NextToken parameter. If the\n request returns all results, NextToken is set to null.
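A sketch of DescribeStackResourceDrifts as documented above, combining the drift-status filters with the NextToken paging; enum case names again assume Soto's usual mapping of the model values.

```swift
import SotoCloudFormation

/// Sketch: page through one stack's drift results, asking only for resources
/// that were MODIFIED or DELETED relative to the template.
func listDriftedResources(_ cloudFormation: CloudFormation, stackName: String) async throws {
    var nextToken: String? = nil
    repeat {
        let page = try await cloudFormation.describeStackResourceDrifts(
            .init(
                maxResults: 100,
                nextToken: nextToken,
                stackName: stackName,
                stackResourceDriftStatusFilters: [.modified, .deleted]
            )
        )
        for drift in page.stackResourceDrifts {
            print(drift.logicalResourceId, drift.stackResourceDriftStatus.rawValue)
        }
        nextToken = page.nextToken
    } while nextToken != nil
}
```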

" } } }, @@ -4715,7 +4715,7 @@ "target": "com.amazonaws.cloudformation#StackName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or the unique stack ID that's associated with the stack, which aren't always interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

", + "smithy.api#documentation": "

The name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

", "smithy.api#required": {} } }, @@ -4739,7 +4739,7 @@ "StackResourceDetail": { "target": "com.amazonaws.cloudformation#StackResourceDetail", "traits": { - "smithy.api#documentation": "

A StackResourceDetail structure containing the description of the specified resource in the\n specified stack.

" + "smithy.api#documentation": "

A StackResourceDetail structure containing the description of the specified\n resource in the specified stack.

" } } }, @@ -4757,7 +4757,7 @@ "target": "com.amazonaws.cloudformation#DescribeStackResourcesOutput" }, "traits": { - "smithy.api#documentation": "

Returns Amazon Web Services resource descriptions for running and deleted stacks. If StackName is\n specified, all the associated resources that are part of the stack are returned. If PhysicalResourceId\n is specified, the associated resources of the stack that the resource belongs to are returned.

\n \n

Only the first 100 resources will be returned. If your stack has more resources than this, you should use\n ListStackResources instead.

\n
\n

For deleted stacks, DescribeStackResources returns resource information for up to 90 days after the\n stack has been deleted.

\n

You must specify either StackName or PhysicalResourceId, but not both. In addition,\n you can specify LogicalResourceId to filter the returned result. For more information about resources,\n the LogicalResourceId and PhysicalResourceId, go to the CloudFormation User Guide.

\n \n

A ValidationError is returned if you specify both StackName and\n PhysicalResourceId in the same request.

\n
" + "smithy.api#documentation": "

Returns Amazon Web Services resource descriptions for running and deleted stacks. If\n StackName is specified, all the associated resources that are part of the stack\n are returned. If PhysicalResourceId is specified, the associated resources of the\n stack that the resource belongs to are returned.

\n \n

Only the first 100 resources will be returned. If your stack has more resources than\n this, you should use ListStackResources instead.

\n
\n

For deleted stacks, DescribeStackResources returns resource information for\n up to 90 days after the stack has been deleted.

\n

You must specify either StackName or PhysicalResourceId, but not\n both. In addition, you can specify LogicalResourceId to filter the returned\n result. For more information about resources, the LogicalResourceId and\n PhysicalResourceId, see the CloudFormation User Guide.

\n \n

A ValidationError is returned if you specify both StackName\n and PhysicalResourceId in the same request.

\n
" } }, "com.amazonaws.cloudformation#DescribeStackResourcesInput": { @@ -4766,7 +4766,7 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackName", "traits": { - "smithy.api#documentation": "

The name or the unique stack ID that is associated with the stack, which aren't always interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

\n

Required: Conditional. If you don't specify StackName, you must specify\n PhysicalResourceId.

" + "smithy.api#documentation": "

The name or the unique stack ID that is associated with the stack, which aren't always\n interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

\n

Required: Conditional. If you don't specify StackName, you must specify\n PhysicalResourceId.

" } }, "LogicalResourceId": { @@ -4778,7 +4778,7 @@ "PhysicalResourceId": { "target": "com.amazonaws.cloudformation#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.

\n

For example, for an Amazon Elastic Compute Cloud (EC2) instance, PhysicalResourceId corresponds to\n the InstanceId. You can pass the EC2 InstanceId to DescribeStackResources to\n find which stack the instance belongs to and what other resources are part of the stack.

\n

Required: Conditional. If you don't specify PhysicalResourceId, you must specify\n StackName.

\n

Default: There is no default value.

" + "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of a resource\n supported by CloudFormation.

\n

For example, for an Amazon Elastic Compute Cloud (EC2) instance,\n PhysicalResourceId corresponds to the InstanceId. You can pass the\n EC2 InstanceId to DescribeStackResources to find which stack the\n instance belongs to and what other resources are part of the stack.

\n

Required: Conditional. If you don't specify PhysicalResourceId, you must\n specify StackName.

\n

Default: There is no default value.
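The PhysicalResourceId lookup described above (pass an EC2 instance ID to find the owning stack) is a one-liner through the generated client; a hedged sketch:

```swift
import SotoCloudFormation

/// Sketch: given an EC2 instance ID, return the name of the stack it belongs to.
/// Only one of StackName or PhysicalResourceId may be supplied per request.
func stackOwning(_ cloudFormation: CloudFormation, instanceId: String) async throws -> String? {
    let result = try await cloudFormation.describeStackResources(
        .init(physicalResourceId: instanceId)
    )
    return result.stackResources?.first?.stackName
}
```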

" } } }, @@ -4833,7 +4833,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -4883,7 +4883,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -4928,7 +4928,7 @@ "target": "com.amazonaws.cloudformation#DescribeStacksOutput" }, "traits": { - "smithy.api#documentation": "

Returns the description for the specified stack; if no stack name was specified, then it returns the description\n for all the stacks created. For more information about a stack's event history, see CloudFormation stack creation\n events in the CloudFormation User Guide.

\n \n

If the stack doesn't exist, a ValidationError is returned.

\n
", + "smithy.api#documentation": "

Returns the description for the specified stack; if no stack name was specified, then it\n returns the description for all the stacks created. For more information about a stack's event\n history, see Understand CloudFormation stack creation events in the\n CloudFormation User Guide.

\n \n

If the stack doesn't exist, a ValidationError is returned.

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -5384,7 +5384,7 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackName", "traits": { - "smithy.api#documentation": "\n

If you don't pass a parameter to StackName, the API returns a response that describes all\n resources in the account, which can impact performance. This requires ListStacks and\n DescribeStacks permissions.

\n

Consider using the ListStacks API if you're not passing a parameter to\n StackName.

\n

The IAM policy below can be added to IAM policies when you want to limit resource-level permissions and\n avoid returning a response when no parameter is sent in the request:

\n

{ \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\": \"cloudformation:DescribeStacks\",\n \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }] }

\n
\n

The name or the unique stack ID that's associated with the stack, which aren't always interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

" + "smithy.api#documentation": "\n

If you don't pass a parameter to StackName, the API returns a response\n that describes all resources in the account, which can impact performance. This requires\n ListStacks and DescribeStacks permissions.

\n

Consider using the ListStacks API if you're not passing a parameter to\n StackName.

\n

The IAM policy below can be added to IAM policies when you want to limit\n resource-level permissions and avoid returning a response when no parameter is sent in the\n request:

\n

{ \"Version\": \"2012-10-17\", \"Statement\": [{ \"Effect\": \"Deny\", \"Action\":\n \"cloudformation:DescribeStacks\", \"NotResource\": \"arn:aws:cloudformation:*:*:stack/*/*\" }]\n }

\n
\n

The name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

" } }, "NextToken": { @@ -5411,7 +5411,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page\n exists, this value is null.

" + "smithy.api#documentation": "

If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If\n no additional page exists, this value is null.
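A small sketch for the DescribeStacks guidance above: always pass `StackName` when you only need one stack, since omitting it describes every stack in the account.

```swift
import SotoCloudFormation

/// Sketch: fetch the status of a single named stack.
func stackStatus(_ cloudFormation: CloudFormation, stackName: String) async throws -> String? {
    let result = try await cloudFormation.describeStacks(.init(stackName: stackName))
    return result.stacks?.first?.stackStatus.rawValue
}
```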

" } } }, @@ -5437,7 +5437,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns detailed information about an extension that has been registered.

\n

If you specify a VersionId, DescribeType returns information about that specific\n extension version. Otherwise, it returns information about the default extension version.

", + "smithy.api#documentation": "

Returns detailed information about an extension that has been registered.

\n

If you specify a VersionId, DescribeType returns information\n about that specific extension version. Otherwise, it returns information about the default\n extension version.

", "smithy.api#idempotent": {} } }, @@ -5447,25 +5447,25 @@ "Type": { "target": "com.amazonaws.cloudformation#RegistryType", "traits": { - "smithy.api#documentation": "

The kind of extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The kind of extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "Arn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "VersionId": { "target": "com.amazonaws.cloudformation#TypeVersionId", "traits": { - "smithy.api#documentation": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource\n Name (ARN) assigned to the extension version when it is registered.

\n

If you specify a VersionId, DescribeType returns information about that specific\n extension version. Otherwise, it returns information about the default extension version.

" + "smithy.api#documentation": "

The ID of a specific version of the extension. The version ID is the value at the end of\n the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

\n

If you specify a VersionId, DescribeType returns information\n about that specific extension version. Otherwise, it returns information about the default\n extension version.
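Finally, a sketch of the DescribeType conditionals documented above: supply either `TypeName` plus `Type`, or the extension `Arn`; add a `VersionId` to pin the answer to a specific registered version rather than the default one. The `.resource` case name assumes Soto's usual mapping of the RESOURCE value.

```swift
import SotoCloudFormation

/// Sketch: describe the default version of a registered resource type.
func describeResourceType(_ cloudFormation: CloudFormation, typeName: String) async throws {
    let info = try await cloudFormation.describeType(
        .init(type: .resource, typeName: typeName)
    )
    print("default version:", info.defaultVersionId ?? "n/a")
    print("provisioning:", info.provisioningType?.rawValue ?? "UNKNOWN")
    print("deprecation:", info.deprecatedStatus?.rawValue ?? "UNKNOWN")
}
```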

" } }, "PublisherId": { @@ -5503,31 +5503,31 @@ "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

\n

If the extension is a public third-party type you have activated with a type name alias, CloudFormation returns the\n type name alias. For more information, see ActivateType.

" + "smithy.api#documentation": "

The name of the extension.

\n

If the extension is a public third-party type you have activated with a type name alias,\n CloudFormation returns the type name alias. For more information, see ActivateType.

" } }, "DefaultVersionId": { "target": "com.amazonaws.cloudformation#TypeVersionId", "traits": { - "smithy.api#documentation": "

The ID of the default version of the extension. The default version is used when the extension version isn't\n specified.

\n

This applies only to private extensions you have registered in your account. For public extensions, both those\n provided by Amazon Web Services and published by third parties, CloudFormation returns null. For more\n information, see RegisterType.

\n

To set the default version of an extension, use SetTypeDefaultVersion.

" + "smithy.api#documentation": "

The ID of the default version of the extension. The default version is used when the\n extension version isn't specified.

\n

This applies only to private extensions you have registered in your account. For public\n extensions, both those provided by Amazon Web Services and published by third parties, CloudFormation returns\n null. For more information, see RegisterType.

\n

To set the default version of an extension, use SetTypeDefaultVersion.

" } }, "IsDefaultVersion": { "target": "com.amazonaws.cloudformation#IsDefaultVersion", "traits": { - "smithy.api#documentation": "

Whether the specified extension version is set as the default version.

\n

This applies only to private extensions you have registered in your account, and extensions published by Amazon Web Services. For public third-party extensions, whether they are activated in your account, CloudFormation returns\n null.

" + "smithy.api#documentation": "

Whether the specified extension version is set as the default version.

\n

This applies only to private extensions you have registered in your account, and\n extensions published by Amazon Web Services. For public third-party extensions, whether they are activated\n in your account, CloudFormation returns null.

" } }, "TypeTestsStatus": { "target": "com.amazonaws.cloudformation#TypeTestsStatus", "traits": { - "smithy.api#documentation": "

The contract test status of the registered extension version. To return the extension test status of a specific\n extension version, you must specify VersionId.

\n

This applies only to registered private extension versions. CloudFormation doesn't return this information for\n public extensions, whether they are activated in your account.

\n
    \n
  • \n

    \n PASSED: The extension has passed all its contract tests.

    \n

    An extension must have a test status of PASSED before it can be published. For more information,\n see Publishing\n extensions to make them available for public use in the CloudFormation Command Line Interface User\n Guide.

    \n
  • \n
  • \n

    \n FAILED: The extension has failed one or more contract tests.

    \n
  • \n
  • \n

    \n IN_PROGRESS: Contract tests are currently being performed on the extension.

    \n
  • \n
  • \n

    \n NOT_TESTED: Contract tests haven't been performed on the extension.

    \n
  • \n
" + "smithy.api#documentation": "

The contract test status of the registered extension version. To return the extension test\n status of a specific extension version, you must specify VersionId.

\n

This applies only to registered private extension versions. CloudFormation doesn't return this\n information for public extensions, whether they are activated in your account.

\n
    \n
  • \n

    \n PASSED: The extension has passed all its contract tests.

    \n

    An extension must have a test status of PASSED before it can be\n published. For more information, see Publishing\n extensions to make them available for public use in the\n CloudFormation Command Line Interface (CLI) User Guide.

    \n
  • \n
  • \n

    \n FAILED: The extension has failed one or more contract tests.

    \n
  • \n
  • \n

    \n IN_PROGRESS: Contract tests are currently being performed on the\n extension.

    \n
  • \n
  • \n

    \n NOT_TESTED: Contract tests haven't been performed on the\n extension.

    \n
  • \n
" } }, "TypeTestsStatusDescription": { "target": "com.amazonaws.cloudformation#TypeTestsStatusDescription", "traits": { - "smithy.api#documentation": "

The description of the test status. To return the extension test status of a specific extension version, you\n must specify VersionId.

\n

This applies only to registered private extension versions. CloudFormation doesn't return this information for\n public extensions, whether they are activated in your account.

" + "smithy.api#documentation": "

The description of the test status. To return the extension test status of a specific\n extension version, you must specify VersionId.

\n

This applies only to registered private extension versions. CloudFormation doesn't return this\n information for public extensions, whether they are activated in your account.

" } }, "Description": { @@ -5539,43 +5539,43 @@ "Schema": { "target": "com.amazonaws.cloudformation#TypeSchema", "traits": { - "smithy.api#documentation": "

The schema that defines the extension.

\n

For more information about extension schemas, see Resource Provider Schema in the\n CloudFormation CLI User Guide.

" + "smithy.api#documentation": "

The schema that defines the extension.

\n

For more information about extension schemas, see Resource type\n schema in the CloudFormation Command Line Interface (CLI) User Guide.

" } }, "ProvisioningType": { "target": "com.amazonaws.cloudformation#ProvisioningType", "traits": { - "smithy.api#documentation": "

For resource type extensions, the provisioning behavior of the resource type. CloudFormation determines the provisioning\n type during registration, based on the types of handlers in the schema handler package submitted.

\n

Valid values include:

\n
    \n
  • \n

    \n FULLY_MUTABLE: The resource type includes an update handler to process updates to the type during\n stack update operations.

    \n
  • \n
  • \n

    \n IMMUTABLE: The resource type doesn't include an update handler, so the type can't be updated and\n must instead be replaced during stack update operations.

    \n
  • \n
  • \n

    \n NON_PROVISIONABLE: The resource type doesn't include all the following handlers, and therefore\n can't actually be provisioned.

    \n
      \n
    • \n

      create

      \n
    • \n
    • \n

      read

      \n
    • \n
    • \n

      delete

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

For resource type extensions, the provisioning behavior of the resource type. CloudFormation\n determines the provisioning type during registration, based on the types of handlers in the\n schema handler package submitted.

\n

Valid values include:

\n
    \n
  • \n

    \n FULLY_MUTABLE: The resource type includes an update handler to process\n updates to the type during stack update operations.

    \n
  • \n
  • \n

    \n IMMUTABLE: The resource type doesn't include an update handler, so the\n type can't be updated and must instead be replaced during stack update operations.

    \n
  • \n
  • \n

    \n NON_PROVISIONABLE: The resource type doesn't include all the following\n handlers, and therefore can't actually be provisioned.

    \n
      \n
    • \n

      create

      \n
    • \n
    • \n

      read

      \n
    • \n
    • \n

      delete

      \n
    • \n
    \n
  • \n
" } }, "DeprecatedStatus": { "target": "com.amazonaws.cloudformation#DeprecatedStatus", "traits": { - "smithy.api#documentation": "

The deprecation status of the extension version.

\n

Valid values include:

\n
    \n
  • \n

    \n LIVE: The extension is activated or registered and can be used in CloudFormation operations,\n dependent on its provisioning behavior and visibility scope.

    \n
  • \n
  • \n

    \n DEPRECATED: The extension has been deactivated or deregistered and can no longer be used in\n CloudFormation operations.

    \n
  • \n
\n

For public third-party extensions, CloudFormation returns null.

" + "smithy.api#documentation": "

The deprecation status of the extension version.

\n

Valid values include:

\n
    \n
  • \n

    \n LIVE: The extension is activated or registered and can be used in\n CloudFormation operations, dependent on its provisioning behavior and visibility scope.

    \n
  • \n
  • \n

    \n DEPRECATED: The extension has been deactivated or deregistered and can no\n longer be used in CloudFormation operations.

    \n
  • \n
\n

For public third-party extensions, CloudFormation returns null.

" } }, "LoggingConfig": { "target": "com.amazonaws.cloudformation#LoggingConfig", "traits": { - "smithy.api#documentation": "

Contains logging configuration information for private extensions. This applies only to private extensions you\n have registered in your account. For public extensions, both those provided by Amazon Web Services and published by\n third parties, CloudFormation returns null. For more information, see RegisterType.

" + "smithy.api#documentation": "

Contains logging configuration information for private extensions. This applies only to\n private extensions you have registered in your account. For public extensions, both those\n provided by Amazon Web Services and published by third parties, CloudFormation returns null. For\n more information, see RegisterType.

" } }, "RequiredActivatedTypes": { "target": "com.amazonaws.cloudformation#RequiredActivatedTypes", "traits": { - "smithy.api#documentation": "

For extensions that are modules, the public third-party extensions that must be activated in your account in\n order for the module itself to be activated.

" + "smithy.api#documentation": "

For extensions that are modules, the public third-party extensions that must be activated\n in your account in order for the module itself to be activated.

" } }, "ExecutionRoleArn": { "target": "com.amazonaws.cloudformation#RoleARN2", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM execution role used to register the extension. This applies only to\n private extensions you have registered in your account. For more information, see RegisterType.

\n

If the registered extension calls any Amazon Web Services APIs, you must create an \n IAM execution role\n that includes\n the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account.\n CloudFormation then assumes that execution role to provide your extension with the appropriate credentials.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM execution role used to register the extension.\n This applies only to private extensions you have registered in your account. For more\n information, see RegisterType.

\n

If the registered extension calls any Amazon Web Services APIs, you must create an \n IAM execution\n role\n that includes the necessary permissions to call those Amazon Web Services APIs,\n and provision that execution role in your account. CloudFormation then assumes that execution role\n to provide your extension with the appropriate credentials.

" } }, "Visibility": { "target": "com.amazonaws.cloudformation#Visibility", "traits": { - "smithy.api#documentation": "

The scope at which the extension is visible and usable in CloudFormation operations.

\n

Valid values include:

\n
    \n
  • \n

    \n PRIVATE: The extension is only visible and usable within the account in which it is registered.\n CloudFormation marks any extensions you register as PRIVATE.

    \n
  • \n
  • \n

    \n PUBLIC: The extension is publicly visible and usable within any Amazon Web Services\n account.

    \n
  • \n
" + "smithy.api#documentation": "

The scope at which the extension is visible and usable in CloudFormation operations.

\n

Valid values include:

\n
    \n
  • \n

    \n PRIVATE: The extension is only visible and usable within the account in\n which it is registered. CloudFormation marks any extensions you register as\n PRIVATE.

    \n
  • \n
  • \n

    \n PUBLIC: The extension is publicly visible and usable within any Amazon Web Services\n account.

    \n
  • \n
" } }, "SourceUrl": { @@ -5593,61 +5593,61 @@ "LastUpdated": { "target": "com.amazonaws.cloudformation#Timestamp", "traits": { - "smithy.api#documentation": "

When the specified extension version was registered. This applies only to:

\n
    \n
  • \n

    Private extensions you have registered in your account. For more information, see RegisterType.

    \n
  • \n
  • \n

    Public extensions you have activated in your account with auto-update specified. For more information, see\n ActivateType.

    \n
  • \n
" + "smithy.api#documentation": "

When the specified extension version was registered. This applies only to:

\n
    \n
  • \n

    Private extensions you have registered in your account. For more information, see\n RegisterType.

    \n
  • \n
  • \n

    Public extensions you have activated in your account with auto-update specified. For\n more information, see ActivateType.

    \n
  • \n
" } }, "TimeCreated": { "target": "com.amazonaws.cloudformation#Timestamp", "traits": { - "smithy.api#documentation": "

When the specified private extension version was registered or activated in your account.

" + "smithy.api#documentation": "

When the specified private extension version was registered or activated in your\n account.

" } }, "ConfigurationSchema": { "target": "com.amazonaws.cloudformation#ConfigurationSchema", "traits": { - "smithy.api#documentation": "

A JSON string that represent the current configuration data for the extension in this account and Region.

\n

To set the configuration data for an extension, use SetTypeConfiguration. For more\n information, see Configuring extensions at\n the account level in the CloudFormation User Guide.

" + "smithy.api#documentation": "

A JSON string that represent the current configuration data for the extension in this\n account and Region.

\n

To set the configuration data for an extension, use SetTypeConfiguration. For more information, see Edit configuration\n data for extensions in your account in the\n CloudFormation User Guide.

" } }, "PublisherId": { "target": "com.amazonaws.cloudformation#PublisherId", "traits": { - "smithy.api#documentation": "

The publisher ID of the extension publisher.

\n

This applies only to public third-party extensions. For private registered extensions, and extensions provided\n by Amazon Web Services, CloudFormation returns null.

" + "smithy.api#documentation": "

The publisher ID of the extension publisher.

\n

This applies only to public third-party extensions. For private registered extensions, and\n extensions provided by Amazon Web Services, CloudFormation returns null.

" } }, "OriginalTypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the type name of the public\n extension.

\n

If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of\n the public extension. For more information, see Specifying aliases to\n refer to extensions in the CloudFormation User Guide.

" + "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the type name\n of the public extension.

\n

If you specified a TypeNameAlias when enabling the extension in this account\n and Region, CloudFormation treats that alias as the extension's type name within the account and\n Region, not the type name of the public extension. For more information, see Use aliases to refer to extensions in the\n CloudFormation User Guide.

" } }, "OriginalTypeArn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the Amazon Resource Name (ARN) of\n the public extension.

" + "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the Amazon\n Resource Name (ARN) of the public extension.

" } }, "PublicVersionNumber": { "target": "com.amazonaws.cloudformation#PublicVersionNumber", "traits": { - "smithy.api#documentation": "

The version number of a public third-party extension.

\n

This applies only if you specify a public extension you have activated in your account, or specify a public\n extension without specifying a version. For all other extensions, CloudFormation returns null.

" + "smithy.api#documentation": "

The version number of a public third-party extension.

\n

This applies only if you specify a public extension you have activated in your account, or\n specify a public extension without specifying a version. For all other extensions, CloudFormation\n returns null.

" } }, "LatestPublicVersion": { "target": "com.amazonaws.cloudformation#PublicVersionNumber", "traits": { - "smithy.api#documentation": "

The latest version of a public extension that is available for use.

\n

This only applies if you specify a public extension, and you don't specify a version. For all other requests,\n CloudFormation returns null.

" + "smithy.api#documentation": "

The latest version of a public extension that is available for\n use.

\n

This only applies if you specify a public extension, and you don't specify a version. For\n all other requests, CloudFormation returns null.

" } }, "IsActivated": { "target": "com.amazonaws.cloudformation#IsActivated", "traits": { - "smithy.api#documentation": "

Whether the extension is activated in the account and Region.

\n

This only applies to public third-party extensions. For all other extensions, CloudFormation returns\n null.

" + "smithy.api#documentation": "

Whether the extension is activated in the account and Region.

\n

This only applies to public third-party extensions. For all other extensions, CloudFormation\n returns null.

" } }, "AutoUpdate": { "target": "com.amazonaws.cloudformation#AutoUpdate", "traits": { - "smithy.api#documentation": "

Whether CloudFormation automatically updates the extension in this account and Region when a new\n minor version is published by the extension publisher. Major versions released by the publisher\n must be manually updated. For more information, see Activating public extensions for\n use in your account in the CloudFormation User Guide.

" + "smithy.api#documentation": "

Whether CloudFormation automatically updates the extension in this account and Region when a\n new minor version is published by the extension publisher. Major versions\n released by the publisher must be manually updated. For more information, see Automatically use new versions of extensions in the\n CloudFormation User Guide.

" } } }, @@ -5669,7 +5669,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about an extension's registration, including its current status and type and version\n identifiers.

\n

When you initiate a registration request using RegisterType, you can then use DescribeTypeRegistration to monitor the progress of that registration request.

\n

Once the registration request has completed, use DescribeType to return detailed information\n about an extension.

", + "smithy.api#documentation": "

Returns information about an extension's registration, including its current status and\n type and version identifiers.

\n

When you initiate a registration request using RegisterType, you can\n then use DescribeTypeRegistration to monitor the progress of that\n registration request.

\n

Once the registration request has completed, use DescribeType to return\n detailed information about an extension.
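A rough Soto (Swift) sketch of the flow described above, not part of the upstream model; it assumes an already-configured CloudFormation client named cloudFormation and a registration token previously returned by RegisterType, with member names following Soto's usual lowerCamelCase mapping of the shapes in this model:

    import SotoCloudFormation

    // Poll the registration request that RegisterType returned a token for.
    func checkRegistration(token: String, cloudFormation: CloudFormation) async throws {
        let status = try await cloudFormation.describeTypeRegistration(
            .init(registrationToken: token)
        )
        // COMPLETE, IN_PROGRESS, or FAILED; the version ARN appears once registration completes.
        print(status.progressStatus?.rawValue ?? "unknown", status.typeVersionArn ?? "-")
    }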

", "smithy.api#idempotent": {}, "smithy.waiters#waitable": { "TypeRegistrationComplete": { @@ -5708,7 +5708,7 @@ "target": "com.amazonaws.cloudformation#RegistrationToken", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier for this registration request.

\n

This registration token is generated by CloudFormation when you initiate a registration request using RegisterType.

", + "smithy.api#documentation": "

The identifier for this registration request.

\n

This registration token is generated by CloudFormation when you initiate a registration\n request using RegisterType.

", "smithy.api#required": {} } } @@ -5735,13 +5735,13 @@ "TypeArn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension being registered.

\n

For registration requests with a ProgressStatus of other than COMPLETE, this will be\n null.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension being registered.

\n

For registration requests with a ProgressStatus of other than\n COMPLETE, this will be null.

" } }, "TypeVersionArn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of this specific version of the extension being registered.

\n

For registration requests with a ProgressStatus of other than COMPLETE, this will be\n null.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of this specific version of the extension being\n registered.

\n

For registration requests with a ProgressStatus of other than\n COMPLETE, this will be null.

" } } }, @@ -5784,7 +5784,7 @@ "target": "com.amazonaws.cloudformation#DetectStackDriftOutput" }, "traits": { - "smithy.api#documentation": "

Detects whether a stack's actual configuration differs, or has drifted, from its expected\n configuration, as defined in the stack template and any values specified as template parameters. For each resource in\n the stack that supports drift detection, CloudFormation compares the actual configuration of the resource with its expected\n template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A\n stack is considered to have drifted if one or more of its resources differ from their expected template\n configurations. For more information, see Detecting Unregulated Configuration Changes to\n Stacks and Resources.

\n

Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources.

\n

For a list of stack resources that currently support drift detection, see Resources that Support Drift\n Detection.

\n

\n DetectStackDrift can take up to several minutes, depending on the number of resources contained\n within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack\n drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts\n to return drift information about the stack and its resources.

\n

When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks belonging to that stack.\n Perform DetectStackDrift directly on the nested stack itself.

" + "smithy.api#documentation": "

Detects whether a stack's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack\n template and any values specified as template parameters. For each resource in the stack that\n supports drift detection, CloudFormation compares the actual configuration of the resource with\n its expected template configuration. Only resource properties explicitly defined in the stack\n template are checked for drift. A stack is considered to have drifted if one or more of its\n resources differ from their expected template configurations. For more information, see Detect unmanaged configuration changes to stacks and resources with drift\n detection.

\n

Use DetectStackDrift to detect drift on all supported resources for a given\n stack, or DetectStackResourceDrift to detect drift on individual\n resources.

\n

For a list of stack resources that currently support drift detection, see Resource\n type support for imports and drift detection.

\n

\n DetectStackDrift can take up to several minutes, depending on the number of\n resources contained within the stack. Use DescribeStackDriftDetectionStatus\n to monitor the progress of a detect stack drift operation. Once the drift detection operation\n has completed, use DescribeStackResourceDrifts to return drift information\n about the stack and its resources.

\n

When detecting drift on a stack, CloudFormation doesn't detect drift on any nested stacks\n belonging to that stack. Perform DetectStackDrift directly on the nested stack\n itself.
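A minimal Soto sketch of the detect-then-poll flow described above; the stack name, poll interval, and configured cloudFormation client are assumptions, not part of the model:

    // Start drift detection for the whole stack, then poll the detection status.
    let detection = try await cloudFormation.detectStackDrift(.init(stackName: "my-stack"))
    var status = try await cloudFormation.describeStackDriftDetectionStatus(
        .init(stackDriftDetectionId: detection.stackDriftDetectionId)
    )
    while status.detectionStatus == .detectionInProgress {
        try await Task.sleep(nanoseconds: 5_000_000_000)   // wait 5 s between polls
        status = try await cloudFormation.describeStackDriftDetectionStatus(
            .init(stackDriftDetectionId: detection.stackDriftDetectionId)
        )
    }
    // Once detection completes, list the per-resource drift results.
    let drifts = try await cloudFormation.describeStackResourceDrifts(.init(stackName: "my-stack"))
    print(drifts.stackResourceDrifts.count)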

" } }, "com.amazonaws.cloudformation#DetectStackDriftInput": { @@ -5816,7 +5816,7 @@ "target": "com.amazonaws.cloudformation#StackDriftDetectionId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the drift detection results of this operation.

\n

CloudFormation generates new results, with a new drift detection ID, each time this operation is run. However, the number\n of drift results CloudFormation retains for any given stack, and for how long, may vary.

", + "smithy.api#documentation": "

The ID of the drift detection results of this operation.

\n

CloudFormation generates new results, with a new drift detection ID, each time this operation\n is run. However, the number of drift results CloudFormation retains for any given stack, and for\n how long, may vary.

", "smithy.api#required": {} } } @@ -5834,7 +5834,7 @@ "target": "com.amazonaws.cloudformation#DetectStackResourceDriftOutput" }, "traits": { - "smithy.api#documentation": "

Returns information about whether a resource's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack template and any values\n specified as template parameters. This information includes actual and expected property values for resources in\n which CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift.\n For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to\n Stacks and Resources.

\n

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

\n

Resources that don't currently support drift detection can't be checked. For a list of resources that support\n drift detection, see Resources that Support Drift\n Detection.

" + "smithy.api#documentation": "

Returns information about whether a resource's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack\n template and any values specified as template parameters. This information includes actual and\n expected property values for resources in which CloudFormation detects drift. Only resource\n properties explicitly defined in the stack template are checked for drift. For more\n information about stack and resource drift, see Detect unmanaged\n configuration changes to stacks and resources with drift detection.

\n

Use DetectStackResourceDrift to detect drift on individual resources, or\n DetectStackDrift to detect drift on all resources in a given stack that\n support drift detection.

\n

Resources that don't currently support drift detection can't be checked. For a list of\n resources that support drift detection, see Resource\n type support for imports and drift detection.
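The single-resource variant, as a sketch under the same assumptions (the logical resource ID and stack name are illustrative):

    let result = try await cloudFormation.detectStackResourceDrift(
        .init(logicalResourceId: "MyBucket", stackName: "my-stack")
    )
    // StackResourceDrift carries the expected/actual properties and the drift status.
    print(result.stackResourceDrift.stackResourceDriftStatus)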

" } }, "com.amazonaws.cloudformation#DetectStackResourceDriftInput": { @@ -5868,7 +5868,7 @@ "target": "com.amazonaws.cloudformation#StackResourceDrift", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Information about whether the resource's actual configuration has drifted from its expected template\n configuration, including actual and expected property values and any differences detected.

", + "smithy.api#documentation": "

Information about whether the resource's actual configuration has drifted from its\n expected template configuration, including actual and expected property values and any\n differences detected.

", "smithy.api#required": {} } } @@ -5897,7 +5897,7 @@ } ], "traits": { - "smithy.api#documentation": "

Detect drift on a stack set. When CloudFormation performs drift detection on a stack set, it performs drift\n detection on the stack associated with each stack instance in the stack set. For more information, see How CloudFormation performs drift\n detection on a stack set.

\n

\n DetectStackSetDrift returns the OperationId of the stack set drift detection\n operation. Use this operation id with DescribeStackSetOperation to monitor the progress of the\n drift detection operation. The drift detection operation may take some time, depending on the number of stack\n instances included in the stack set, in addition to the number of resources included in each stack.

\n

Once the operation has completed, use the following actions to return drift information:

\n
    \n
  • \n

    Use DescribeStackSet to return detailed information about the stack set, including detailed\n information about the last completed drift operation performed on the stack set. (Information\n about drift operations that are in progress isn't included.)

    \n
  • \n
  • \n

    Use ListStackInstances to return a list of stack instances belonging to the stack set,\n including the drift status and last drift time checked of each instance.

    \n
  • \n
  • \n

    Use DescribeStackInstance to return detailed information about a specific stack instance,\n including its drift status and last drift time checked.

    \n
  • \n
\n

For more information about performing a drift detection operation on a stack set, see Detecting unmanaged changes in\n stack sets.

\n

You can only run a single drift detection operation on a given stack set at one time.

\n

To stop a drift detection stack set operation, use StopStackSetOperation.

" + "smithy.api#documentation": "

Detect drift on a stack set. When CloudFormation performs drift detection on a stack set, it\n performs drift detection on the stack associated with each stack instance in the stack set.\n For more information, see How CloudFormation performs drift\n detection on a stack set.

\n

\n DetectStackSetDrift returns the OperationId of the stack set\n drift detection operation. Use this operation id with DescribeStackSetOperation to monitor the progress of the drift detection\n operation. The drift detection operation may take some time, depending on the number of stack\n instances included in the stack set, in addition to the number of resources included in each\n stack.

\n

Once the operation has completed, use the following actions to return drift\n information:

\n
    \n
  • \n

    Use DescribeStackSet to return detailed information about the stack\n set, including detailed information about the last completed drift\n operation performed on the stack set. (Information about drift operations that are in\n progress isn't included.)

    \n
  • \n
  • \n

    Use ListStackInstances to return a list of stack instances belonging\n to the stack set, including the drift status and last drift time checked of each\n instance.

    \n
  • \n
  • \n

    Use DescribeStackInstance to return detailed information about a\n specific stack instance, including its drift status and last drift time checked.

    \n
  • \n
\n

For more information about performing a drift detection operation on a stack set, see\n Detecting unmanaged changes in stack sets.

\n

You can only run a single drift detection operation on a given stack set at one\n time.

\n

To stop a drift detection stack set operation, use StopStackSetOperation.
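A hedged sketch of the stack set variant (stack set name illustrative, operation preferences omitted, configured cloudFormation client assumed):

    let driftOp = try await cloudFormation.detectStackSetDrift(.init(stackSetName: "my-stack-set"))
    // Monitor the returned operation ID with DescribeStackSetOperation.
    if let operationId = driftOp.operationId {
        let op = try await cloudFormation.describeStackSetOperation(
            .init(operationId: operationId, stackSetName: "my-stack-set")
        )
        print(op.stackSetOperation?.status?.rawValue ?? "unknown")
    }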

" } }, "com.amazonaws.cloudformation#DetectStackSetDriftInput": { @@ -5914,7 +5914,7 @@ "OperationPreferences": { "target": "com.amazonaws.cloudformation#StackSetOperationPreferences", "traits": { - "smithy.api#documentation": "

The user-specified preferences for how CloudFormation performs a stack set operation.

\n

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation\n options.

" + "smithy.api#documentation": "

The user-specified preferences for how CloudFormation performs a stack set operation.

\n

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation options.

" } }, "OperationId": { @@ -5927,7 +5927,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -5941,7 +5941,7 @@ "OperationId": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

The ID of the drift detection stack set operation.

\n

You can use this operation ID with DescribeStackSetOperation to monitor the progress of the\n drift detection operation.

" + "smithy.api#documentation": "

The ID of the drift detection stack set operation.

\n

You can use this operation ID with DescribeStackSetOperation to monitor\n the progress of the drift detection operation.

" } } }, @@ -6013,7 +6013,7 @@ "target": "com.amazonaws.cloudformation#EstimateTemplateCostOutput" }, "traits": { - "smithy.api#documentation": "

Returns the estimated monthly cost of a template. The return value is an Amazon Web Services Simple Monthly\n Calculator URL with a query string that describes the resources required to run the template.

" + "smithy.api#documentation": "

Returns the estimated monthly cost of a template. The return value is an Amazon Web Services Simple\n Monthly Calculator URL with a query string that describes the resources required to run the\n template.
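A short sketch of the call, with a placeholder template body and the usual assumptions about the configured client:

    let templateJSON = "..."   // your template body (placeholder)
    let estimate = try await cloudFormation.estimateTemplateCost(
        .init(templateBody: templateJSON)   // or templateURL: for an S3-hosted template
    )
    print(estimate.url ?? "no URL returned")   // Simple Monthly Calculator URL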

" } }, "com.amazonaws.cloudformation#EstimateTemplateCostInput": { @@ -6022,13 +6022,13 @@ "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": { - "smithy.api#documentation": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.\n (For more information, go to Template Anatomy in the\n CloudFormation User Guide.)

\n

Conditional: You must pass TemplateBody or TemplateURL. If both are passed, only\n TemplateBody is used.

" + "smithy.api#documentation": "

Structure containing the template body with a minimum length of 1 byte and a maximum\n length of 51,200 bytes.

\n

Conditional: You must pass TemplateBody or TemplateURL. If both\n are passed, only TemplateBody is used.

" } }, "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": { - "smithy.api#documentation": "

Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the\n CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://.

\n

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only\n TemplateBody is used.

" + "smithy.api#documentation": "

Location of file containing the template body. The URL must point to a template that's\n located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must\n start with https://.

\n

Conditional: You must pass TemplateURL or TemplateBody. If both\n are passed, only TemplateBody is used.

" } }, "Parameters": { @@ -6049,7 +6049,7 @@ "Url": { "target": "com.amazonaws.cloudformation#Url", "traits": { - "smithy.api#documentation": "

An Amazon Web Services Simple Monthly Calculator URL with a query string that describes the resources required to\n run the template.

" + "smithy.api#documentation": "

An Amazon Web Services Simple Monthly Calculator URL with a query string that describes the resources\n required to run the template.

" } } }, @@ -6101,7 +6101,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a stack using the input information that was provided when the specified change set was created. After\n the call successfully completes, CloudFormation starts updating the stack. Use the DescribeStacks action to\n view the status of the update.

\n

When you execute a change set, CloudFormation deletes all other change sets associated with the stack because they aren't\n valid for the updated stack.

\n

If a stack policy is associated with the stack, CloudFormation enforces the policy during the update. You can't specify a\n temporary stack policy that overrides the current policy.

\n

To create a change set for the entire stack hierarchy, IncludeNestedStacks must have been set to\n True.

" + "smithy.api#documentation": "

Updates a stack using the input information that was provided when the specified change\n set was created. After the call successfully completes, CloudFormation starts updating the stack.\n Use the DescribeStacks action to view the status of the update.

\n

When you execute a change set, CloudFormation deletes all other change sets associated with\n the stack because they aren't valid for the updated stack.

\n

If a stack policy is associated with the stack, CloudFormation enforces the policy during the\n update. You can't specify a temporary stack policy that overrides the current policy.

\n

To create a change set for the entire stack hierarchy, IncludeNestedStacks\n must have been set to True.
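Sketch of executing a change set and then checking the stack with DescribeStacks, as the text above describes; names and the client request token are illustrative:

    _ = try await cloudFormation.executeChangeSet(
        .init(
            changeSetName: "my-changeset",
            clientRequestToken: "execute-changeset-example-1",   // lets retried requests be deduplicated
            stackName: "my-stack"
        )
    )
    // Follow the update status afterwards.
    let stacks = try await cloudFormation.describeStacks(.init(stackName: "my-stack"))
    print(stacks.stacks?.first?.stackStatus.rawValue ?? "unknown")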

" } }, "com.amazonaws.cloudformation#ExecuteChangeSetInput": { @@ -6111,32 +6111,32 @@ "target": "com.amazonaws.cloudformation#ChangeSetNameOrId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of the change set that you want use to update the specified stack.

", + "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of the change set that you want use to update the\n specified stack.

", "smithy.api#required": {} } }, "StackName": { "target": "com.amazonaws.cloudformation#StackNameOrId", "traits": { - "smithy.api#documentation": "

If you specified the name of a change set, specify the stack name or Amazon Resource Name (ARN) that's\n associated with the change set you want to execute.

" + "smithy.api#documentation": "

If you specified the name of a change set, specify the stack name or Amazon Resource Name\n (ARN) that's associated with the change set you want to execute.

" } }, "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

A unique identifier for this ExecuteChangeSet request. Specify this token if you plan to retry\n requests so that CloudFormation knows that you're not attempting to execute a change set to update a stack with the same name.\n You might retry ExecuteChangeSet requests to ensure that CloudFormation successfully received them.

" + "smithy.api#documentation": "

A unique identifier for this ExecuteChangeSet request. Specify this token if\n you plan to retry requests so that CloudFormation knows that you're not attempting to execute a\n change set to update a stack with the same name. You might retry ExecuteChangeSet\n requests to ensure that CloudFormation successfully received them.

" } }, "DisableRollback": { "target": "com.amazonaws.cloudformation#DisableRollback", "traits": { - "smithy.api#documentation": "

Preserves the state of previously provisioned resources when an operation fails. This parameter can't be\n specified when the OnStackFailure parameter to the CreateChangeSet API operation was\n specified.

\n
    \n
  • \n

    \n True - if the stack creation fails, do nothing. This is equivalent to specifying\n DO_NOTHING for the OnStackFailure parameter to the CreateChangeSet API operation.

    \n
  • \n
  • \n

    \n False - if the stack creation fails, roll back the stack. This is equivalent to specifying\n ROLLBACK for the OnStackFailure parameter to the CreateChangeSet API operation.

    \n
  • \n
\n

Default: True\n

" + "smithy.api#documentation": "

Preserves the state of previously provisioned resources when an operation fails. This\n parameter can't be specified when the OnStackFailure parameter to the CreateChangeSet API operation was specified.

\n
    \n
  • \n

    \n True - if the stack creation fails, do nothing. This is equivalent to\n specifying DO_NOTHING for the OnStackFailure parameter to the\n CreateChangeSet API operation.

    \n
  • \n
  • \n

    \n False - if the stack creation fails, roll back the stack. This is\n equivalent to specifying ROLLBACK for the OnStackFailure\n parameter to the CreateChangeSet API operation.

    \n
  • \n
\n

Default: True\n

" } }, "RetainExceptOnCreate": { "target": "com.amazonaws.cloudformation#RetainExceptOnCreate", "traits": { - "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation rolls back. This includes\n newly created resources marked with a deletion policy of Retain.

\n

Default: false\n

" + "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation\n rolls back. This includes newly created resources marked with a deletion policy of\n Retain.

\n

Default: false\n

" } } }, @@ -6216,13 +6216,13 @@ "Name": { "target": "com.amazonaws.cloudformation#ExportName", "traits": { - "smithy.api#documentation": "

The name of exported output value. Use this name and the Fn::ImportValue function to import the\n associated value into other stacks. The name is defined in the Export field in the associated stack's\n Outputs section.

" + "smithy.api#documentation": "

The name of exported output value. Use this name and the Fn::ImportValue\n function to import the associated value into other stacks. The name is defined in the\n Export field in the associated stack's Outputs section.

" } }, "Value": { "target": "com.amazonaws.cloudformation#ExportValue", "traits": { - "smithy.api#documentation": "

The value of the exported output, such as a resource physical ID. This value is defined in the\n Export field in the associated stack's Outputs section.

" + "smithy.api#documentation": "

The value of the exported output, such as a resource physical ID. This value is defined in\n the Export field in the associated stack's Outputs section.

" } } }, @@ -6432,7 +6432,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves a generated template. If the template is in an InProgress or Pending status\n then the template returned will be the template when the template was last in a Complete status. If the\n template has not yet been in a Complete status then an empty template will be returned.

", + "smithy.api#documentation": "

Retrieves a generated template. If the template is in an InProgress or\n Pending status then the template returned will be the template when the\n template was last in a Complete status. If the template has not yet been in a\n Complete status then an empty template will be returned.
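Sketch of the call (the generated template name is illustrative; configured client assumed):

    let generated = try await cloudFormation.getGeneratedTemplate(
        .init(generatedTemplateName: "MyGeneratedTemplate")
    )
    // Empty until the template has reached a Complete status at least once.
    print(generated.templateBody ?? "")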

", "smithy.api#examples": [ { "title": "To get a generated template in JSON format", @@ -6473,7 +6473,7 @@ "target": "com.amazonaws.cloudformation#GeneratedTemplateName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of the generated template. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For example,\n arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc\n .

", + "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of the generated template. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}.\n For example,\n arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc\n .

", "smithy.api#required": {} } } @@ -6494,7 +6494,7 @@ "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": { - "smithy.api#documentation": "

The template body of the generated template, in the language specified by the Language\n parameter.

" + "smithy.api#documentation": "

The template body of the generated template, in the language specified by the\n Language parameter.

" } } }, @@ -6511,7 +6511,7 @@ "target": "com.amazonaws.cloudformation#GetStackPolicyOutput" }, "traits": { - "smithy.api#documentation": "

Returns the stack policy for a specified stack. If a stack doesn't have a policy, a null value is\n returned.

" + "smithy.api#documentation": "

Returns the stack policy for a specified stack. If a stack doesn't have a policy, a null\n value is returned.
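Sketch (stack name illustrative; configured client assumed):

    let policy = try await cloudFormation.getStackPolicy(.init(stackName: "my-stack"))
    print(policy.stackPolicyBody ?? "no stack policy attached")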

" } }, "com.amazonaws.cloudformation#GetStackPolicyInput": { @@ -6521,7 +6521,7 @@ "target": "com.amazonaws.cloudformation#StackName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or unique stack ID that's associated with the stack whose policy you want to get.

", + "smithy.api#documentation": "

The name or unique stack ID that's associated with the stack whose policy you want to\n get.

", "smithy.api#required": {} } } @@ -6537,7 +6537,7 @@ "StackPolicyBody": { "target": "com.amazonaws.cloudformation#StackPolicyBody", "traits": { - "smithy.api#documentation": "

Structure containing the stack policy body. (For more information, go to Prevent Updates to Stack Resources in\n the CloudFormation User Guide.)

" + "smithy.api#documentation": "

Structure containing the stack policy body. (For more information, see Prevent updates to stack resources in the\n CloudFormation User Guide.)

" } } }, @@ -6560,7 +6560,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the template body for a specified stack. You can get the template for running or deleted stacks.

\n

For deleted stacks, GetTemplate returns the template for up to 90 days after the stack has been\n deleted.

\n \n

If the template doesn't exist, a ValidationError is returned.

\n
" + "smithy.api#documentation": "

Returns the template body for a specified stack. You can get the template for running or\n deleted stacks.

\n

For deleted stacks, GetTemplate returns the template for up to 90 days after\n the stack has been deleted.

\n \n

If the template doesn't exist, a ValidationError is returned.

\n
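Sketch showing the two template stages described above (names assumed; configured client assumed):

    let original = try await cloudFormation.getTemplate(
        .init(stackName: "my-stack", templateStage: .original)
    )
    let processed = try await cloudFormation.getTemplate(
        .init(stackName: "my-stack", templateStage: .processed)
    )
    // Identical unless the template uses transforms.
    print(original.templateBody == processed.templateBody)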
" } }, "com.amazonaws.cloudformation#GetTemplateInput": { @@ -6569,19 +6569,19 @@ "StackName": { "target": "com.amazonaws.cloudformation#StackName", "traits": { - "smithy.api#documentation": "

The name or the unique stack ID that's associated with the stack, which aren't always interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

" + "smithy.api#documentation": "

The name or the unique stack ID that's associated with the stack, which aren't always\n interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

" } }, "ChangeSetName": { "target": "com.amazonaws.cloudformation#ChangeSetNameOrId", "traits": { - "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of a change set for which CloudFormation returns the associated template. If\n you specify a name, you must also specify the StackName.

" + "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of a change set for which CloudFormation returns the\n associated template. If you specify a name, you must also specify the\n StackName.

" } }, "TemplateStage": { "target": "com.amazonaws.cloudformation#TemplateStage", "traits": { - "smithy.api#documentation": "

For templates that include transforms, the stage of the template that CloudFormation returns. To get the\n user-submitted template, specify Original. To get the template after CloudFormation has processed all\n transforms, specify Processed.

\n

If the template doesn't include transforms, Original and Processed return the same\n template. By default, CloudFormation specifies Processed.

" + "smithy.api#documentation": "

For templates that include transforms, the stage of the template that CloudFormation returns.\n To get the user-submitted template, specify Original. To get the template after\n CloudFormation has processed all transforms, specify Processed.

\n

If the template doesn't include transforms, Original and\n Processed return the same template. By default, CloudFormation specifies\n Processed.

" } } }, @@ -6596,13 +6596,13 @@ "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": { - "smithy.api#documentation": "

Structure containing the template body. (For more information, go to Template Anatomy in the\n CloudFormation User Guide.)

\n

CloudFormation returns the same template that was used when the stack was created.

" + "smithy.api#documentation": "

Structure containing the template body.

\n

CloudFormation returns the same template that was used when the stack was created.

" } }, "StagesAvailable": { "target": "com.amazonaws.cloudformation#StageList", "traits": { - "smithy.api#documentation": "

The stage of the template that you can retrieve. For stacks, the Original and\n Processed templates are always available. For change sets, the Original template is always\n available. After CloudFormation finishes creating the change set, the Processed template becomes\n available.

" + "smithy.api#documentation": "

The stage of the template that you can retrieve. For stacks, the Original and\n Processed templates are always available. For change sets, the\n Original template is always available. After CloudFormation finishes creating the\n change set, the Processed template becomes available.

" } } }, @@ -6625,7 +6625,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about a new or existing template. The GetTemplateSummary action is useful for\n viewing parameter information, such as default parameter values and parameter types, before you create or update a\n stack or stack set.

\n

You can use the GetTemplateSummary action when you submit a template, or you can get template\n information for a stack set, or a running or deleted stack.

\n

For deleted stacks, GetTemplateSummary returns the template information for up to 90 days after the\n stack has been deleted. If the template doesn't exist, a ValidationError is returned.

" + "smithy.api#documentation": "

Returns information about a new or existing template. The GetTemplateSummary\n action is useful for viewing parameter information, such as default parameter values and\n parameter types, before you create or update a stack or stack set.

\n

You can use the GetTemplateSummary action when you submit a template, or you\n can get template information for a stack set, or a running or deleted stack.

\n

For deleted stacks, GetTemplateSummary returns the template information for\n up to 90 days after the stack has been deleted. If the template doesn't exist, a\n ValidationError is returned.
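Sketch of inspecting a template before creating a stack, as described above; the template URL is illustrative and only one template source parameter may be given:

    let summary = try await cloudFormation.getTemplateSummary(
        .init(templateURL: "https://my-bucket.s3.amazonaws.com/template.yaml")
    )
    for parameter in summary.parameters ?? [] {
        print(parameter.parameterKey ?? "?", parameter.defaultValue ?? "(no default)")
    }
    print(summary.capabilities ?? [])   // e.g. CAPABILITY_IAM if the template contains IAM resources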

" } }, "com.amazonaws.cloudformation#GetTemplateSummaryInput": { @@ -6634,31 +6634,31 @@ "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": { - "smithy.api#documentation": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For\n more information about templates, see Template anatomy in the\n CloudFormation User Guide.

\n

Conditional: You must specify only one of the following parameters: StackName,\n StackSetName, TemplateBody, or TemplateURL.

" + "smithy.api#documentation": "

Structure containing the template body with a minimum length of 1 byte and a maximum\n length of 51,200 bytes.

\n

Conditional: You must specify only one of the following parameters:\n StackName, StackSetName, TemplateBody, or\n TemplateURL.

" } }, "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": { - "smithy.api#documentation": "

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's\n located in an Amazon S3 bucket or a Systems Manager document. For more information about templates, see\n Template anatomy\n in the CloudFormation User Guide. The location for an Amazon S3 bucket must start with\n https://.

\n

Conditional: You must specify only one of the following parameters: StackName,\n StackSetName, TemplateBody, or TemplateURL.

" + "smithy.api#documentation": "

Location of file containing the template body. The URL must point to a template (max size:\n 460,800 bytes) that's located in an Amazon S3 bucket or a Systems Manager document. The location\n for an Amazon S3 bucket must start with https://.

\n

Conditional: You must specify only one of the following parameters:\n StackName, StackSetName, TemplateBody, or\n TemplateURL.

" } }, "StackName": { "target": "com.amazonaws.cloudformation#StackNameOrId", "traits": { - "smithy.api#documentation": "

The name or the stack ID that's associated with the stack, which aren't always interchangeable. For running\n stacks, you can specify either the stack's name or its unique stack ID. For deleted stack, you must specify the\n unique stack ID.

\n

Conditional: You must specify only one of the following parameters: StackName,\n StackSetName, TemplateBody, or TemplateURL.

" + "smithy.api#documentation": "

The name or the stack ID that's associated with the stack, which aren't always\n interchangeable. For running stacks, you can specify either the stack's name or its unique\n stack ID. For deleted stack, you must specify the unique stack ID.

\n

Conditional: You must specify only one of the following parameters:\n StackName, StackSetName, TemplateBody, or\n TemplateURL.

" } }, "StackSetName": { "target": "com.amazonaws.cloudformation#StackSetNameOrId", "traits": { - "smithy.api#documentation": "

The name or unique ID of the stack set from which the stack was created.

\n

Conditional: You must specify only one of the following parameters: StackName,\n StackSetName, TemplateBody, or TemplateURL.

" + "smithy.api#documentation": "

The name or unique ID of the stack set from which the stack was created.

\n

Conditional: You must specify only one of the following parameters:\n StackName, StackSetName, TemplateBody, or\n TemplateURL.

" } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } }, "TemplateSummaryConfig": { @@ -6679,7 +6679,7 @@ "Parameters": { "target": "com.amazonaws.cloudformation#ParameterDeclarations", "traits": { - "smithy.api#documentation": "

A list of parameter declarations that describe various properties for each parameter.

" + "smithy.api#documentation": "

A list of parameter declarations that describe various properties for each\n parameter.

" } }, "Description": { @@ -6691,25 +6691,25 @@ "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "

The capabilities found within the template. If your template contains IAM resources, you must specify the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or UpdateStack actions with your template; otherwise, those actions return\n an InsufficientCapabilities error.

\n

For more information, see Acknowledging IAM Resources in\n CloudFormation Templates.

" + "smithy.api#documentation": "

The capabilities found within the template. If your template contains IAM resources, you\n must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for\n this parameter when you use the CreateStack or UpdateStack\n actions with your template; otherwise, those actions return an\n InsufficientCapabilities error.

\n

For more information, see Acknowledging IAM resources in CloudFormation templates.

" } }, "CapabilitiesReason": { "target": "com.amazonaws.cloudformation#CapabilitiesReason", "traits": { - "smithy.api#documentation": "

The list of resources that generated the values in the Capabilities response element.

" + "smithy.api#documentation": "

The list of resources that generated the values in the Capabilities response\n element.

" } }, "ResourceTypes": { "target": "com.amazonaws.cloudformation#ResourceTypes", "traits": { - "smithy.api#documentation": "

A list of all the template resource types that are defined in the template, such as\n AWS::EC2::Instance, AWS::Dynamo::Table, and Custom::MyCustomInstance.

" + "smithy.api#documentation": "

A list of all the template resource types that are defined in the template, such as\n AWS::EC2::Instance, AWS::Dynamo::Table, and\n Custom::MyCustomInstance.

" } }, "Version": { "target": "com.amazonaws.cloudformation#Version", "traits": { - "smithy.api#documentation": "

The Amazon Web Services template format version, which identifies the capabilities of the template.

" + "smithy.api#documentation": "

The Amazon Web Services template format version, which identifies the capabilities of the\n template.

" } }, "Metadata": { @@ -6727,7 +6727,7 @@ "ResourceIdentifierSummaries": { "target": "com.amazonaws.cloudformation#ResourceIdentifierSummaries", "traits": { - "smithy.api#documentation": "

A list of resource identifier summaries that describe the target resources of an import operation and the\n properties you can provide during the import to identify the target resources. For example, BucketName\n is a possible identifier property for an AWS::S3::Bucket resource.

" + "smithy.api#documentation": "

A list of resource identifier summaries that describe the target resources of an import\n operation and the properties you can provide during the import to identify the target\n resources. For example, BucketName is a possible identifier property for an\n AWS::S3::Bucket resource.

" } }, "Warnings": { @@ -7053,7 +7053,7 @@ } ], "traits": { - "smithy.api#documentation": "

Import existing stacks into a new stack sets. Use the stack import operation to import up to 10 stacks into a\n new stack set in the same account as the source stack or in a different administrator account and Region, by\n specifying the stack ID of the stack you intend to import.

" + "smithy.api#documentation": "

Import existing stacks into a new stack sets. Use the stack import operation to import up\n to 10 stacks into a new stack set in the same account as the source stack or in a different\n administrator account and Region, by specifying the stack ID of the stack you intend to\n import.
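Sketch (stack set name and stack ID are illustrative placeholders; configured client assumed):

    let importOp = try await cloudFormation.importStacksToStackSet(
        .init(
            stackIds: ["arn:aws:cloudformation:us-east-1:111122223333:stack/my-stack/example-stack-id"],
            stackSetName: "imported-stack-set"
        )
    )
    print(importOp.operationId ?? "")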

" } }, "com.amazonaws.cloudformation#ImportStacksToStackSetInput": { @@ -7063,14 +7063,14 @@ "target": "com.amazonaws.cloudformation#StackSetNameOrId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the stack set. The name must be unique in the Region where you create your stack set.

", + "smithy.api#documentation": "

The name of the stack set. The name must be unique in the Region where you create your\n stack set.

", "smithy.api#required": {} } }, "StackIds": { "target": "com.amazonaws.cloudformation#StackIdList", "traits": { - "smithy.api#documentation": "

The IDs of the stacks you are importing into a stack set. You import up to 10 stacks per stack set at a\n time.

\n

Specify either StackIds or StackIdsUrl.

" + "smithy.api#documentation": "

The IDs of the stacks you are importing into a stack set. You import up to 10 stacks per\n stack set at a time.

\n

Specify either StackIds or StackIdsUrl.

" } }, "StackIdsUrl": { @@ -7082,13 +7082,13 @@ "OrganizationalUnitIds": { "target": "com.amazonaws.cloudformation#OrganizationalUnitIdList", "traits": { - "smithy.api#documentation": "

The list of OU ID's to which the stacks being imported has to be mapped as deployment target.

" + "smithy.api#documentation": "

The list of OU ID's to which the stacks being imported has to be mapped as deployment\n target.

" } }, "OperationPreferences": { "target": "com.amazonaws.cloudformation#StackSetOperationPreferences", "traits": { - "smithy.api#documentation": "

The user-specified preferences for how CloudFormation performs a stack set operation.

\n

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation\n options.

" + "smithy.api#documentation": "

The user-specified preferences for how CloudFormation performs a stack set operation.

\n

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation options.

" } }, "OperationId": { @@ -7101,7 +7101,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    For service managed stack sets, specify DELEGATED_ADMIN.

    \n
  • \n
" + "smithy.api#documentation": "

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    For service managed stack sets, specify DELEGATED_ADMIN.

    \n
  • \n
" } } }, @@ -7293,7 +7293,7 @@ "target": "com.amazonaws.cloudformation#ListChangeSetsOutput" }, "traits": { - "smithy.api#documentation": "

Returns the ID and status of each active change set for a stack. For example, CloudFormation lists change sets that are\n in the CREATE_IN_PROGRESS or CREATE_PENDING state.

", + "smithy.api#documentation": "

Returns the ID and status of each active change set for a stack. For example, CloudFormation\n lists change sets that are in the CREATE_IN_PROGRESS or\n CREATE_PENDING state.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -7308,14 +7308,14 @@ "target": "com.amazonaws.cloudformation#StackNameOrId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or the Amazon Resource Name (ARN) of the stack for which you want to list change sets.

", + "smithy.api#documentation": "

The name or the Amazon Resource Name (ARN) of the stack for which you want to list change\n sets.

", "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

A string (provided by the ListChangeSets response output) that identifies the next page of\n change sets that you want to retrieve.

" + "smithy.api#documentation": "

A string (provided by the ListChangeSets response output) that\n identifies the next page of change sets that you want to retrieve.

" } } }, @@ -7330,13 +7330,13 @@ "Summaries": { "target": "com.amazonaws.cloudformation#ChangeSetSummaries", "traits": { - "smithy.api#documentation": "

A list of ChangeSetSummary structures that provides the ID and status of each change set for the\n specified stack.

" + "smithy.api#documentation": "

A list of ChangeSetSummary structures that provides the ID and status of each\n change set for the specified stack.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the output exceeds 1 MB, a string that identifies the next page of change sets. If there is no additional\n page, this value is null.

" + "smithy.api#documentation": "

If the output exceeds 1 MB, a string that identifies the next page of change sets. If\n there is no additional page, this value is null.

" } } }, @@ -7354,7 +7354,7 @@ "target": "com.amazonaws.cloudformation#ListExportsOutput" }, "traits": { - "smithy.api#documentation": "

Lists all exported output values in the account and Region in which you call this action. Use this action to see\n the exported output values that you can import into other stacks. To import values, use the \n Fn::ImportValue function.

\n

For more information, see CloudFormation export stack output\n values.

", + "smithy.api#documentation": "

Lists all exported output values in the account and Region in which you call this action.\n Use this action to see the exported output values that you can import into other stacks. To\n import values, use the \n Fn::ImportValue function.

\n

For more information, see Get exported outputs from a deployed CloudFormation stack.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -7368,7 +7368,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

A string (provided by the ListExports response output) that identifies the next page of\n exported output values that you asked to retrieve.

" + "smithy.api#documentation": "

A string (provided by the ListExports response output) that identifies\n the next page of exported output values that you asked to retrieve.

" } } }, @@ -7388,7 +7388,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the output exceeds 100 exported output values, a string that identifies the next page of exports. If there is\n no additional page, this value is null.

" + "smithy.api#documentation": "

If the output exceeds 100 exported output values, a string that identifies the next page\n of exports. If there is no additional page, this value is null.

" } } }, @@ -7426,7 +7426,7 @@ "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

If the number of available results exceeds this maximum, the response includes a NextToken value\n that you can use for the NextToken parameter to get the next set of results. By default the\n ListGeneratedTemplates API action will return at most 50 results in each response. The maximum value is\n 100.

" + "smithy.api#documentation": "

If the number of available results exceeds this maximum, the response includes a\n NextToken value that you can use for the NextToken parameter to\n get the next set of results. By default the ListGeneratedTemplates API action\n will return at most 50 results in each response. The maximum value is 100.

" } } }, @@ -7446,7 +7446,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call ListGeneratedTemplates again and use that value for the\n NextToken parameter. If the request returns all results, NextToken is set to an empty\n string.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call ListGeneratedTemplates again\n and use that value for the NextToken parameter. If the request returns all\n results, NextToken is set to an empty string.

" } } }, @@ -7463,7 +7463,7 @@ "target": "com.amazonaws.cloudformation#ListImportsOutput" }, "traits": { - "smithy.api#documentation": "

Lists all stacks that are importing an exported output value. To modify or remove an exported output value,\n first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.

\n

For more information about importing an exported output value, see the Fn::ImportValue\n function.

", + "smithy.api#documentation": "

Lists all stacks that are importing an exported output value. To modify or remove an\n exported output value, first use this action to see which stacks are using it. To see the\n exported output values in your account, see ListExports.

\n

For more information about importing an exported output value, see the Fn::ImportValue function.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -7478,14 +7478,14 @@ "target": "com.amazonaws.cloudformation#ExportName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the exported output value. CloudFormation returns the stack names that are importing this value.

", + "smithy.api#documentation": "

The name of the exported output value. CloudFormation returns the stack names that are\n importing this value.

", "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

A string (provided by the ListImports response output) that identifies the next page of stacks\n that are importing the specified exported output value.

" + "smithy.api#documentation": "

A string (provided by the ListImports response output) that identifies\n the next page of stacks that are importing the specified exported output value.

" } } }, @@ -7505,7 +7505,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

A string that identifies the next page of exports. If there is no additional page, this value is null.

" + "smithy.api#documentation": "

A string that identifies the next page of exports. If there is no additional page, this\n value is null.

" } } }, @@ -7530,7 +7530,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the related resources for a list of resources from a resource scan. The response indicates whether each\n returned resource is already managed by CloudFormation.

", + "smithy.api#documentation": "

Lists the related resources for a list of resources from a resource scan. The response\n indicates whether each returned resource is already managed by CloudFormation.

", "smithy.api#examples": [ { "title": "To list resource scan related resources", @@ -7611,7 +7611,7 @@ "target": "com.amazonaws.cloudformation#ScannedResourceIdentifiers", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The list of resources for which you want to get the related resources. Up to 100 resources can be\n provided.

", + "smithy.api#documentation": "

The list of resources for which you want to get the related resources. Up to 100 resources\n can be provided.

", "smithy.api#required": {} } }, @@ -7624,7 +7624,7 @@ "MaxResults": { "target": "com.amazonaws.cloudformation#BoxedMaxResults", "traits": { - "smithy.api#documentation": "

If the number of available results exceeds this maximum, the response includes a NextToken value\n that you can use for the NextToken parameter to get the next set of results. By default the\n ListResourceScanRelatedResources API action will return up to 100 results in each response. The maximum\n value is 100.

" + "smithy.api#documentation": "

If the number of available results exceeds this maximum, the response includes a\n NextToken value that you can use for the NextToken parameter to\n get the next set of results. By default the ListResourceScanRelatedResources API\n action will return up to 100 results in each response. The maximum value is 100.

" } } }, @@ -7638,13 +7638,13 @@ "RelatedResources": { "target": "com.amazonaws.cloudformation#RelatedResources", "traits": { - "smithy.api#documentation": "

List of up to MaxResults resources in the specified resource scan related to the specified\n resources.

" + "smithy.api#documentation": "

List of up to MaxResults resources in the specified resource scan related to\n the specified resources.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call ListResourceScanRelatedResources again and use that value for the\n NextToken parameter. If the request returns all results, NextToken is set to an empty\n string.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call\n ListResourceScanRelatedResources again and use that value for the\n NextToken parameter. If the request returns all results, NextToken\n is set to an empty string.

" } } }, @@ -7669,7 +7669,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the resources from a resource scan. The results can be filtered by resource identifier, resource type\n prefix, tag key, and tag value. Only resources that match all specified filters are returned. The response indicates\n whether each returned resource is already managed by CloudFormation.

", + "smithy.api#documentation": "

Lists the resources from a resource scan. The results can be filtered by resource\n identifier, resource type prefix, tag key, and tag value. Only resources that match all\n specified filters are returned. The response indicates whether each returned resource is\n already managed by CloudFormation.

", "smithy.api#examples": [ { "title": "To list the resources in your resource scan", @@ -7748,13 +7748,13 @@ "ResourceIdentifier": { "target": "com.amazonaws.cloudformation#ResourceIdentifier", "traits": { - "smithy.api#documentation": "

If specified, the returned resources will have the specified resource identifier (or one of them in the case\n where the resource has multiple identifiers).

" + "smithy.api#documentation": "

If specified, the returned resources will have the specified resource identifier (or one\n of them in the case where the resource has multiple identifiers).

" } }, "ResourceTypePrefix": { "target": "com.amazonaws.cloudformation#ResourceTypePrefix", "traits": { - "smithy.api#documentation": "

If specified, the returned resources will be of any of the resource types with the specified prefix.

" + "smithy.api#documentation": "

If specified, the returned resources will be of any of the resource types with the\n specified prefix.

" } }, "TagKey": { @@ -7778,7 +7778,7 @@ "MaxResults": { "target": "com.amazonaws.cloudformation#ResourceScannerMaxResults", "traits": { - "smithy.api#documentation": "

If the number of available results exceeds this maximum, the response includes a NextToken value\n that you can use for the NextToken parameter to get the next set of results. By default the\n ListResourceScanResources API action will return at most 100 results in each response. The maximum value\n is 100.

" + "smithy.api#documentation": "

If the number of available results exceeds this maximum, the response includes a\n NextToken value that you can use for the NextToken parameter to\n get the next set of results. By default the ListResourceScanResources API action\n will return at most 100 results in each response. The maximum value is 100.

" } } }, @@ -7792,13 +7792,13 @@ "Resources": { "target": "com.amazonaws.cloudformation#ScannedResources", "traits": { - "smithy.api#documentation": "

List of up to MaxResults resources in the specified resource scan that match all of the specified\n filters.

" + "smithy.api#documentation": "

List of up to MaxResults resources in the specified resource scan that match\n all of the specified filters.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call ListResourceScanResources again and use that value for the\n NextToken parameter. If the request returns all results, NextToken is set to an empty\n string.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call ListResourceScanResources\n again and use that value for the NextToken parameter. If the request returns all\n results, NextToken is set to an empty string.

" } } }, @@ -7815,7 +7815,7 @@ "target": "com.amazonaws.cloudformation#ListResourceScansOutput" }, "traits": { - "smithy.api#documentation": "

List the resource scans from newest to oldest. By default it will return up to 10 resource scans.

", + "smithy.api#documentation": "

List the resource scans from newest to oldest. By default it will return up to 10 resource\n scans.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -7836,7 +7836,7 @@ "MaxResults": { "target": "com.amazonaws.cloudformation#ResourceScannerMaxResults", "traits": { - "smithy.api#documentation": "

If the number of available results exceeds this maximum, the response includes a NextToken value\n that you can use for the NextToken parameter to get the next set of results. The default value is 10.\n The maximum value is 100.

" + "smithy.api#documentation": "

If the number of available results exceeds this maximum, the response includes a\n NextToken value that you can use for the NextToken parameter to\n get the next set of results. The default value is 10. The maximum value is 100.

" } } }, @@ -7856,7 +7856,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call ListResourceScans again and use that value for the NextToken\n parameter. If the request returns all results, NextToken is set to an empty string.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call ListResourceScans again and\n use that value for the NextToken parameter. If the request returns all results,\n NextToken is set to an empty string.

" } } }, @@ -7884,7 +7884,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns drift information for resources in a stack instance.

\n \n

\n ListStackInstanceResourceDrifts returns drift information for the most recent drift detection\n operation. If an operation is in progress, it may only return partial results.

\n
" + "smithy.api#documentation": "

Returns drift information for resources in a stack instance.

\n \n

\n ListStackInstanceResourceDrifts returns drift information for the most\n recent drift detection operation. If an operation is in progress, it may only return partial\n results.

\n
" } }, "com.amazonaws.cloudformation#ListStackInstanceResourceDriftsInput": { @@ -7901,19 +7901,19 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous paginated request didn't return all of the remaining results, the response object's\n NextToken parameter value is set to a token. To retrieve the next set of results, call this action\n again and assign that token to the request object's NextToken parameter. If there are no remaining\n results, the previous response object's NextToken parameter is set to null.

" + "smithy.api#documentation": "

If the previous paginated request didn't return all of the remaining results, the response\n object's NextToken parameter value is set to a token. To retrieve the next set of\n results, call this action again and assign that token to the request object's\n NextToken parameter. If there are no remaining results, the previous response\n object's NextToken parameter is set to null.

" } }, "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "StackInstanceResourceDriftStatuses": { "target": "com.amazonaws.cloudformation#StackResourceDriftStatusFilters", "traits": { - "smithy.api#documentation": "

The resource drift status of the stack instance.

\n
    \n
  • \n

    \n DELETED: The resource differs from its expected template configuration in that the resource has\n been deleted.

    \n
  • \n
  • \n

    \n MODIFIED: One or more resource properties differ from their expected template values.

    \n
  • \n
  • \n

    \n IN_SYNC: The resource's actual configuration matches its expected template configuration.

    \n
  • \n
  • \n

    \n NOT_CHECKED: CloudFormation doesn't currently return this value.

    \n
  • \n
" + "smithy.api#documentation": "

The resource drift status of the stack instance.

\n
    \n
  • \n

    \n DELETED: The resource differs from its expected template configuration in\n that the resource has been deleted.

    \n
  • \n
  • \n

    \n MODIFIED: One or more resource properties differ from their expected\n template values.

    \n
  • \n
  • \n

    \n IN_SYNC: The resource's actual configuration matches its expected\n template configuration.

    \n
  • \n
  • \n

    \n NOT_CHECKED: CloudFormation doesn't currently return this value.

    \n
  • \n
" } }, "StackInstanceAccount": { @@ -7943,7 +7943,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -7957,13 +7957,13 @@ "Summaries": { "target": "com.amazonaws.cloudformation#StackInstanceResourceDriftsSummaries", "traits": { - "smithy.api#documentation": "

A list of StackInstanceResourceDriftsSummary structures that contain information about the\n specified stack instances.

" + "smithy.api#documentation": "

A list of StackInstanceResourceDriftsSummary structures that contain\n information about the specified stack instances.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous paginated request didn't return all of the remaining results, the response object's\n NextToken parameter value is set to a token. To retrieve the next set of results, call this action\n again and assign that token to the request object's NextToken parameter. If there are no remaining\n results, the previous response object's NextToken parameter is set to null.

" + "smithy.api#documentation": "

If the previous paginated request didn't return all of the remaining results, the response\n object's NextToken parameter value is set to a token. To retrieve the next set of\n results, call this action again and assign that token to the request object's\n NextToken parameter. If there are no remaining results, the previous response\n object's NextToken parameter is set to null.

" } } }, @@ -7985,7 +7985,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns summary information about stack instances that are associated with the specified stack set. You can\n filter for stack instances that are associated with a specific Amazon Web Services account name or Region, or that\n have a specific status.

", + "smithy.api#documentation": "

Returns summary information about stack instances that are associated with the specified\n stack set. You can filter for stack instances that are associated with a specific\n Amazon Web Services account name or Region, or that have a specific status.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8008,13 +8008,13 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous request didn't return all the remaining results, the response's NextToken parameter\n value is set to a token. To retrieve the next set of results, call ListStackInstances again and assign\n that token to the request object's NextToken parameter. If there are no remaining results, the previous\n response object's NextToken parameter is set to null.

" + "smithy.api#documentation": "

If the previous request didn't return all the remaining results, the response's\n NextToken parameter value is set to a token. To retrieve the next set of\n results, call ListStackInstances again and assign that token to the request\n object's NextToken parameter. If there are no remaining results, the previous\n response object's NextToken parameter is set to null.

" } }, "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "Filters": { @@ -8038,7 +8038,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -8052,13 +8052,13 @@ "Summaries": { "target": "com.amazonaws.cloudformation#StackInstanceSummaries", "traits": { - "smithy.api#documentation": "

A list of StackInstanceSummary structures that contain information about the specified stack\n instances.

" + "smithy.api#documentation": "

A list of StackInstanceSummary structures that contain information about the\n specified stack instances.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call ListStackInstances again and assign that token to the request object's\n NextToken parameter. If the request returns all results, NextToken is set to\n null.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call ListStackInstances again and\n assign that token to the request object's NextToken parameter. If the request\n returns all results, NextToken is set to null.

" } } }, @@ -8075,7 +8075,7 @@ "target": "com.amazonaws.cloudformation#ListStackResourcesOutput" }, "traits": { - "smithy.api#documentation": "

Returns descriptions of all resources of the specified stack.

\n

For deleted stacks, ListStackResources returns resource information for up to 90 days after the stack has been\n deleted.

", + "smithy.api#documentation": "

Returns descriptions of all resources of the specified stack.

\n

For deleted stacks, ListStackResources returns resource information for up to 90 days\n after the stack has been deleted.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8090,14 +8090,14 @@ "target": "com.amazonaws.cloudformation#StackName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or the unique stack ID that is associated with the stack, which aren't always interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

", + "smithy.api#documentation": "

The name or the unique stack ID that is associated with the stack, which aren't always\n interchangeable:

\n
    \n
  • \n

    Running stacks: You can specify either the stack's name or its unique stack ID.

    \n
  • \n
  • \n

    Deleted stacks: You must specify the unique stack ID.

    \n
  • \n
\n

Default: There is no default value.

", "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

A string that identifies the next page of stack resources that you want to retrieve.

" + "smithy.api#documentation": "

A string that identifies the next page of stack resources that you want to\n retrieve.

" } } }, @@ -8118,7 +8118,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the output exceeds 1 MB, a string that identifies the next page of stack resources. If no additional page\n exists, this value is null.

" + "smithy.api#documentation": "

If the output exceeds 1 MB, a string that identifies the next page of stack resources. If\n no additional page exists, this value is null.

" } } }, @@ -8151,26 +8151,26 @@ "target": "com.amazonaws.cloudformation#StackSetNameOrId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or unique ID of the stack set that you want to get automatic deployment targets for.

", + "smithy.api#documentation": "

The name or unique ID of the stack set that you want to get automatic deployment targets\n for.

", "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

A string that identifies the next page of stack set deployment targets that you want to retrieve.

" + "smithy.api#documentation": "

A string that identifies the next page of stack set deployment targets that you want to\n retrieve.

" } }, "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for StackSets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for StackSets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -8190,7 +8190,7 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call ListStackSetAutoDeploymentTargets again and use that value for the NextToken parameter. If the\n request returns all results, NextToken is set to an empty string.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call ListStackSetAutoDeploymentTargets again and use that value for the\n NextToken parameter. If the request returns all results, NextToken\n is set to an empty string.

" } } }, @@ -8246,19 +8246,19 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous request didn't return all the remaining results, the response object's NextToken\n parameter value is set to a token. To retrieve the next set of results, call\n ListStackSetOperationResults again and assign that token to the request object's NextToken\n parameter. If there are no remaining results, the previous response object's NextToken parameter is set\n to null.

" + "smithy.api#documentation": "

If the previous request didn't return all the remaining results, the response object's\n NextToken parameter value is set to a token. To retrieve the next set of\n results, call ListStackSetOperationResults again and assign that token to the\n request object's NextToken parameter. If there are no remaining results, the\n previous response object's NextToken parameter is set to\n null.

" } }, "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } }, "Filters": { @@ -8278,13 +8278,13 @@ "Summaries": { "target": "com.amazonaws.cloudformation#StackSetOperationResultSummaries", "traits": { - "smithy.api#documentation": "

A list of StackSetOperationResultSummary structures that contain information about the specified\n operation results, for accounts and Amazon Web Services Regions that are included in the operation.

" + "smithy.api#documentation": "

A list of StackSetOperationResultSummary structures that contain information\n about the specified operation results, for accounts and Amazon Web Services Regions that are included in the\n operation.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all results, NextToken is set to a token. To retrieve the next set of\n results, call ListOperationResults again and assign that token to the request object's\n NextToken parameter. If there are no remaining results, NextToken is set to\n null.

" + "smithy.api#documentation": "

If the request doesn't return all results, NextToken is set to a token. To\n retrieve the next set of results, call ListOperationResults again and assign that\n token to the request object's NextToken parameter. If there are no remaining\n results, NextToken is set to null.

" } } }, @@ -8322,26 +8322,26 @@ "target": "com.amazonaws.cloudformation#StackSetName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or unique ID of the stack set that you want to get operation summaries for.

", + "smithy.api#documentation": "

The name or unique ID of the stack set that you want to get operation summaries\n for.

", "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous paginated request didn't return all of the remaining results, the response object's\n NextToken parameter value is set to a token. To retrieve the next set of results, call\n ListStackSetOperations again and assign that token to the request object's NextToken\n parameter. If there are no remaining results, the previous response object's NextToken parameter is set\n to null.

" + "smithy.api#documentation": "

If the previous paginated request didn't return all of the remaining results, the response\n object's NextToken parameter value is set to a token. To retrieve the next set of\n results, call ListStackSetOperations again and assign that token to the request\n object's NextToken parameter. If there are no remaining results, the previous\n response object's NextToken parameter is set to null.

" } }, "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the organization's management account or as a delegated administrator in a\n member account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -8355,13 +8355,13 @@ "Summaries": { "target": "com.amazonaws.cloudformation#StackSetOperationSummaries", "traits": { - "smithy.api#documentation": "

A list of StackSetOperationSummary structures that contain summary information about operations for\n the specified stack set.

" + "smithy.api#documentation": "

A list of StackSetOperationSummary structures that contain summary\n information about operations for the specified stack set.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all results, NextToken is set to a token. To retrieve the next set of\n results, call ListOperationResults again and assign that token to the request object's\n NextToken parameter. If there are no remaining results, NextToken is set to\n null.

" + "smithy.api#documentation": "

If the request doesn't return all results, NextToken is set to a token. To\n retrieve the next set of results, call ListOperationResults again and assign that\n token to the request object's NextToken parameter. If there are no remaining\n results, NextToken is set to null.

" } } }, @@ -8378,7 +8378,7 @@ "target": "com.amazonaws.cloudformation#ListStackSetsOutput" }, "traits": { - "smithy.api#documentation": "

Returns summary information about stack sets that are associated with the user.

\n
    \n
  • \n

    [Self-managed permissions] If you set the CallAs parameter to SELF while signed in\n to your Amazon Web Services account, ListStackSets returns all self-managed stack sets in your Amazon Web Services account.

    \n
  • \n
  • \n

    [Service-managed permissions] If you set the CallAs parameter to SELF while signed\n in to the organization's management account, ListStackSets returns all stack sets in the\n management account.

    \n
  • \n
  • \n

    [Service-managed permissions] If you set the CallAs parameter to DELEGATED_ADMIN\n while signed in to your member account, ListStackSets returns all stack sets with service-managed\n permissions in the management account.

    \n
  • \n
", + "smithy.api#documentation": "

Returns summary information about stack sets that are associated with the user.

\n
    \n
  • \n

    [Self-managed permissions] If you set the CallAs parameter to\n SELF while signed in to your Amazon Web Services account, ListStackSets\n returns all self-managed stack sets in your Amazon Web Services account.

    \n
  • \n
  • \n

    [Service-managed permissions] If you set the CallAs parameter to\n SELF while signed in to the organization's management account,\n ListStackSets returns all stack sets in the management account.

    \n
  • \n
  • \n

    [Service-managed permissions] If you set the CallAs parameter to\n DELEGATED_ADMIN while signed in to your member account,\n ListStackSets returns all stack sets with service-managed permissions in\n the management account.

    \n
  • \n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8393,13 +8393,13 @@ "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous paginated request didn't return all the remaining results, the response object's\n NextToken parameter value is set to a token. To retrieve the next set of results, call\n ListStackSets again and assign that token to the request object's NextToken parameter. If\n there are no remaining results, the previous response object's NextToken parameter is set to\n null.

" + "smithy.api#documentation": "

If the previous paginated request didn't return all the remaining results, the response\n object's NextToken parameter value is set to a token. To retrieve the next set of\n results, call ListStackSets again and assign that token to the request object's\n NextToken parameter. If there are no remaining results, the previous response\n object's NextToken parameter is set to null.

" } }, "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "Status": { @@ -8411,7 +8411,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator\n in the management account or as a delegated administrator in a member\n account.

\n

By default, SELF is specified. Use SELF for stack sets with\n self-managed permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify\n SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify\n DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a\n delegated administrator in the CloudFormation User Guide.

    \n
  • \n
" } } }, @@ -8425,13 +8425,13 @@ "Summaries": { "target": "com.amazonaws.cloudformation#StackSetSummaries", "traits": { - "smithy.api#documentation": "

A list of StackSetSummary structures that contain information about the user's stack sets.

" + "smithy.api#documentation": "

A list of StackSetSummary structures that contain information about the\n user's stack sets.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all of the remaining results, NextToken is set to a token. To\n retrieve the next set of results, call ListStackInstances again and assign that token to the request\n object's NextToken parameter. If the request returns all results, NextToken is set to\n null.

" + "smithy.api#documentation": "

If the request doesn't return all of the remaining results, NextToken is set\n to a token. To retrieve the next set of results, call ListStackInstances again\n and assign that token to the request object's NextToken parameter. If the request\n returns all results, NextToken is set to null.

" } } }, @@ -8448,7 +8448,7 @@ "target": "com.amazonaws.cloudformation#ListStacksOutput" }, "traits": { - "smithy.api#documentation": "

Returns the summary information for stacks whose status matches the specified StackStatusFilter. Summary\n information for stacks that have been deleted is kept for 90 days after the stack is deleted. If no StackStatusFilter\n is specified, summary information for all stacks is returned (including existing stacks and stacks that have been\n deleted).

", + "smithy.api#documentation": "

Returns the summary information for stacks whose status matches the specified\n StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days\n after the stack is deleted. If no StackStatusFilter is specified, summary information for all\n stacks is returned (including existing stacks and stacks that have been deleted).

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8468,7 +8468,7 @@ "StackStatusFilter": { "target": "com.amazonaws.cloudformation#StackStatusFilter", "traits": { - "smithy.api#documentation": "

Stack status to use as a filter. Specify one or more stack status codes to list only stacks with the specified\n status codes. For a complete list of stack status codes, see the StackStatus parameter of the Stack data type.

" + "smithy.api#documentation": "

Stack status to use as a filter. Specify one or more stack status codes to list only\n stacks with the specified status codes. For a complete list of stack status codes, see the\n StackStatus parameter of the Stack data type.

" } } }, @@ -8483,13 +8483,13 @@ "StackSummaries": { "target": "com.amazonaws.cloudformation#StackSummaries", "traits": { - "smithy.api#documentation": "

A list of StackSummary structures containing information about the specified stacks.

" + "smithy.api#documentation": "

A list of StackSummary structures containing information about the specified\n stacks.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page\n exists, this value is null.

" + "smithy.api#documentation": "

If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If\n no additional page exists, this value is null.

" } } }, @@ -8527,19 +8527,19 @@ "Type": { "target": "com.amazonaws.cloudformation#RegistryType", "traits": { - "smithy.api#documentation": "

The kind of extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The kind of extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "TypeArn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "RegistrationStatusFilter": { @@ -8551,13 +8551,13 @@ "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous paginated request didn't return all the remaining results, the response object's\n NextToken parameter value is set to a token. To retrieve the next set of results, call this action again\n and assign that token to the request object's NextToken parameter. If there are no remaining results,\n the previous response object's NextToken parameter is set to null.

" + "smithy.api#documentation": "

If the previous paginated request didn't return all the remaining results, the response\n object's NextToken parameter value is set to a token. To retrieve the next set of\n results, call this action again and assign that token to the request object's\n NextToken parameter. If there are no remaining results, the previous response\n object's NextToken parameter is set to null.

" } } }, @@ -8571,13 +8571,13 @@ "RegistrationTokenList": { "target": "com.amazonaws.cloudformation#RegistrationTokenList", "traits": { - "smithy.api#documentation": "

A list of extension registration tokens.

\n

Use DescribeTypeRegistration to return detailed information about a type registration\n request.

" + "smithy.api#documentation": "

A list of extension registration tokens.

\n

Use DescribeTypeRegistration to return detailed information about a type\n registration request.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call this action again and assign that token to the request object's NextToken\n parameter. If the request returns all results, NextToken is set to null.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call this action again and assign that token to\n the request object's NextToken parameter. If the request returns all results,\n NextToken is set to null.

" } } }, @@ -8614,37 +8614,37 @@ "Type": { "target": "com.amazonaws.cloudformation#RegistryType", "traits": { - "smithy.api#documentation": "

The kind of the extension.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The kind of the extension.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension for which you want version summary information.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The name of the extension for which you want version summary information.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "Arn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension for which you want version summary information.

\n

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension for which you want version summary\n information.

\n

Conditional: You must specify either TypeName and Type, or\n Arn.

" } }, "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous paginated request didn't return all of the remaining results, the response object's\n NextToken parameter value is set to a token. To retrieve the next set of results, call this action again\n and assign that token to the request object's NextToken parameter. If there are no remaining results,\n the previous response object's NextToken parameter is set to null.

" + "smithy.api#documentation": "

If the previous paginated request didn't return all of the remaining results, the response\n object's NextToken parameter value is set to a token. To retrieve the next set of\n results, call this action again and assign that token to the request object's\n NextToken parameter. If there are no remaining results, the previous response\n object's NextToken parameter is set to null.

" } }, "DeprecatedStatus": { "target": "com.amazonaws.cloudformation#DeprecatedStatus", "traits": { - "smithy.api#documentation": "

The deprecation status of the extension versions that you want to get summary information about.

\n

Valid values include:

\n
    \n
  • \n

    \n LIVE: The extension version is registered and can be used in CloudFormation operations, dependent on\n its provisioning behavior and visibility scope.

    \n
  • \n
  • \n

    \n DEPRECATED: The extension version has been deregistered and can no longer be used in CloudFormation operations.

    \n
  • \n
\n

The default is LIVE.

" + "smithy.api#documentation": "

The deprecation status of the extension versions that you want to get summary information\n about.

\n

Valid values include:

\n
    \n
  • \n

    \n LIVE: The extension version is registered and can be used in CloudFormation\n operations, dependent on its provisioning behavior and visibility scope.

    \n
  • \n
  • \n

    \n DEPRECATED: The extension version has been deregistered and can no longer\n be used in CloudFormation operations.

    \n
  • \n
\n

The default is LIVE.

" } }, "PublisherId": { @@ -8664,13 +8664,13 @@ "TypeVersionSummaries": { "target": "com.amazonaws.cloudformation#TypeVersionSummaries", "traits": { - "smithy.api#documentation": "

A list of TypeVersionSummary structures that contain information about the specified extension's\n versions.

" + "smithy.api#documentation": "

A list of TypeVersionSummary structures that contain information about the\n specified extension's versions.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all of the remaining results, NextToken is set to a token. To\n retrieve the next set of results, call this action again and assign that token to the request object's\n NextToken parameter. If the request returns all results, NextToken is set to\n null.

" + "smithy.api#documentation": "

If the request doesn't return all of the remaining results, NextToken is set\n to a token. To retrieve the next set of results, call this action again and assign that token\n to the request object's NextToken parameter. If the request returns all results,\n NextToken is set to null.

" } } }, @@ -8692,7 +8692,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns summary information about extension that have been registered with CloudFormation.

", + "smithy.api#documentation": "

Returns summary information about extension that have been registered with\n CloudFormation.

", "smithy.api#idempotent": {}, "smithy.api#paginated": { "inputToken": "NextToken", @@ -8708,19 +8708,19 @@ "Visibility": { "target": "com.amazonaws.cloudformation#Visibility", "traits": { - "smithy.api#documentation": "

The scope at which the extensions are visible and usable in CloudFormation operations.

\n

Valid values include:

\n
    \n
  • \n

    \n PRIVATE: Extensions that are visible and usable within this account and Region. This\n includes:

    \n
      \n
    • \n

      Private extensions you have registered in this account and Region.

      \n
    • \n
    • \n

      Public extensions that you have activated in this account and Region.

      \n
    • \n
    \n
  • \n
  • \n

    \n PUBLIC: Extensions that are publicly visible and available to be activated within any Amazon Web Services account. This includes extensions from Amazon Web Services, in addition to third-party publishers.

    \n
  • \n
\n

The default is PRIVATE.

" + "smithy.api#documentation": "

The scope at which the extensions are visible and usable in CloudFormation operations.

\n

Valid values include:

\n
    \n
  • \n

    \n PRIVATE: Extensions that are visible and usable within this account and\n Region. This includes:

    \n
      \n
    • \n

      Private extensions you have registered in this account and Region.

      \n
    • \n
    • \n

      Public extensions that you have activated in this account and Region.

      \n
    • \n
    \n
  • \n
  • \n

    \n PUBLIC: Extensions that are publicly visible and available to be\n activated within any Amazon Web Services account. This includes extensions from Amazon Web Services, in addition to\n third-party publishers.

    \n
  • \n
\n

The default is PRIVATE.

" } }, "ProvisioningType": { "target": "com.amazonaws.cloudformation#ProvisioningType", "traits": { - "smithy.api#documentation": "

For resource types, the provisioning behavior of the resource type. CloudFormation determines the provisioning type\n during registration, based on the types of handlers in the schema handler package submitted.

\n

Valid values include:

\n
    \n
  • \n

    \n FULLY_MUTABLE: The resource type includes an update handler to process updates to the type during\n stack update operations.

    \n
  • \n
  • \n

    \n IMMUTABLE: The resource type doesn't include an update handler, so the type can't be updated and\n must instead be replaced during stack update operations.

    \n
  • \n
  • \n

    \n NON_PROVISIONABLE: The resource type doesn't include create, read, and delete handlers, and\n therefore can't actually be provisioned.

    \n
  • \n
\n

The default is FULLY_MUTABLE.

" + "smithy.api#documentation": "

For resource types, the provisioning behavior of the resource type. CloudFormation determines\n the provisioning type during registration, based on the types of handlers in the schema\n handler package submitted.

\n

Valid values include:

\n
    \n
  • \n

    \n FULLY_MUTABLE: The resource type includes an update handler to process\n updates to the type during stack update operations.

    \n
  • \n
  • \n

    \n IMMUTABLE: The resource type doesn't include an update handler, so the\n type can't be updated and must instead be replaced during stack update operations.

    \n
  • \n
  • \n

    \n NON_PROVISIONABLE: The resource type doesn't include create, read, and\n delete handlers, and therefore can't actually be provisioned.

    \n
  • \n
\n

The default is FULLY_MUTABLE.

" } }, "DeprecatedStatus": { "target": "com.amazonaws.cloudformation#DeprecatedStatus", "traits": { - "smithy.api#documentation": "

The deprecation status of the extension that you want to get summary information about.

\n

Valid values include:

\n
    \n
  • \n

    \n LIVE: The extension is registered for use in CloudFormation operations.

    \n
  • \n
  • \n

    \n DEPRECATED: The extension has been deregistered and can no longer be used in CloudFormation operations.

    \n
  • \n
" + "smithy.api#documentation": "

The deprecation status of the extension that you want to get summary information\n about.

\n

Valid values include:

\n
    \n
  • \n

    \n LIVE: The extension is registered for use in CloudFormation\n operations.

    \n
  • \n
  • \n

    \n DEPRECATED: The extension has been deregistered and can no longer be used\n in CloudFormation operations.

    \n
  • \n
" } }, "Type": { @@ -8732,19 +8732,19 @@ "Filters": { "target": "com.amazonaws.cloudformation#TypeFilters", "traits": { - "smithy.api#documentation": "

Filter criteria to use in determining which extensions to return.

\n

Filters must be compatible with Visibility to return valid results. For example, specifying\n AWS_TYPES for Category and PRIVATE for Visibility returns an\n empty list of types, but specifying PUBLIC for Visibility returns the desired list.

" + "smithy.api#documentation": "

Filter criteria to use in determining which extensions to return.

\n

Filters must be compatible with Visibility to return valid results. For\n example, specifying AWS_TYPES for Category and PRIVATE\n for Visibility returns an empty list of types, but specifying PUBLIC\n for Visibility returns the desired list.

" } }, "MaxResults": { "target": "com.amazonaws.cloudformation#MaxResults", "traits": { - "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this\n maximum, the response includes a NextToken value that you can assign to the NextToken\n request parameter to get the next set of results.

" + "smithy.api#documentation": "

The maximum number of results to be returned with a single call. If the number of\n available results exceeds this maximum, the response includes a NextToken value\n that you can assign to the NextToken request parameter to get the next set of\n results.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the previous paginated request didn't return all the remaining results, the response object's\n NextToken parameter value is set to a token. To retrieve the next set of results, call this action again\n and assign that token to the request object's NextToken parameter. If there are no remaining results,\n the previous response object's NextToken parameter is set to null.

" + "smithy.api#documentation": "

If the previous paginated request didn't return all the remaining results, the response\n object's NextToken parameter value is set to a token. To retrieve the next set of\n results, call this action again and assign that token to the request object's\n NextToken parameter. If there are no remaining results, the previous response\n object's NextToken parameter is set to null.

" } } }, @@ -8758,13 +8758,13 @@ "TypeSummaries": { "target": "com.amazonaws.cloudformation#TypeSummaries", "traits": { - "smithy.api#documentation": "

A list of TypeSummary structures that contain information about the specified extensions.

" + "smithy.api#documentation": "

A list of TypeSummary structures that contain information about the specified\n extensions.

" } }, "NextToken": { "target": "com.amazonaws.cloudformation#NextToken", "traits": { - "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve\n the next set of results, call this action again and assign that token to the request object's NextToken\n parameter. If the request returns all results, NextToken is set to null.

" + "smithy.api#documentation": "

If the request doesn't return all the remaining results, NextToken is set to\n a token. To retrieve the next set of results, call this action again and assign that token to\n the request object's NextToken parameter. If the request returns all results,\n NextToken is set to null.

" } } }, @@ -8789,7 +8789,7 @@ "target": "com.amazonaws.cloudformation#RoleARN2", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the role that CloudFormation should assume when sending log entries to CloudWatch Logs.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the role that CloudFormation should assume when sending log\n entries to CloudWatch Logs.

", "smithy.api#required": {} } }, @@ -8888,18 +8888,18 @@ "TypeHierarchy": { "target": "com.amazonaws.cloudformation#TypeHierarchy", "traits": { - "smithy.api#documentation": "

A concatenated list of the module type or types containing the resource. Module types are listed starting with\n the inner-most nested module, and separated by /.

\n

In the following example, the resource was created from a module of type\n AWS::First::Example::MODULE, that's nested inside a parent module of type\n AWS::Second::Example::MODULE.

\n

\n AWS::First::Example::MODULE/AWS::Second::Example::MODULE\n

" + "smithy.api#documentation": "

A concatenated list of the module type or types containing the resource. Module types are\n listed starting with the inner-most nested module, and separated by /.

\n

In the following example, the resource was created from a module of type\n AWS::First::Example::MODULE, that's nested inside a parent module of type\n AWS::Second::Example::MODULE.

\n

\n AWS::First::Example::MODULE/AWS::Second::Example::MODULE\n

" } }, "LogicalIdHierarchy": { "target": "com.amazonaws.cloudformation#LogicalIdHierarchy", "traits": { - "smithy.api#documentation": "

A concatenated list of the logical IDs of the module or modules containing the resource. Modules are listed\n starting with the inner-most nested module, and separated by /.

\n

In the following example, the resource was created from a module, moduleA, that's nested inside a\n parent module, moduleB.

\n

\n moduleA/moduleB\n

\n

For more information, see Referencing resources in a module\n in the CloudFormation User Guide.

" + "smithy.api#documentation": "

A concatenated list of the logical IDs of the module or modules containing the resource.\n Modules are listed starting with the inner-most nested module, and separated by\n /.

\n

In the following example, the resource was created from a module, moduleA,\n that's nested inside a parent module, moduleB.

\n

\n moduleA/moduleB\n

\n

For more information, see Reference module resources in\n CloudFormation templates in the CloudFormation User Guide.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource was created from a\n module included in the stack template.

\n

For more information about modules, see Using modules to encapsulate and reuse resource\n configurations in the CloudFormation User Guide.

" + "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource\n was created from a module included in the stack template.

\n

For more information about modules, see Create reusable resource configurations\n that can be included across templates with CloudFormation modules in the\n CloudFormation User Guide.

" } }, "com.amazonaws.cloudformation#MonitoringTimeInMinutes": { @@ -9253,7 +9253,7 @@ "ParameterKey": { "target": "com.amazonaws.cloudformation#ParameterKey", "traits": { - "smithy.api#documentation": "

The key associated with the parameter. If you don't specify a key and value for a particular parameter, CloudFormation uses the default value that's specified in your template.

" + "smithy.api#documentation": "

The key associated with the parameter. If you don't specify a key and value for a particular\n parameter, CloudFormation uses the default value that's specified in your template.

" } }, "ParameterValue": { @@ -9265,13 +9265,13 @@ "UsePreviousValue": { "target": "com.amazonaws.cloudformation#UsePreviousValue", "traits": { - "smithy.api#documentation": "

During a stack update, use the existing parameter value that the stack is using for a given parameter key. If\n you specify true, do not specify a parameter value.

" + "smithy.api#documentation": "

During a stack update, use the existing parameter value that the stack is using for a given\n parameter key. If you specify true, do not specify a parameter value.

" } }, "ResolvedValue": { "target": "com.amazonaws.cloudformation#ParameterValue", "traits": { - "smithy.api#documentation": "

Read-only. The value that corresponds to a SSM parameter key. This field is returned only for\n SSM\n parameter types in the template.

" + "smithy.api#documentation": "

Read-only. The value that corresponds to a Systems Manager parameter key. This field is returned only\n for Systems Manager parameter types in the template. For more information, see Use\n CloudFormation-supplied parameter types in the CloudFormation User Guide.

" } } }, @@ -9290,7 +9290,7 @@ } }, "traits": { - "smithy.api#documentation": "

A set of criteria that CloudFormation uses to validate parameter values. Although other constraints might be defined in\n the stack template, CloudFormation returns only the AllowedValues property.

" + "smithy.api#documentation": "

A set of criteria that CloudFormation uses to validate parameter values. Although other\n constraints might be defined in the stack template, CloudFormation returns only the\n AllowedValues property.

" } }, "com.amazonaws.cloudformation#ParameterDeclaration": { @@ -9317,7 +9317,7 @@ "NoEcho": { "target": "com.amazonaws.cloudformation#NoEcho", "traits": { - "smithy.api#documentation": "

Flag that indicates whether the parameter value is shown as plain text in logs and in the Amazon Web Services Management Console.

" + "smithy.api#documentation": "

Flag that indicates whether the parameter value is shown as plain text in logs and in the\n Amazon Web Services Management Console.

" } }, "Description": { @@ -9414,7 +9414,7 @@ } }, "traits": { - "smithy.api#documentation": "

Context information that enables CloudFormation to uniquely identify a resource. CloudFormation uses context key-value pairs in\n cases where a resource's logical and physical IDs aren't enough to uniquely identify that resource. Each context\n key-value pair specifies a resource that contains the targeted resource.

" + "smithy.api#documentation": "

Context information that enables CloudFormation to uniquely identify a resource. CloudFormation uses\n context key-value pairs in cases where a resource's logical and physical IDs aren't enough to\n uniquely identify that resource. Each context key-value pair specifies a resource that contains\n the targeted resource.

" } }, "com.amazonaws.cloudformation#PolicyAction": { @@ -9489,7 +9489,7 @@ "target": "com.amazonaws.cloudformation#PropertyValue", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The expected property value of the resource property, as defined in the stack template and any values specified\n as template parameters.

", + "smithy.api#documentation": "

The expected property value of the resource property, as defined in the stack template and\n any values specified as template parameters.

", "smithy.api#required": {} } }, @@ -9505,13 +9505,13 @@ "target": "com.amazonaws.cloudformation#DifferenceType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of property difference.

\n
    \n
  • \n

    \n ADD: A value has been added to a resource property that's an array or list data type.

    \n
  • \n
  • \n

    \n REMOVE: The property has been removed from the current resource configuration.

    \n
  • \n
  • \n

    \n NOT_EQUAL: The current property value differs from its expected value (as defined in the stack\n template and any values specified as template parameters).

    \n
  • \n
", + "smithy.api#documentation": "

The type of property difference.

\n
    \n
  • \n

    \n ADD: A value has been added to a resource property that's an array or list\n data type.

    \n
  • \n
  • \n

    \n REMOVE: The property has been removed from the current resource\n configuration.

    \n
  • \n
  • \n

    \n NOT_EQUAL: The current property value differs from its expected value (as\n defined in the stack template and any values specified as template parameters).

    \n
  • \n
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Information about a resource property whose actual value differs from its expected value, as defined in the\n stack template and any values specified as template parameters. These will be present only for resources whose\n StackResourceDriftStatus is MODIFIED. For more information, see Detecting Unregulated Configuration Changes to\n Stacks and Resources.

" + "smithy.api#documentation": "

Information about a resource property whose actual value differs from its expected value, as\n defined in the stack template and any values specified as template parameters. These will be\n present only for resources whose StackResourceDriftStatus is MODIFIED.\n For more information, see Detect unmanaged\n configuration changes to stacks and resources with drift detection.

" } }, "com.amazonaws.cloudformation#PropertyDifferences": { @@ -9578,7 +9578,7 @@ } ], "traits": { - "smithy.api#documentation": "

Publishes the specified extension to the CloudFormation registry as a public extension in this Region. Public\n extensions are available for use by all CloudFormation users. For more information about publishing extensions, see\n Publishing extensions to\n make them available for public use in the CloudFormation CLI User Guide.

\n

To publish an extension, you must be registered as a publisher with CloudFormation. For more information, see RegisterPublisher.

", + "smithy.api#documentation": "

Publishes the specified extension to the CloudFormation registry as a public extension in this\n Region. Public extensions are available for use by all CloudFormation users. For more information\n about publishing extensions, see Publishing extensions to\n make them available for public use in the\n CloudFormation Command Line Interface (CLI) User Guide.

\n

To publish an extension, you must be registered as a publisher with CloudFormation. For more\n information, see RegisterPublisher.

", "smithy.api#idempotent": {} } }, @@ -9588,25 +9588,25 @@ "Type": { "target": "com.amazonaws.cloudformation#ThirdPartyType", "traits": { - "smithy.api#documentation": "

The type of the extension.

\n

Conditional: You must specify Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The type of the extension.

\n

Conditional: You must specify Arn, or TypeName and\n Type.

" } }, "Arn": { "target": "com.amazonaws.cloudformation#PrivateTypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify Arn, or TypeName and\n Type.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The name of the extension.

\n

Conditional: You must specify Arn, or TypeName and\n Type.

" } }, "PublicVersionNumber": { "target": "com.amazonaws.cloudformation#PublicVersionNumber", "traits": { - "smithy.api#documentation": "

The version number to assign to this version of the extension.

\n

Use the following format, and adhere to semantic versioning when assigning a version number to your\n extension:

\n

\n MAJOR.MINOR.PATCH\n

\n

For more information, see Semantic Versioning 2.0.0.

\n

If you don't specify a version number, CloudFormation increments the version number by one minor version\n release.

\n

You cannot specify a version number the first time you publish a type. CloudFormation automatically sets the first\n version number to be 1.0.0.

" + "smithy.api#documentation": "

The version number to assign to this version of the extension.

\n

Use the following format, and adhere to semantic versioning when assigning a version\n number to your extension:

\n

\n MAJOR.MINOR.PATCH\n

\n

For more information, see Semantic Versioning\n 2.0.0.

\n

If you don't specify a version number, CloudFormation increments the version number by one\n minor version release.

\n

You cannot specify a version number the first time you publish a type. CloudFormation\n automatically sets the first version number to be 1.0.0.

" } } }, @@ -9695,7 +9695,7 @@ } ], "traits": { - "smithy.api#documentation": "

Reports progress of a resource handler to CloudFormation.

\n

Reserved for use by the CloudFormation CLI. Don't use this API\n in your code.

", + "smithy.api#documentation": "

Reports progress of a resource handler to CloudFormation.

\n

Reserved for use by the CloudFormation\n CLI. Don't use this API in your code.

", "smithy.api#idempotent": {} } }, @@ -9706,7 +9706,7 @@ "target": "com.amazonaws.cloudformation#ClientToken", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Reserved for use by the CloudFormation CLI.

", + "smithy.api#documentation": "

Reserved for use by the CloudFormation\n CLI.

", "smithy.api#required": {} } }, @@ -9714,38 +9714,38 @@ "target": "com.amazonaws.cloudformation#OperationStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Reserved for use by the CloudFormation CLI.

", + "smithy.api#documentation": "

Reserved for use by the CloudFormation\n CLI.

", "smithy.api#required": {} } }, "CurrentOperationStatus": { "target": "com.amazonaws.cloudformation#OperationStatus", "traits": { - "smithy.api#documentation": "

Reserved for use by the CloudFormation CLI.

" + "smithy.api#documentation": "

Reserved for use by the CloudFormation\n CLI.

" } }, "StatusMessage": { "target": "com.amazonaws.cloudformation#StatusMessage", "traits": { - "smithy.api#documentation": "

Reserved for use by the CloudFormation CLI.

" + "smithy.api#documentation": "

Reserved for use by the CloudFormation\n CLI.

" } }, "ErrorCode": { "target": "com.amazonaws.cloudformation#HandlerErrorCode", "traits": { - "smithy.api#documentation": "

Reserved for use by the CloudFormation CLI.

" + "smithy.api#documentation": "

Reserved for use by the CloudFormation\n CLI.

" } }, "ResourceModel": { "target": "com.amazonaws.cloudformation#ResourceModel", "traits": { - "smithy.api#documentation": "

Reserved for use by the CloudFormation CLI.

" + "smithy.api#documentation": "

Reserved for use by the CloudFormation\n CLI.

" } }, "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

Reserved for use by the CloudFormation CLI.

" + "smithy.api#documentation": "

Reserved for use by the CloudFormation\n CLI.

" } } }, @@ -9806,7 +9806,7 @@ } ], "traits": { - "smithy.api#documentation": "

Registers your account as a publisher of public extensions in the CloudFormation registry. Public extensions are\n available for use by all CloudFormation users. This publisher ID applies to your account in all Amazon Web Services Regions.

\n

For information about requirements for registering as a public extension publisher, see Registering your account to publish CloudFormation extensions in the CloudFormation CLI User\n Guide.

\n

", + "smithy.api#documentation": "

Registers your account as a publisher of public extensions in the CloudFormation registry.\n Public extensions are available for use by all CloudFormation users. This publisher ID applies to\n your account in all Amazon Web Services Regions.

\n

For information about requirements for registering as a public extension publisher, see\n Prerequisite: Registering your account to publish CloudFormation extensions in the\n CloudFormation Command Line Interface (CLI) User Guide.

\n

", "smithy.api#idempotent": {} } }, @@ -9816,13 +9816,13 @@ "AcceptTermsAndConditions": { "target": "com.amazonaws.cloudformation#AcceptTermsAndConditions", "traits": { - "smithy.api#documentation": "

Whether you accept the Terms and Conditions for publishing extensions in the CloudFormation registry. You must accept the terms and\n conditions in order to register to publish public extensions to the CloudFormation registry.

\n

The default is false.

" + "smithy.api#documentation": "

Whether you accept the Terms and Conditions for publishing extensions in the CloudFormation registry. You must\n accept the terms and conditions in order to register to publish public extensions to the\n CloudFormation registry.

\n

The default is false.

" } }, "ConnectionArn": { "target": "com.amazonaws.cloudformation#ConnectionArn", "traits": { - "smithy.api#documentation": "

If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for\n your connection to that account.

\n

For more information, see Registering your account\n to publish CloudFormation extensions in the CloudFormation CLI User Guide.

" + "smithy.api#documentation": "

If you are using a Bitbucket or GitHub account for identity verification, the Amazon\n Resource Name (ARN) for your connection to that account.

\n

For more information, see Prerequisite: Registering your account to publish CloudFormation extensions in the\n CloudFormation Command Line Interface (CLI) User Guide.

" } } }, @@ -9858,7 +9858,7 @@ } ], "traits": { - "smithy.api#documentation": "

Registers an extension with the CloudFormation service. Registering an extension makes it available for use in\n CloudFormation templates in your Amazon Web Services account, and includes:

\n
    \n
  • \n

    Validating the extension schema.

    \n
  • \n
  • \n

    Determining which handlers, if any, have been specified for the extension.

    \n
  • \n
  • \n

    Making the extension available for use in your account.

    \n
  • \n
\n

For more information about how to develop extensions and ready them for registration, see Creating Resource\n Providers in the CloudFormation CLI User Guide.

\n

You can have a maximum of 50 resource extension versions registered at a time. This maximum is per account and\n per Region. Use DeregisterType to deregister specific extension versions if necessary.

\n

Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request.

\n

Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify\n configuration properties for the extension. For more information, see Configuring extensions at\n the account level in the CloudFormation User Guide.

", + "smithy.api#documentation": "

Registers an extension with the CloudFormation service. Registering an extension makes it\n available for use in CloudFormation templates in your Amazon Web Services account, and includes:

\n
    \n
  • \n

    Validating the extension schema.

    \n
  • \n
  • \n

    Determining which handlers, if any, have been specified for the extension.

    \n
  • \n
  • \n

    Making the extension available for use in your account.

    \n
  • \n
\n

For more information about how to develop extensions and ready them for registration, see\n Creating resource types using the CloudFormation CLI in the\n CloudFormation Command Line Interface (CLI) User Guide.

\n

You can have a maximum of 50 resource extension versions registered at a time. This\n maximum is per account and per Region. Use DeregisterType\n to deregister specific extension versions if necessary.

\n

Once you have initiated a registration request using RegisterType, you\n can use DescribeTypeRegistration to monitor the progress of the registration\n request.

\n

Once you have registered a private extension in your account and Region, use SetTypeConfiguration to specify configuration properties for the extension. For\n more information, see Edit configuration\n data for extensions in your account in the\n CloudFormation User Guide.

", "smithy.api#idempotent": {} } }, @@ -9875,7 +9875,7 @@ "target": "com.amazonaws.cloudformation#TypeName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the extension being registered.

\n

We suggest that extension names adhere to the following patterns:

\n
    \n
  • \n

    For resource types,\n company_or_organization::service::type.

    \n
  • \n
  • \n

    For modules,\n company_or_organization::service::type::MODULE.

    \n
  • \n
  • \n

    For hooks,\n MyCompany::Testing::MyTestHook.

    \n
  • \n
\n \n

The following organization namespaces are reserved and can't be used in your extension names:

\n
    \n
  • \n

    \n Alexa\n

    \n
  • \n
  • \n

    \n AMZN\n

    \n
  • \n
  • \n

    \n Amazon\n

    \n
  • \n
  • \n

    \n AWS\n

    \n
  • \n
  • \n

    \n Custom\n

    \n
  • \n
  • \n

    \n Dev\n

    \n
  • \n
\n
", + "smithy.api#documentation": "

The name of the extension being registered.

\n

We suggest that extension names adhere to the following patterns:

\n
    \n
  • \n

    For resource types, company_or_organization::service::type.

    \n
  • \n
  • \n

    For modules, company_or_organization::service::type::MODULE.

    \n
  • \n
  • \n

    For hooks, MyCompany::Testing::MyTestHook.

    \n
  • \n
\n \n

The following organization namespaces are reserved and can't be used in your extension\n names:

\n
    \n
  • \n

    \n Alexa\n

    \n
  • \n
  • \n

    \n AMZN\n

    \n
  • \n
  • \n

    \n Amazon\n

    \n
  • \n
  • \n

    \n AWS\n

    \n
  • \n
  • \n

    \n Custom\n

    \n
  • \n
  • \n

    \n Dev\n

    \n
  • \n
\n
", "smithy.api#required": {} } }, @@ -9883,7 +9883,7 @@ "target": "com.amazonaws.cloudformation#S3Url", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A URL to the S3 bucket containing the extension project package that contains the necessary files for the\n extension you want to register.

\n

For information about generating a schema handler package for the extension you want to register, see submit in the\n CloudFormation CLI User Guide.

\n \n

The user registering the extension must be able to access the package in the S3 bucket. That's, the user needs\n to have GetObject permissions for\n the schema handler package. For more information, see Actions, Resources, and Condition Keys for Amazon S3\n in the Identity and Access Management User Guide.

\n
", + "smithy.api#documentation": "

A URL to the S3 bucket containing the extension project package that contains the\n necessary files for the extension you want to register.

\n

For information about generating a schema handler package for the extension you want to\n register, see submit in\n the CloudFormation Command Line Interface (CLI) User Guide.

\n \n

The user registering the extension must be able to access the package in the S3 bucket.\n That's, the user needs to have GetObject permissions for the schema\n handler package. For more information, see Actions, Resources, and Condition Keys for\n Amazon S3 in the Identity and Access Management User Guide.

\n
", "smithy.api#required": {} } }, @@ -9896,13 +9896,13 @@ "ExecutionRoleArn": { "target": "com.amazonaws.cloudformation#RoleARN2", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking the extension.

\n

For CloudFormation to assume the specified execution role, the role must contain a trust relationship with the\n CloudFormation service principal (resources.cloudformation.amazonaws.com). For more information about adding\n trust relationships, see Modifying a\n role trust policy in the Identity and Access Management User Guide.

\n

If your extension calls Amazon Web Services APIs in any of its handlers, you must create an \n IAM execution role\n that includes\n the necessary permissions to call those Amazon Web Services APIs, and provision that execution role in your account.\n When CloudFormation needs to invoke the resource type handler, CloudFormation assumes this execution role to create a temporary\n session token, which it then passes to the resource type handler, thereby supplying your resource type with the\n appropriate credentials.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume when invoking\n the extension.

\n

For CloudFormation to assume the specified execution role, the role must contain a trust\n relationship with the CloudFormation service principal\n (resources.cloudformation.amazonaws.com). For more information about adding\n trust relationships, see Modifying a role trust policy in the Identity and Access Management User\n Guide.

\n

If your extension calls Amazon Web Services APIs in any of its handlers, you must create an\n \n IAM\n execution role\n that includes the necessary permissions to call those\n Amazon Web Services APIs, and provision that execution role in your account. When CloudFormation needs to invoke\n the resource type handler, CloudFormation assumes this execution role to create a temporary\n session token, which it then passes to the resource type handler, thereby supplying your\n resource type with the appropriate credentials.

" } }, "ClientRequestToken": { "target": "com.amazonaws.cloudformation#RequestToken", "traits": { - "smithy.api#documentation": "

A unique identifier that acts as an idempotency key for this registration request. Specifying a client request\n token prevents CloudFormation from generating more than one version of an extension from the same registration request,\n even if the request is submitted multiple times.

" + "smithy.api#documentation": "

A unique identifier that acts as an idempotency key for this registration request.\n Specifying a client request token prevents CloudFormation from generating more than one version of\n an extension from the same registration request, even if the request is submitted multiple\n times.

" } } }, @@ -9916,7 +9916,7 @@ "RegistrationToken": { "target": "com.amazonaws.cloudformation#RegistrationToken", "traits": { - "smithy.api#documentation": "

The identifier for this registration request.

\n

Use this registration token when calling DescribeTypeRegistration, which returns information\n about the status and IDs of the extension registration.

" + "smithy.api#documentation": "

The identifier for this registration request.

\n

Use this registration token when calling DescribeTypeRegistration, which\n returns information about the status and IDs of the extension registration.

" } } }, @@ -10031,13 +10031,13 @@ "TypeNameAlias": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

An alias assigned to the public extension, in this account and Region. If you specify an alias for the\n extension, CloudFormation treats the alias as the extension type name within this account and Region. You must use the\n alias to refer to the extension in your templates, API calls, and CloudFormation console.

" + "smithy.api#documentation": "

An alias assigned to the public extension, in this account and Region. If you specify an\n alias for the extension, CloudFormation treats the alias as the extension type name within this\n account and Region. You must use the alias to refer to the extension in your templates, API\n calls, and CloudFormation console.

" } }, "OriginalTypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The type name of the public extension.

\n

If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of\n the public extension. For more information, see Specifying aliases to\n refer to extensions in the CloudFormation User Guide.

" + "smithy.api#documentation": "

The type name of the public extension.

\n

If you specified a TypeNameAlias when enabling the extension in this account\n and Region, CloudFormation treats that alias as the extension's type name within the account and\n Region, not the type name of the public extension. For more information, see Use\n aliases to refer to extensions in the CloudFormation User Guide.

" } }, "PublisherId": { @@ -10054,7 +10054,7 @@ } }, "traits": { - "smithy.api#documentation": "

For extensions that are modules, a public third-party extension that must be activated in your account in order\n for the module itself to be activated.

\n

For more information, see Activating public modules for use in your\n account in the CloudFormation User Guide.

" + "smithy.api#documentation": "

For extensions that are modules, a public third-party extension that must be activated in\n your account in order for the module itself to be activated.

\n

For more information, see Requirements for activating third-party public modules in the\n CloudFormation User Guide.

" } }, "com.amazonaws.cloudformation#RequiredActivatedTypes": { @@ -10142,13 +10142,13 @@ "PolicyAction": { "target": "com.amazonaws.cloudformation#PolicyAction", "traits": { - "smithy.api#documentation": "

The action that will be taken on the physical resource when the change set is executed.

\n
    \n
  • \n

    \n Delete The resource will be deleted.

    \n
  • \n
  • \n

    \n Retain The resource will be retained.

    \n
  • \n
  • \n

    \n Snapshot The resource will have a snapshot taken.

    \n
  • \n
  • \n

    \n ReplaceAndDelete The resource will be replaced and then deleted.

    \n
  • \n
  • \n

    \n ReplaceAndRetain The resource will be replaced and then retained.

    \n
  • \n
  • \n

    \n ReplaceAndSnapshot The resource will be replaced and then have a snapshot taken.

    \n
  • \n
" + "smithy.api#documentation": "

The action that will be taken on the physical resource when the change set is\n executed.

\n
    \n
  • \n

    \n Delete The resource will be deleted.

    \n
  • \n
  • \n

    \n Retain The resource will be retained.

    \n
  • \n
  • \n

    \n Snapshot The resource will have a snapshot taken.

    \n
  • \n
  • \n

    \n ReplaceAndDelete The resource will be replaced and then deleted.

    \n
  • \n
  • \n

    \n ReplaceAndRetain The resource will be replaced and then retained.

    \n
  • \n
  • \n

    \n ReplaceAndSnapshot The resource will be replaced and then have a snapshot\n taken.

    \n
  • \n
" } }, "Action": { "target": "com.amazonaws.cloudformation#ChangeAction", "traits": { - "smithy.api#documentation": "

The action that CloudFormation takes on the resource, such as Add (adds a new resource), Modify\n (changes a resource), Remove (deletes a resource), Import (imports a resource), or\n Dynamic (exact action for the resource can't be determined).

" + "smithy.api#documentation": "

The action that CloudFormation takes on the resource, such as Add (adds a new\n resource), Modify (changes a resource), Remove (deletes a resource),\n Import (imports a resource), or Dynamic (exact action for the resource\n can't be determined).

" } }, "LogicalResourceId": { @@ -10160,7 +10160,7 @@ "PhysicalResourceId": { "target": "com.amazonaws.cloudformation#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

The resource's physical ID (resource name). Resources that you are adding don't have physical IDs because they\n haven't been created.

" + "smithy.api#documentation": "

The resource's physical ID (resource name). Resources that you are adding don't have\n physical IDs because they haven't been created.

" } }, "ResourceType": { @@ -10172,19 +10172,19 @@ "Replacement": { "target": "com.amazonaws.cloudformation#Replacement", "traits": { - "smithy.api#documentation": "

For the Modify action, indicates whether CloudFormation will replace the resource by creating a new one and\n deleting the old one. This value depends on the value of the RequiresRecreation property in the\n ResourceTargetDefinition structure. For example, if the RequiresRecreation field is\n Always and the Evaluation field is Static, Replacement is\n True. If the RequiresRecreation field is Always and the\n Evaluation field is Dynamic, Replacement is Conditionally.

\n

If you have multiple changes with different RequiresRecreation values, the Replacement\n value depends on the change with the most impact. A RequiresRecreation value of Always has\n the most impact, followed by Conditionally, and then Never.

" + "smithy.api#documentation": "

For the Modify action, indicates whether CloudFormation will replace the resource\n by creating a new one and deleting the old one. This value depends on the value of the\n RequiresRecreation property in the ResourceTargetDefinition structure.\n For example, if the RequiresRecreation field is Always and the\n Evaluation field is Static, Replacement is\n True. If the RequiresRecreation field is Always and the\n Evaluation field is Dynamic, Replacement is\n Conditional.

\n

If you have multiple changes with different RequiresRecreation values, the\n Replacement value depends on the change with the most impact. A\n RequiresRecreation value of Always has the most impact, followed by\n Conditional, and then Never.

" } }, "Scope": { "target": "com.amazonaws.cloudformation#Scope", "traits": { - "smithy.api#documentation": "

For the Modify action, indicates which resource attribute is triggering this update, such as a\n change in the resource attribute's Metadata, Properties, or Tags.

" + "smithy.api#documentation": "

For the Modify action, indicates which resource attribute is triggering this\n update, such as a change in the resource attribute's Metadata,\n Properties, or Tags.

" } }, "Details": { "target": "com.amazonaws.cloudformation#ResourceChangeDetails", "traits": { - "smithy.api#documentation": "

For the Modify action, a list of ResourceChangeDetail structures that describes the\n changes that CloudFormation will make to the resource.

" + "smithy.api#documentation": "

For the Modify action, a list of ResourceChangeDetail structures\n that describes the changes that CloudFormation will make to the resource.

" } }, "ChangeSetId": { @@ -10196,24 +10196,24 @@ "ModuleInfo": { "target": "com.amazonaws.cloudformation#ModuleInfo", "traits": { - "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource was created from a\n module included in the stack template.

" + "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource\n was created from a module included in the stack template.

" } }, "BeforeContext": { "target": "com.amazonaws.cloudformation#BeforeContext", "traits": { - "smithy.api#documentation": "

An encoded JSON string containing the context of the resource before the change is executed.

" + "smithy.api#documentation": "

An encoded JSON string containing the context of the resource before the change is\n executed.

" } }, "AfterContext": { "target": "com.amazonaws.cloudformation#AfterContext", "traits": { - "smithy.api#documentation": "

An encoded JSON string containing the context of the resource after the change is executed.

" + "smithy.api#documentation": "

An encoded JSON string containing the context of the resource after the change is\n executed.

" } } }, "traits": { - "smithy.api#documentation": "

The ResourceChange structure describes the resource and the action that CloudFormation will perform on it if\n you execute this change set.

" + "smithy.api#documentation": "

The ResourceChange structure describes the resource and the action that\n CloudFormation will perform on it if you execute this change set.

" } }, "com.amazonaws.cloudformation#ResourceChangeDetail": { @@ -10222,30 +10222,30 @@ "Target": { "target": "com.amazonaws.cloudformation#ResourceTargetDefinition", "traits": { - "smithy.api#documentation": "

A ResourceTargetDefinition structure that describes the field that CloudFormation will change and whether\n the resource will be recreated.

" + "smithy.api#documentation": "

A ResourceTargetDefinition structure that describes the field that CloudFormation\n will change and whether the resource will be recreated.

" } }, "Evaluation": { "target": "com.amazonaws.cloudformation#EvaluationType", "traits": { - "smithy.api#documentation": "

Indicates whether CloudFormation can determine the target value, and whether the target value will change before you\n execute a change set.

\n

For Static evaluations, CloudFormation can determine that the target value will change, and its value. For\n example, if you directly modify the InstanceType property of an EC2 instance, CloudFormation knows\n that this property value will change, and its value, so this is a Static evaluation.

\n

For Dynamic evaluations, can't determine the target value because it depends on the result of an\n intrinsic function, such as a Ref or Fn::GetAtt intrinsic function, when the stack is\n updated. For example, if your template includes a reference to a resource that's conditionally recreated, the value\n of the reference (the physical ID of the resource) might change, depending on if the resource is recreated. If the\n resource is recreated, it will have a new physical ID, so all references to that resource will also be\n updated.

" + "smithy.api#documentation": "

Indicates whether CloudFormation can determine the target value, and whether the target value\n will change before you execute a change set.

\n

For Static evaluations, CloudFormation can determine that the target value will\n change, and its value. For example, if you directly modify the InstanceType property\n of an EC2 instance, CloudFormation knows that this property value will change, and its value, so this\n is a Static evaluation.

\n

For Dynamic evaluations, can't determine the target value because it depends on\n the result of an intrinsic function, such as a Ref or Fn::GetAtt\n intrinsic function, when the stack is updated. For example, if your template includes a reference\n to a resource that's conditionally recreated, the value of the reference (the physical ID of the\n resource) might change, depending on if the resource is recreated. If the resource is recreated,\n it will have a new physical ID, so all references to that resource will also be updated.

" } }, "ChangeSource": { "target": "com.amazonaws.cloudformation#ChangeSource", "traits": { - "smithy.api#documentation": "

The group to which the CausingEntity value belongs. There are five entity groups:

\n
    \n
  • \n

    \n ResourceReference entities are Ref intrinsic functions that refer to resources in\n the template, such as { \"Ref\" : \"MyEC2InstanceResource\" }.

    \n
  • \n
  • \n

    \n ParameterReference entities are Ref intrinsic functions that get template parameter\n values, such as { \"Ref\" : \"MyPasswordParameter\" }.

    \n
  • \n
  • \n

    \n ResourceAttribute entities are Fn::GetAtt intrinsic functions that get resource\n attribute values, such as { \"Fn::GetAtt\" : [ \"MyEC2InstanceResource\", \"PublicDnsName\" ] }.

    \n
  • \n
  • \n

    \n DirectModification entities are changes that are made directly to the template.

    \n
  • \n
  • \n

    \n Automatic entities are AWS::CloudFormation::Stack resource types, which are also\n known as nested stacks. If you made no changes to the AWS::CloudFormation::Stack resource, CloudFormation sets the ChangeSource to Automatic because the nested stack's template might\n have changed. Changes to a nested stack's template aren't visible to CloudFormation until you run an update on the parent\n stack.

    \n
  • \n
" + "smithy.api#documentation": "

The group to which the CausingEntity value belongs. There are five entity\n groups:

\n
    \n
  • \n

    \n ResourceReference entities are Ref intrinsic functions that refer to\n resources in the template, such as { \"Ref\" : \"MyEC2InstanceResource\" }.

    \n
  • \n
  • \n

    \n ParameterReference entities are Ref intrinsic functions that get\n template parameter values, such as { \"Ref\" : \"MyPasswordParameter\" }.

    \n
  • \n
  • \n

    \n ResourceAttribute entities are Fn::GetAtt intrinsic functions that\n get resource attribute values, such as { \"Fn::GetAtt\" : [ \"MyEC2InstanceResource\",\n \"PublicDnsName\" ] }.

    \n
  • \n
  • \n

    \n DirectModification entities are changes that are made directly to the\n template.

    \n
  • \n
  • \n

    \n Automatic entities are AWS::CloudFormation::Stack resource types,\n which are also known as nested stacks. If you made no changes to the\n AWS::CloudFormation::Stack resource, CloudFormation sets the\n ChangeSource to Automatic because the nested stack's template might\n have changed. Changes to a nested stack's template aren't visible to CloudFormation until you run\n an update on the parent stack.

    \n
  • \n
" } }, "CausingEntity": { "target": "com.amazonaws.cloudformation#CausingEntity", "traits": { - "smithy.api#documentation": "

The identity of the entity that triggered this change. This entity is a member of the group that's specified by\n the ChangeSource field. For example, if you modified the value of the KeyPairName\n parameter, the CausingEntity is the name of the parameter (KeyPairName).

\n

If the ChangeSource value is DirectModification, no value is given for\n CausingEntity.

" + "smithy.api#documentation": "

The identity of the entity that triggered this change. This entity is a member of the group\n that's specified by the ChangeSource field. For example, if you modified the value\n of the KeyPairName parameter, the CausingEntity is the name of the\n parameter (KeyPairName).

\n

If the ChangeSource value is DirectModification, no value is given\n for CausingEntity.

" } } }, "traits": { - "smithy.api#documentation": "

For a resource with Modify as the action, the ResourceChange structure describes the\n changes CloudFormation will make to that resource.

" + "smithy.api#documentation": "

For a resource with Modify as the action, the ResourceChange\n structure describes the changes CloudFormation will make to that resource.

" } }, "com.amazonaws.cloudformation#ResourceChangeDetails": { @@ -10261,7 +10261,7 @@ "target": "com.amazonaws.cloudformation#ResourceType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see IaC generator supported resource types in the CloudFormation User Guide.

", + "smithy.api#documentation": "

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support for imports and drift detection in the CloudFormation User Guide.

", "smithy.api#required": {} } }, @@ -10275,13 +10275,13 @@ "target": "com.amazonaws.cloudformation#ResourceIdentifierProperties", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development.

", + "smithy.api#documentation": "

A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface (CLI) User Guide.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

A resource included in a generated template. This data type is used with the CreateGeneratedTemplate and UpdateGeneratedTemplate API actions.

" + "smithy.api#documentation": "

A resource included in a generated template. This data type is used with the CreateGeneratedTemplate and UpdateGeneratedTemplate API actions.
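As a rough illustration of how this data type surfaces in Soto, the sketch below builds a ResourceDefinition and passes it to CreateGeneratedTemplate. It assumes a `cloudFormation` service object configured elsewhere; the template and table names are made up, and exact labels may differ between Soto releases.

```swift
import SotoCloudFormation

// Hedged sketch: `cloudFormation` is assumed to be CloudFormation(client:region:),
// and all names/identifiers here are hypothetical.
func generateTemplate(_ cloudFormation: CloudFormation) async throws {
    // A ResourceDefinition identifies an existing resource by type and primary identifier.
    let table = CloudFormation.ResourceDefinition(
        resourceIdentifier: ["TableName": "MyDDBTable"],
        resourceType: "AWS::DynamoDB::Table"
    )
    let output = try await cloudFormation.createGeneratedTemplate(
        .init(generatedTemplateName: "MyGeneratedTemplate", resources: [table])
    )
    print(output.generatedTemplateId ?? "no template id returned")
}
```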

" } }, "com.amazonaws.cloudformation#ResourceDefinitions": { @@ -10302,7 +10302,7 @@ "ResourceType": { "target": "com.amazonaws.cloudformation#ResourceType", "traits": { - "smithy.api#documentation": "

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support In the CloudFormation User Guide.

" + "smithy.api#documentation": "

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support for imports and drift detection In the CloudFormation User Guide.

" } }, "LogicalResourceId": { @@ -10314,13 +10314,13 @@ "ResourceIdentifier": { "target": "com.amazonaws.cloudformation#ResourceIdentifierProperties", "traits": { - "smithy.api#documentation": "

A list of up to 256 key-value pairs that identifies the resource in the generated template. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development.

" + "smithy.api#documentation": "

A list of up to 256 key-value pairs that identifies the resource in the generated template. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface (CLI) User Guide.

" } }, "ResourceStatus": { "target": "com.amazonaws.cloudformation#GeneratedTemplateResourceStatus", "traits": { - "smithy.api#documentation": "

Status of the processing of a resource in a generated template.

  • InProgress - The resource processing is still in progress.
  • Complete - The resource processing is complete.
  • Pending - The resource processing is pending.
  • Failed - The resource processing has failed.
" + "smithy.api#documentation": "

Status of the processing of a resource in a generated template.

  • InProgress - The resource processing is still in progress.
  • Complete - The resource processing is complete.
  • Pending - The resource processing is pending.
  • Failed - The resource processing has failed.
" } }, "ResourceStatusReason": { @@ -10400,24 +10400,24 @@ "ResourceType": { "target": "com.amazonaws.cloudformation#ResourceType", "traits": { - "smithy.api#documentation": "

The template resource type of the target resources, such as AWS::S3::Bucket.

" + "smithy.api#documentation": "

The template resource type of the target resources, such as AWS::S3::Bucket.

" } }, "LogicalResourceIds": { "target": "com.amazonaws.cloudformation#LogicalResourceIds", "traits": { - "smithy.api#documentation": "

The logical IDs of the target resources of the specified ResourceType, as defined in the import template.

" + "smithy.api#documentation": "

The logical IDs of the target resources of the specified ResourceType, as defined in the import template.

" } }, "ResourceIdentifiers": { "target": "com.amazonaws.cloudformation#ResourceIdentifiers", "traits": { - "smithy.api#documentation": "

The resource properties you can provide during the import to identify your target resources. For example, BucketName is a possible identifier property for AWS::S3::Bucket resources.

" + "smithy.api#documentation": "

The resource properties you can provide during the import to identify your target resources. For example, BucketName is a possible identifier property for AWS::S3::Bucket resources.

" } } }, "traits": { - "smithy.api#documentation": "

Describes the target resources of a specific type in your import template (for example, all AWS::S3::Bucket resources) and the properties you can provide during the import to identify resources of that type.

" + "smithy.api#documentation": "

Describes the target resources of a specific type in your import template (for example, all AWS::S3::Bucket resources) and the properties you can provide during the import to identify resources of that type.

" } }, "com.amazonaws.cloudformation#ResourceIdentifiers": { @@ -10545,13 +10545,13 @@ "Status": { "target": "com.amazonaws.cloudformation#ResourceScanStatus", "traits": { - "smithy.api#documentation": "

Status of the resource scan.

  • INPROGRESS - The resource scan is still in progress.
  • COMPLETE - The resource scan is complete.
  • EXPIRED - The resource scan has expired.
  • FAILED - The resource scan has failed.
" + "smithy.api#documentation": "

Status of the resource scan.

  • INPROGRESS - The resource scan is still in progress.
  • COMPLETE - The resource scan is complete.
  • EXPIRED - The resource scan has expired.
  • FAILED - The resource scan has failed.
" } }, "StatusReason": { "target": "com.amazonaws.cloudformation#ResourceScanStatusReason", "traits": { - "smithy.api#documentation": "

The reason for the resource scan status, providing more information if a failure happened.

" + "smithy.api#documentation": "

The reason for the resource scan status, providing more information if a failure happened.

" } }, "StartTime": { @@ -10574,7 +10574,7 @@ } }, "traits": { - "smithy.api#documentation": "

A summary of the resource scan. This is returned by the ListResourceScan API action.

" + "smithy.api#documentation": "

A summary of the resource scan. This is returned by the ListResourceScan API action.

" } }, "com.amazonaws.cloudformation#ResourceScannerMaxResults": { @@ -10752,19 +10752,19 @@ "Attribute": { "target": "com.amazonaws.cloudformation#ResourceAttribute", "traits": { - "smithy.api#documentation": "

Indicates which resource attribute is triggering this update, such as a change in the resource attribute's Metadata, Properties, or Tags.

" + "smithy.api#documentation": "

Indicates which resource attribute is triggering this update, such as a change in the resource attribute's Metadata, Properties, or Tags.

" } }, "Name": { "target": "com.amazonaws.cloudformation#PropertyName", "traits": { - "smithy.api#documentation": "

If the Attribute value is Properties, the name of the property. For all other attributes, the value is null.

" + "smithy.api#documentation": "

If the Attribute value is Properties, the name of the property. For all other attributes, the value is null.

" } }, "RequiresRecreation": { "target": "com.amazonaws.cloudformation#RequiresRecreation", "traits": { - "smithy.api#documentation": "

If the Attribute value is Properties, indicates whether a change to this property causes the resource to be recreated. The value can be Never, Always, or Conditionally. To determine the conditions for a Conditionally recreation, see the update behavior for that property in the CloudFormation User Guide.

" + "smithy.api#documentation": "

If the Attribute value is Properties, indicates whether a change to this property causes the resource to be recreated. The value can be Never, Always, or Conditionally. To determine the conditions for a Conditionally recreation, see the update behavior for that property in the Amazon Web Services resource and property types reference in the CloudFormation User Guide.

" } }, "Path": { @@ -10776,13 +10776,13 @@ "BeforeValue": { "target": "com.amazonaws.cloudformation#BeforeValue", "traits": { - "smithy.api#documentation": "

The value of the property before the change is executed. Large values can be truncated.

" + "smithy.api#documentation": "

The value of the property before the change is executed. Large values can be truncated.

" } }, "AfterValue": { "target": "com.amazonaws.cloudformation#AfterValue", "traits": { - "smithy.api#documentation": "

The value of the property after the change is executed. Large values can be truncated.

" + "smithy.api#documentation": "

The value of the property after the change is executed. Large values can be truncated.

" } }, "AttributeChangeType": { @@ -10793,7 +10793,7 @@ } }, "traits": { - "smithy.api#documentation": "

The field that CloudFormation will change, such as the name of a resource's property, and whether the resource will be recreated.

" + "smithy.api#documentation": "

The field that CloudFormation will change, such as the name of a resource's property, and whether the resource will be recreated.

" } }, "com.amazonaws.cloudformation#ResourceToImport": { @@ -10803,7 +10803,7 @@ "target": "com.amazonaws.cloudformation#ResourceType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of resource to import into your stack, such as AWS::S3::Bucket. For a list of supported resource types, see Resources that support import operations in the CloudFormation User Guide.

", + "smithy.api#documentation": "

The type of resource to import into your stack, such as AWS::S3::Bucket. For a list of supported resource types, see Resource type support for imports and drift detection in the CloudFormation User Guide.

", "smithy.api#required": {} } }, @@ -10819,7 +10819,7 @@ "target": "com.amazonaws.cloudformation#ResourceIdentifierProperties", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A key-value pair that identifies the target resource. The key is an identifier property (for example, BucketName for AWS::S3::Bucket resources) and the value is the actual property value (for example, MyS3Bucket).

", + "smithy.api#documentation": "

A key-value pair that identifies the target resource. The key is an identifier property (for example, BucketName for AWS::S3::Bucket resources) and the value is the actual property value (for example, MyS3Bucket).
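For orientation, a minimal sketch of how such a key-value identifier pair is expressed with Soto's generated ResourceToImport shape; the bucket name and logical ID are placeholders, and the surrounding import change set call is omitted.

```swift
import SotoCloudFormation

// Hypothetical example: identify an existing S3 bucket to bring into a stack.
let bucketToImport = CloudFormation.ResourceToImport(
    logicalResourceId: "MyImportedBucket",
    resourceIdentifier: ["BucketName": "my-existing-bucket"],
    resourceType: "AWS::S3::Bucket"
)
```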

", "smithy.api#required": {} } } @@ -10951,18 +10951,18 @@ "RollbackTriggers": { "target": "com.amazonaws.cloudformation#RollbackTriggers", "traits": { - "smithy.api#documentation": "

The triggers to monitor during stack creation or update actions.

By default, CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:

  • To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.
  • To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update). Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.
  • To remove all currently specified triggers, specify an empty list for this parameter.

If a specified trigger is missing, the entire stack operation fails and is rolled back.

" + "smithy.api#documentation": "

The triggers to monitor during stack creation or update actions.

By default, CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:

  • To use the rollback triggers previously specified for this stack, if any, don't specify this parameter.
  • To specify new or updated rollback triggers, you must specify all the triggers that you want used for this stack, even triggers you've specified before (for example, when creating the stack or during a previous stack update). Any triggers that you don't include in the updated list of triggers are no longer applied to the stack.
  • To remove all currently specified triggers, specify an empty list for this parameter.

If a specified trigger is missing, the entire stack operation fails and is rolled back.

" } }, "MonitoringTimeInMinutes": { "target": "com.amazonaws.cloudformation#MonitoringTimeInMinutes", "traits": { - "smithy.api#documentation": "

The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.

The default is 0 minutes.

If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack, for example) as necessary.

If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.

" + "smithy.api#documentation": "

The amount of time, in minutes, during which CloudFormation should monitor all the rollback triggers after the stack creation or update operation deploys all necessary resources.

The default is 0 minutes.

If you specify a monitoring period but don't specify any rollback triggers, CloudFormation still waits the specified period of time before cleaning up old resources after update operations. You can use this monitoring period to perform any manual stack validation desired, and manually cancel the stack creation or update (using CancelUpdateStack, for example) as necessary.

If you specify 0 for this parameter, CloudFormation still monitors the specified rollback triggers during stack creation and update operations. Then, for update operations, it begins disposing of old resources immediately once the operation completes.
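A small Soto-style sketch of a rollback configuration combining a trigger with a monitoring period, as described above; the alarm ARN is a placeholder and the argument labels follow Soto's generated initializers as assumed here.

```swift
import SotoCloudFormation

// Sketch only: one CloudWatch alarm trigger plus a 15-minute monitoring window.
let rollbackConfig = CloudFormation.RollbackConfiguration(
    monitoringTimeInMinutes: 15,
    rollbackTriggers: [
        .init(
            arn: "arn:aws:cloudwatch:us-east-1:123456789012:alarm:MyAppAlarm", // placeholder ARN
            type: "AWS::CloudWatch::Alarm"
        )
    ]
)
```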

" } } }, "traits": { - "smithy.api#documentation": "

Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.

" + "smithy.api#documentation": "

Structure containing the rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

Rollback triggers enable you to have CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Roll back your CloudFormation stack on alarm breach with rollback triggers.

" } }, "com.amazonaws.cloudformation#RollbackStack": { @@ -10979,7 +10979,7 @@ } ], "traits": { - "smithy.api#documentation": "

When specifying RollbackStack, you preserve the state of previously provisioned resources when an operation fails. You can check the status of the stack through the DescribeStacks operation.

Rolls back the specified stack to the last known stable state from CREATE_FAILED or UPDATE_FAILED stack statuses.

This operation will delete a stack if it doesn't contain a last known stable state. A last known stable state includes any status in a *_COMPLETE. This includes the following stack statuses.

  • CREATE_COMPLETE
  • UPDATE_COMPLETE
  • UPDATE_ROLLBACK_COMPLETE
  • IMPORT_COMPLETE
  • IMPORT_ROLLBACK_COMPLETE
" + "smithy.api#documentation": "

When specifying RollbackStack, you preserve the state of previously provisioned resources when an operation fails. You can check the status of the stack through the DescribeStacks operation.

Rolls back the specified stack to the last known stable state from CREATE_FAILED or UPDATE_FAILED stack statuses.

This operation will delete a stack if it doesn't contain a last known stable state. A last known stable state includes any status in a *_COMPLETE. This includes the following stack statuses.

  • CREATE_COMPLETE
  • UPDATE_COMPLETE
  • UPDATE_ROLLBACK_COMPLETE
  • IMPORT_COMPLETE
  • IMPORT_ROLLBACK_COMPLETE
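For context, a hedged sketch of invoking this operation through Soto; it assumes a configured `cloudFormation` service object and a hypothetical stack name, and the exact client setup may vary by Soto version.

```swift
import SotoCloudFormation

// Minimal sketch: roll back a stack stuck in UPDATE_FAILED, keeping the
// previously provisioned resources. `cloudFormation` is assumed to be
// CloudFormation(client:region:); the stack name is made up.
func rollBack(_ cloudFormation: CloudFormation) async throws {
    let output = try await cloudFormation.rollbackStack(
        .init(retainExceptOnCreate: true, stackName: "my-failed-stack")
    )
    print(output.stackId ?? "no stack id returned")
}
```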
" } }, "com.amazonaws.cloudformation#RollbackStackInput": { @@ -10996,7 +10996,7 @@ "RoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Identity and Access Management role that CloudFormation assumes to rollback the stack.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to rollback the stack.

" } }, "ClientRequestToken": { @@ -11008,7 +11008,7 @@ "RetainExceptOnCreate": { "target": "com.amazonaws.cloudformation#RetainExceptOnCreate", "traits": { - "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain.

Default: false

" + "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain.

Default: false

" } } }, @@ -11037,7 +11037,7 @@ "target": "com.amazonaws.cloudformation#Arn", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the rollback trigger.

If a specified trigger is missing, the entire stack operation fails and is rolled back.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the rollback trigger.

If a specified trigger is missing, the entire stack operation fails and is rolled back.

", "smithy.api#required": {} } }, @@ -11045,13 +11045,13 @@ "target": "com.amazonaws.cloudformation#Type", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The resource type of the rollback trigger. Specify either AWS::CloudWatch::Alarm or AWS::CloudWatch::CompositeAlarm resource types.

", + "smithy.api#documentation": "

The resource type of the rollback trigger. Specify either AWS::CloudWatch::Alarm or AWS::CloudWatch::CompositeAlarm resource types.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

A rollback trigger CloudFormation monitors during creation and updating of stacks. If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.

" + "smithy.api#documentation": "

A rollback trigger CloudFormation monitors during creation and updating of stacks. If any of the alarms you specify goes to ALARM state during the stack operation or within the specified monitoring period afterwards, CloudFormation rolls back the entire stack operation.

" } }, "com.amazonaws.cloudformation#RollbackTriggers": { @@ -11091,13 +11091,13 @@ "ResourceType": { "target": "com.amazonaws.cloudformation#ResourceType", "traits": { - "smithy.api#documentation": "

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support In the CloudFormation User Guide.

" + "smithy.api#documentation": "

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support for imports and drift detection In the CloudFormation User Guide.

" } }, "ResourceIdentifier": { "target": "com.amazonaws.cloudformation#JazzResourceIdentifierProperties", "traits": { - "smithy.api#documentation": "

A list of up to 256 key-value pairs that identifies for the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development.

" + "smithy.api#documentation": "

A list of up to 256 key-value pairs that identifies for the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface (CLI) User Guide.

" } }, "ManagedByStack": { @@ -11108,7 +11108,7 @@ } }, "traits": { - "smithy.api#documentation": "

A scanned resource returned by ListResourceScanResources or ListResourceScanRelatedResources.

" + "smithy.api#documentation": "

A scanned resource returned by ListResourceScanResources or ListResourceScanRelatedResources.

" } }, "com.amazonaws.cloudformation#ScannedResourceIdentifier": { @@ -11118,7 +11118,7 @@ "target": "com.amazonaws.cloudformation#ResourceType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see IaC generator supported resource types In the CloudFormation User Guide.

", + "smithy.api#documentation": "

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see Resource type support for imports and drift detection In the CloudFormation User Guide.

", "smithy.api#required": {} } }, @@ -11126,13 +11126,13 @@ "target": "com.amazonaws.cloudformation#JazzResourceIdentifierProperties", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development.

", + "smithy.api#documentation": "

A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface (CLI) User Guide.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Identifies a scanned resource. This is used with the ListResourceScanRelatedResources API action.

" + "smithy.api#documentation": "

Identifies a scanned resource. This is used with the ListResourceScanRelatedResources API action.

" } }, "com.amazonaws.cloudformation#ScannedResourceIdentifiers": { @@ -11179,13 +11179,13 @@ "StackPolicyBody": { "target": "com.amazonaws.cloudformation#StackPolicyBody", "traits": { - "smithy.api#documentation": "

Structure containing the stack policy body. For more information, go to Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" + "smithy.api#documentation": "

Structure containing the stack policy body. For more information, see Prevent updates to stack resources in the CloudFormation User Guide. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" } }, "StackPolicyURL": { "target": "com.amazonaws.cloudformation#StackPolicyURL", "traits": { - "smithy.api#documentation": "

Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an Amazon S3 bucket in the same Amazon Web Services Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.

" + "smithy.api#documentation": "

Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an Amazon S3 bucket in the same Amazon Web Services Region as the stack. The location for an Amazon S3 bucket must start with https://. You can specify either the StackPolicyBody or the StackPolicyURL parameter, but not both.
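To make the StackPolicyBody/StackPolicyURL distinction concrete, here is a hedged Soto sketch that applies an inline policy body; the stack name and logical resource ID are placeholders, and the policy JSON follows the standard stack policy grammar.

```swift
import SotoCloudFormation

// Sketch: deny updates to one logical resource, allow everything else.
// `cloudFormation` is assumed to be a configured CloudFormation(client:region:).
func protectDatabase(_ cloudFormation: CloudFormation) async throws {
    let policy = """
    {
      "Statement": [
        { "Effect": "Deny", "Action": "Update:*", "Principal": "*",
          "Resource": "LogicalResourceId/ProductionDatabase" },
        { "Effect": "Allow", "Action": "Update:*", "Principal": "*", "Resource": "*" }
      ]
    }
    """
    try await cloudFormation.setStackPolicy(
        .init(stackName: "my-stack", stackPolicyBody: policy)
    )
}
```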

" } } }, @@ -11211,7 +11211,7 @@ } ], "traits": { - "smithy.api#documentation": "

Specifies the configuration data for a registered CloudFormation extension, in the given account and Region.

To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Configuring extensions at the account level in the CloudFormation User Guide.

It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Using dynamic references to specify template values in the CloudFormation User Guide.
" + "smithy.api#documentation": "

Specifies the configuration data for a registered CloudFormation extension, in the given account and Region.

To view the current configuration data for an extension, refer to the ConfigurationSchema element of DescribeType. For more information, see Edit configuration data for extensions in your account in the CloudFormation User Guide.

It's strongly recommended that you use dynamic references to restrict sensitive configuration definitions, such as third-party credentials. For more details on dynamic references, see Specify values stored in other services using dynamic references in the CloudFormation User Guide.
" } }, "com.amazonaws.cloudformation#SetTypeConfigurationInput": { @@ -11220,33 +11220,33 @@ "TypeArn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.

Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account and Region.

Do not include the extension versions suffix at the end of the ARN. You can set the configuration for an extension, but not for a specific extension version.

" } }, "Configuration": { "target": "com.amazonaws.cloudformation#TypeConfiguration", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The configuration data for the extension, in this account and Region.

The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining account-level configuration data for an extension in the CloudFormation CLI User Guide.

", + "smithy.api#documentation": "

The configuration data for the extension, in this account and Region.

The configuration data must be formatted as JSON, and validate against the schema returned in the ConfigurationSchema response element of DescribeType. For more information, see Defining the account-level configuration of an extension in the CloudFormation Command Line Interface (CLI) User Guide.

", "smithy.api#required": {} } }, "ConfigurationAlias": { "target": "com.amazonaws.cloudformation#TypeConfigurationAlias", "traits": { - "smithy.api#documentation": "

An alias by which to refer to this extension configuration data.

Conditional: Specifying a configuration alias is required when setting a configuration for a resource type extension.

" + "smithy.api#documentation": "

An alias by which to refer to this extension configuration data.

Conditional: Specifying a configuration alias is required when setting a configuration for a resource type extension.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

Conditional: You must specify ConfigurationArn, or Type and TypeName.

" + "smithy.api#documentation": "

The name of the extension.

Conditional: You must specify ConfigurationArn, or Type and TypeName.

" } }, "Type": { "target": "com.amazonaws.cloudformation#ThirdPartyType", "traits": { - "smithy.api#documentation": "

The type of extension.

Conditional: You must specify ConfigurationArn, or Type and TypeName.

" + "smithy.api#documentation": "

The type of extension.

Conditional: You must specify ConfigurationArn, or Type and TypeName.

" } } }, @@ -11260,7 +11260,7 @@ "ConfigurationArn": { "target": "com.amazonaws.cloudformation#TypeConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the configuration data, in this account and Region.

Conditional: You must specify ConfigurationArn, or Type and TypeName.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the configuration data, in this account and Region.

Conditional: You must specify ConfigurationArn, or Type and TypeName.
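A hedged sketch of the request/response shape described here, using Soto's generated client; the type name, configuration JSON, and enum case are assumptions for illustration, and the real configuration keys come from the extension's ConfigurationSchema.

```swift
import SotoCloudFormation

// Illustrative only: set account-level configuration for a hypothetical private
// resource type, using a dynamic reference for the sensitive value.
func configureExtension(_ cloudFormation: CloudFormation) async throws {
    let output = try await cloudFormation.setTypeConfiguration(
        .init(
            configuration: #"{"Credentials": {"ApiKeyRef": "{{resolve:ssm-secure:/my/api-key}}"}}"#,
            type: .resource,                     // assumed ThirdPartyType case
            typeName: "MyOrg::Example::Widget"   // hypothetical extension name
        )
    )
    print(output.configurationArn ?? "no configuration ARN returned")
}
```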

" } } }, @@ -11285,7 +11285,7 @@ } ], "traits": { - "smithy.api#documentation": "

Specify the default version of an extension. The default version of an extension will be used in CloudFormation operations.

", + "smithy.api#documentation": "

Specify the default version of an extension. The default version of an extension will be used in CloudFormation operations.

", "smithy.api#idempotent": {} } }, @@ -11295,25 +11295,25 @@ "Arn": { "target": "com.amazonaws.cloudformation#PrivateTypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension for which you want version summary information.

Conditional: You must specify either TypeName and Type, or Arn.

" } }, "Type": { "target": "com.amazonaws.cloudformation#RegistryType", "traits": { - "smithy.api#documentation": "

The kind of extension.

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The kind of extension.

Conditional: You must specify either TypeName and Type, or Arn.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" + "smithy.api#documentation": "

The name of the extension.

Conditional: You must specify either TypeName and Type, or Arn.

" } }, "VersionId": { "target": "com.amazonaws.cloudformation#TypeVersionId", "traits": { - "smithy.api#documentation": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it is registered.

" + "smithy.api#documentation": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the extension version when it is registered.
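For completeness, a hedged Soto sketch of setting a default version; the type name and version ID are invented, and the enum case name is an assumption about the generated RegistryType.

```swift
import SotoCloudFormation

// Sketch: promote a specific registered version of a private resource type
// to be the default used by CloudFormation operations.
func setDefaultVersion(_ cloudFormation: CloudFormation) async throws {
    _ = try await cloudFormation.setTypeDefaultVersion(
        .init(type: .resource, typeName: "MyOrg::Example::Widget", versionId: "00000003")
    )
}
```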

" } } }, @@ -11337,7 +11337,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource operation in conjunction with a creation policy or update policy. CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource operation is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

" + "smithy.api#documentation": "

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource operation in conjunction with a creation policy or update policy. CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource operation is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

" } }, "com.amazonaws.cloudformation#SignalResourceInput": { @@ -11347,7 +11347,7 @@ "target": "com.amazonaws.cloudformation#StackNameOrId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The stack name or unique stack ID that includes the resource that you want to signal.

", + "smithy.api#documentation": "

The stack name or unique stack ID that includes the resource that you want to signal.

", "smithy.api#required": {} } }, @@ -11355,7 +11355,7 @@ "target": "com.amazonaws.cloudformation#LogicalResourceId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The logical ID of the resource that you want to signal. The logical ID is the name of the resource that given in the template.

", + "smithy.api#documentation": "

The logical ID of the resource that you want to signal. The logical ID is the name of the resource that given in the template.

", "smithy.api#required": {} } }, @@ -11363,7 +11363,7 @@ "target": "com.amazonaws.cloudformation#ResourceSignalUniqueId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling groups, specify the instance ID that you are signaling as the unique ID. If you send multiple signals to a single resource (such as signaling a wait condition), each signal requires a different unique ID.

", + "smithy.api#documentation": "

A unique ID of the signal. When you signal Amazon EC2 instances or Auto Scaling groups, specify the instance ID that you are signaling as the unique ID. If you send multiple signals to a single resource (such as signaling a wait condition), each signal requires a different unique ID.

", "smithy.api#required": {} } }, @@ -11371,7 +11371,7 @@ "target": "com.amazonaws.cloudformation#ResourceSignalStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The status of the signal, which is either success or failure. A failure signal causes CloudFormation to immediately fail the stack creation or update.

", + "smithy.api#documentation": "

The status of the signal, which is either success or failure. A failure signal causes CloudFormation to immediately fail the stack creation or update.
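Putting the four required members together, a hedged Soto sketch of sending a success signal; the stack name, logical ID, and instance ID are placeholders.

```swift
import SotoCloudFormation

// Sketch: signal success for a wait condition / Auto Scaling resource.
// `cloudFormation` is assumed to be a configured CloudFormation(client:region:).
func signalSuccess(_ cloudFormation: CloudFormation) async throws {
    try await cloudFormation.signalResource(
        .init(
            logicalResourceId: "WebServerGroup",   // hypothetical logical ID
            stackName: "my-stack",                 // hypothetical stack
            status: .success,
            uniqueId: "i-0123456789abcdef0"        // e.g. the signaling instance ID
        )
    )
}
```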

", "smithy.api#required": {} } } @@ -11433,13 +11433,13 @@ "LastUpdatedTime": { "target": "com.amazonaws.cloudformation#LastUpdatedTime", "traits": { - "smithy.api#documentation": "

The time the stack was last updated. This field will only be returned if the stack has been updated at least once.

" + "smithy.api#documentation": "

The time the stack was last updated. This field will only be returned if the stack has been updated at least once.

" } }, "RollbackConfiguration": { "target": "com.amazonaws.cloudformation#RollbackConfiguration", "traits": { - "smithy.api#documentation": "

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" + "smithy.api#documentation": "

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

" } }, "StackStatus": { @@ -11489,7 +11489,7 @@ "RoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that's associated with the stack. During a stack operation, CloudFormation uses this role's credentials to make calls on your behalf.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that's associated with the stack. During a stack operation, CloudFormation uses this role's credentials to make calls on your behalf.

" } }, "Tags": { @@ -11501,43 +11501,43 @@ "EnableTerminationProtection": { "target": "com.amazonaws.cloudformation#EnableTerminationProtection", "traits": { - "smithy.api#documentation": "

Whether termination protection is enabled for the stack.

For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. For more information, see Protecting a Stack From Being Deleted in the CloudFormation User Guide.

" + "smithy.api#documentation": "

Whether termination protection is enabled for the stack.

For nested stacks, termination protection is set on the root stack and can't be changed directly on the nested stack. For more information, see Protect a CloudFormation stack from being deleted in the CloudFormation User Guide.

" } }, "ParentId": { "target": "com.amazonaws.cloudformation#StackId", "traits": { - "smithy.api#documentation": "

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" + "smithy.api#documentation": "

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.

For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide.

" } }, "RootId": { "target": "com.amazonaws.cloudformation#StackId", "traits": { - "smithy.api#documentation": "

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Working with Nested Stacks in the CloudFormation User Guide.

" + "smithy.api#documentation": "

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.

For more information, see Embed stacks within other stacks using nested stacks in the CloudFormation User Guide.

" } }, "DriftInformation": { "target": "com.amazonaws.cloudformation#StackDriftInformation", "traits": { - "smithy.api#documentation": "

Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "smithy.api#documentation": "

Information about whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For more information, see Detect unmanaged configuration changes to stacks and resources with drift detection.

" } }, "RetainExceptOnCreate": { "target": "com.amazonaws.cloudformation#RetainExceptOnCreate", "traits": { - "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain.

Default: false

" + "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation rolls back. This includes newly created resources marked with a deletion policy of Retain.

Default: false

" } }, "DeletionMode": { "target": "com.amazonaws.cloudformation#DeletionMode", "traits": { - "smithy.api#documentation": "

Specifies the deletion mode for the stack. Possible values are:

  • STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter.
  • FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure.
" + "smithy.api#documentation": "

Specifies the deletion mode for the stack. Possible values are:

  • STANDARD - Use the standard behavior. Specifying this value is the same as not specifying this parameter.
  • FORCE_DELETE_STACK - Delete the stack if it's stuck in a DELETE_FAILED state due to resource deletion failure.
" } }, "DetailedStatus": { "target": "com.amazonaws.cloudformation#DetailedStatus", "traits": { - "smithy.api#documentation": "

The detailed status of the resource or stack. If CONFIGURATION_COMPLETE is present, the resource or resource configuration phase has completed and the stabilization of the resources is in progress. The stack sets CONFIGURATION_COMPLETE when all of the resources in the stack have reached that event. For more information, see CloudFormation stack deployment in the CloudFormation User Guide.

" + "smithy.api#documentation": "

The detailed status of the resource or stack. If CONFIGURATION_COMPLETE is present, the resource or resource configuration phase has completed and the stabilization of the resources is in progress. The stack sets CONFIGURATION_COMPLETE when all of the resources in the stack have reached that event. For more information, see Understand CloudFormation stack creation events in the CloudFormation User Guide.

" } } }, @@ -11587,19 +11587,19 @@ "target": "com.amazonaws.cloudformation#StackDriftStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Status of the stack's actual configuration compared to its expected template configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.
  • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.
  • IN_SYNC: The stack's actual configuration matches its expected template configuration.
  • UNKNOWN: This value is reserved for future use.
", + "smithy.api#documentation": "

Status of the stack's actual configuration compared to its expected template configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.
  • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.
  • IN_SYNC: The stack's actual configuration matches its expected template configuration.
  • UNKNOWN: This value is reserved for future use.
", "smithy.api#required": {} } }, "LastCheckTimestamp": { "target": "com.amazonaws.cloudformation#Timestamp", "traits": { - "smithy.api#documentation": "

Most recent time when a drift detection operation was initiated on the stack, or any of its individual resources that support drift detection.

" + "smithy.api#documentation": "

Most recent time when a drift detection operation was initiated on the stack, or any of its individual resources that support drift detection.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about whether the stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted.

" + "smithy.api#documentation": "

Contains information about whether the stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted.

" } }, "com.amazonaws.cloudformation#StackDriftInformationSummary": { @@ -11609,19 +11609,19 @@ "target": "com.amazonaws.cloudformation#StackDriftStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Status of the stack's actual configuration compared to its expected template configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.
  • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.
  • IN_SYNC: The stack's actual configuration matches its expected template configuration.
  • UNKNOWN: This value is reserved for future use.
", + "smithy.api#documentation": "

Status of the stack's actual configuration compared to its expected template configuration.

  • DRIFTED: The stack differs from its expected template configuration. A stack is considered to have drifted if one or more of its resources have drifted.
  • NOT_CHECKED: CloudFormation hasn't checked if the stack differs from its expected template configuration.
  • IN_SYNC: The stack's actual configuration matches its expected template configuration.
  • UNKNOWN: This value is reserved for future use.
", "smithy.api#required": {} } }, "LastCheckTimestamp": { "target": "com.amazonaws.cloudformation#Timestamp", "traits": { - "smithy.api#documentation": "

Most recent time when a drift detection operation was initiated on the stack, or any of its individual resources that support drift detection.

" + "smithy.api#documentation": "

Most recent time when a drift detection operation was initiated on the stack, or any of its individual resources that support drift detection.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about whether the stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted.

" + "smithy.api#documentation": "

Contains information about whether the stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted.

" } }, "com.amazonaws.cloudformation#StackDriftStatus": { @@ -11695,7 +11695,7 @@ "ResourceType": { "target": "com.amazonaws.cloudformation#ResourceType", "traits": { - "smithy.api#documentation": "

Type of resource. (For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.)

" + "smithy.api#documentation": "

Type of resource. For more information, see Amazon Web Services resource and property types reference in the CloudFormation User Guide.

" } }, "Timestamp": { @@ -11727,7 +11727,7 @@ "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

The token passed to the operation that generated this event.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + "smithy.api#documentation": "

The token passed to the operation that generated this event.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "HookType": { @@ -11763,7 +11763,7 @@ "DetailedStatus": { "target": "com.amazonaws.cloudformation#DetailedStatus", "traits": { - "smithy.api#documentation": "

An optional field containing information about the detailed status of the stack event.

  • CONFIGURATION_COMPLETE - all of the resources in the stack have reached that event. For more information, see CloudFormation stack deployment in the CloudFormation User Guide.
  • VALIDATION_FAILED - template validation failed because of invalid properties in the template. The ResourceStatusReason field shows what properties are defined incorrectly.
" + "smithy.api#documentation": "

An optional field containing information about the detailed status of the stack event.

  • VALIDATION_FAILED - template validation failed because of invalid properties in the template. The ResourceStatusReason field shows what properties are defined incorrectly.
" } } }, @@ -11873,7 +11873,7 @@ } }, "traits": { - "smithy.api#documentation": "

An CloudFormation stack, in a specific account and Region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given Region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, in addition to the ID of the actual stack and the stack status.

" + "smithy.api#documentation": "

A CloudFormation stack, in a specific account and Region, that's part of a stack set operation. A stack instance is a reference to an attempted or actual stack in a given account within a given Region. A stack instance can exist without a stack—for example, if the stack couldn't be created for some reason. A stack instance is associated with only one stack set. Each stack instance contains the ID of its associated stack set, in addition to the ID of the actual stack and the stack status.

" } }, "com.amazonaws.cloudformation#StackInstanceComprehensiveStatus": { @@ -12066,7 +12066,7 @@ "target": "com.amazonaws.cloudformation#ResourceType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Type of resource. For more information, go to Amazon Web Services Resource Types Reference in the CloudFormation User Guide.

", + "smithy.api#documentation": "

Type of resource. For more information, see Amazon Web Services resource and property types reference in the CloudFormation User Guide.

", "smithy.api#required": {} } }, @@ -12291,14 +12291,14 @@ "PhysicalResourceId": { "target": "com.amazonaws.cloudformation#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.

" + "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of a resource\n supported by CloudFormation.

" } }, "ResourceType": { "target": "com.amazonaws.cloudformation#ResourceType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Type of resource. For more information, go to Amazon Web Services Resource Types\n Reference in the CloudFormation User Guide.

", + "smithy.api#documentation": "

Type of resource. For more information, see Amazon Web Services resource and\n property types reference in the CloudFormation User Guide.

", "smithy.api#required": {} } }, @@ -12333,13 +12333,13 @@ "DriftInformation": { "target": "com.amazonaws.cloudformation#StackResourceDriftInformation", "traits": { - "smithy.api#documentation": "

Information about whether the resource's actual configuration differs, or has drifted, from\n its expected configuration, as defined in the stack template and any values specified as template parameters. For\n more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "smithy.api#documentation": "

Information about whether the resource's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack template\n and any values specified as template parameters. For more information, see Detect\n unmanaged configuration changes to stacks and resources with drift detection.

" } }, "ModuleInfo": { "target": "com.amazonaws.cloudformation#ModuleInfo", "traits": { - "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource was created from a\n module included in the stack template.

" + "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource\n was created from a module included in the stack template.

" } } }, @@ -12373,14 +12373,14 @@ "PhysicalResourceId": { "target": "com.amazonaws.cloudformation#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of a resource supported by CloudFormation.

" + "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of a resource\n supported by CloudFormation.

" } }, "ResourceType": { "target": "com.amazonaws.cloudformation#ResourceType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Type of resource. For more information, go to Amazon Web Services Resource Types\n Reference in the CloudFormation User Guide.

", + "smithy.api#documentation": "

Type of resource. For more information, see Amazon Web Services resource and\n property types reference in the CloudFormation User Guide.

", "smithy.api#required": {} } }, @@ -12415,19 +12415,19 @@ "Metadata": { "target": "com.amazonaws.cloudformation#Metadata", "traits": { - "smithy.api#documentation": "

The content of the Metadata attribute declared for the resource. For more information, see Metadata Attribute\n in the CloudFormation User Guide.

" + "smithy.api#documentation": "

The content of the Metadata attribute declared for the resource. For more\n information, see Metadata attribute\n in the CloudFormation User Guide.

" } }, "DriftInformation": { "target": "com.amazonaws.cloudformation#StackResourceDriftInformation", "traits": { - "smithy.api#documentation": "

Information about whether the resource's actual configuration differs, or has drifted, from\n its expected configuration, as defined in the stack template and any values specified as template parameters. For\n more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "smithy.api#documentation": "

Information about whether the resource's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack template\n and any values specified as template parameters. For more information, see Detect\n unmanaged configuration changes to stacks and resources with drift detection.

" } }, "ModuleInfo": { "target": "com.amazonaws.cloudformation#ModuleInfo", "traits": { - "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource was created from a\n module included in the stack template.

" + "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource\n was created from a module included in the stack template.

" } } }, @@ -12457,13 +12457,13 @@ "PhysicalResourceId": { "target": "com.amazonaws.cloudformation#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of a resource supported by\n CloudFormation.

" + "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of a resource\n supported by CloudFormation.

" } }, "PhysicalResourceIdContext": { "target": "com.amazonaws.cloudformation#PhysicalResourceIdContext", "traits": { - "smithy.api#documentation": "

Context information that enables CloudFormation to uniquely identify a resource. CloudFormation uses context key-value pairs in\n cases where a resource's logical and physical IDs aren't enough to uniquely identify that resource. Each context\n key-value pair specifies a unique resource that contains the targeted resource.

" + "smithy.api#documentation": "

Context information that enables CloudFormation to uniquely identify a resource. CloudFormation uses\n context key-value pairs in cases where a resource's logical and physical IDs aren't enough to\n uniquely identify that resource. Each context key-value pair specifies a unique resource that\n contains the targeted resource.

" } }, "ResourceType": { @@ -12477,26 +12477,26 @@ "ExpectedProperties": { "target": "com.amazonaws.cloudformation#Properties", "traits": { - "smithy.api#documentation": "

A JSON structure containing the expected property values of the stack resource, as defined in the stack template\n and any values specified as template parameters.

\n

For resources whose StackResourceDriftStatus is DELETED, this structure will not be\n present.

" + "smithy.api#documentation": "

A JSON structure containing the expected property values of the stack resource, as defined\n in the stack template and any values specified as template parameters.

\n

For resources whose StackResourceDriftStatus is DELETED, this\n structure will not be present.

" } }, "ActualProperties": { "target": "com.amazonaws.cloudformation#Properties", "traits": { - "smithy.api#documentation": "

A JSON structure containing the actual property values of the stack resource.

\n

For resources whose StackResourceDriftStatus is DELETED, this structure will not be\n present.

" + "smithy.api#documentation": "

A JSON structure containing the actual property values of the stack resource.

\n

For resources whose StackResourceDriftStatus is DELETED, this\n structure will not be present.

" } }, "PropertyDifferences": { "target": "com.amazonaws.cloudformation#PropertyDifferences", "traits": { - "smithy.api#documentation": "

A collection of the resource properties whose actual values differ from their expected values. These will be\n present only for resources whose StackResourceDriftStatus is\n MODIFIED.

" + "smithy.api#documentation": "

A collection of the resource properties whose actual values differ from their expected\n values. These will be present only for resources whose StackResourceDriftStatus is\n MODIFIED.

" } }, "StackResourceDriftStatus": { "target": "com.amazonaws.cloudformation#StackResourceDriftStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Status of the resource's actual configuration compared to its expected configuration.

  • DELETED: The resource differs from its expected template configuration because the resource has been deleted.
  • MODIFIED: One or more resource properties differ from their expected values (as defined in the stack template and any values specified as template parameters).
  • IN_SYNC: The resource's actual configuration matches its expected template configuration.
  • NOT_CHECKED: CloudFormation does not currently return this value.
", + "smithy.api#documentation": "

Status of the resource's actual configuration compared to its expected configuration.

  • DELETED: The resource differs from its expected template configuration because the resource has been deleted.
  • MODIFIED: One or more resource properties differ from their expected values (as defined in the stack template and any values specified as template parameters).
  • IN_SYNC: The resource's actual configuration matches its expected template configuration.
  • NOT_CHECKED: CloudFormation does not currently return this value.
", "smithy.api#required": {} } }, @@ -12511,12 +12511,12 @@ "ModuleInfo": { "target": "com.amazonaws.cloudformation#ModuleInfo", "traits": { - "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource was created from a\n module included in the stack template.

" + "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource\n was created from a module included in the stack template.

" } } }, "traits": { - "smithy.api#documentation": "

Contains the drift information for a resource that has been checked for drift. This includes actual and expected\n property values for resources in which CloudFormation has detected drift. Only resource properties explicitly defined in the\n stack template are checked for drift. For more information, see Detecting Unregulated Configuration Changes to\n Stacks and Resources.

\n

Resources that don't currently support drift detection can't be checked. For a list of resources that support\n drift detection, see Resources that Support Drift\n Detection.

\n

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

" + "smithy.api#documentation": "

Contains the drift information for a resource that has been checked for drift. This includes\n actual and expected property values for resources in which CloudFormation has detected drift. Only\n resource properties explicitly defined in the stack template are checked for drift. For more\n information, see Detect unmanaged\n configuration changes to stacks and resources with drift detection.

\n

Resources that don't currently support drift detection can't be checked. For a list of\n resources that support drift detection, see Resource type\n support for imports and drift detection.

\n

Use DetectStackResourceDrift to detect drift on individual resources, or\n DetectStackDrift to detect drift on all resources in a given stack that\n support drift detection.

" } }, "com.amazonaws.cloudformation#StackResourceDriftInformation": { @@ -12526,19 +12526,19 @@ "target": "com.amazonaws.cloudformation#StackResourceDriftStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Status of the resource's actual configuration compared to its expected configuration

  • DELETED: The resource differs from its expected configuration in that it has been deleted.
  • MODIFIED: The resource differs from its expected configuration.
  • NOT_CHECKED: CloudFormation has not checked if the resource differs from its expected configuration.
    Any resources that do not currently support drift detection have a status of NOT_CHECKED. For more information, see Resources that Support Drift Detection.
  • IN_SYNC: The resource's actual configuration matches its expected configuration.
", + "smithy.api#documentation": "

Status of the resource's actual configuration compared to its expected configuration

  • DELETED: The resource differs from its expected configuration in that it has been deleted.
  • MODIFIED: The resource differs from its expected configuration.
  • NOT_CHECKED: CloudFormation has not checked if the resource differs from its expected configuration.
    Any resources that do not currently support drift detection have a status of NOT_CHECKED. For more information, see Resource type support for imports and drift detection.
  • IN_SYNC: The resource's actual configuration matches its expected configuration.
", "smithy.api#required": {} } }, "LastCheckTimestamp": { "target": "com.amazonaws.cloudformation#Timestamp", "traits": { - "smithy.api#documentation": "

When CloudFormation last checked if the resource had drifted from its expected configuration.

" + "smithy.api#documentation": "

When CloudFormation last checked if the resource had drifted from its expected\n configuration.

" } } }, "traits": { - "smithy.api#documentation": "

Contains information about whether the resource's actual configuration differs, or has\n drifted, from its expected configuration.

" + "smithy.api#documentation": "

Contains information about whether the resource's actual configuration differs, or has\n drifted, from its expected configuration.

" } }, "com.amazonaws.cloudformation#StackResourceDriftInformationSummary": { @@ -12548,19 +12548,19 @@ "target": "com.amazonaws.cloudformation#StackResourceDriftStatus", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Status of the resource's actual configuration compared to its expected configuration.

  • DELETED: The resource differs from its expected configuration in that it has been deleted.
  • MODIFIED: The resource differs from its expected configuration.
  • NOT_CHECKED: CloudFormation hasn't checked if the resource differs from its expected configuration.
    Any resources that don't currently support drift detection have a status of NOT_CHECKED. For more information, see Resources that Support Drift Detection. If you performed an ContinueUpdateRollback operation on a stack, any resources included in ResourcesToSkip will also have a status of NOT_CHECKED. For more information about skipping resources during rollback operations, see Continue Rolling Back an Update in the CloudFormation User Guide.
  • IN_SYNC: The resource's actual configuration matches its expected configuration.
", + "smithy.api#documentation": "

Status of the resource's actual configuration compared to its expected configuration.

  • DELETED: The resource differs from its expected configuration in that it has been deleted.
  • MODIFIED: The resource differs from its expected configuration.
  • NOT_CHECKED: CloudFormation hasn't checked if the resource differs from its expected configuration.
    Any resources that don't currently support drift detection have a status of NOT_CHECKED. For more information, see Resource type support for imports and drift detection. If you performed an ContinueUpdateRollback operation on a stack, any resources included in ResourcesToSkip will also have a status of NOT_CHECKED. For more information about skipping resources during rollback operations, see Continue rolling back an update in the CloudFormation User Guide.
  • IN_SYNC: The resource's actual configuration matches its expected configuration.
", "smithy.api#required": {} } }, "LastCheckTimestamp": { "target": "com.amazonaws.cloudformation#Timestamp", "traits": { - "smithy.api#documentation": "

When CloudFormation last checked if the resource had drifted from its expected configuration.

" + "smithy.api#documentation": "

When CloudFormation last checked if the resource had drifted from its expected\n configuration.

" } } }, "traits": { - "smithy.api#documentation": "

Summarizes information about whether the resource's actual configuration differs, or has\n drifted, from its expected configuration.

" + "smithy.api#documentation": "

Summarizes information about whether the resource's actual configuration differs, or has\n drifted, from its expected configuration.

" } }, "com.amazonaws.cloudformation#StackResourceDriftStatus": { @@ -12630,14 +12630,14 @@ "PhysicalResourceId": { "target": "com.amazonaws.cloudformation#PhysicalResourceId", "traits": { - "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of the resource.

" + "smithy.api#documentation": "

The name or unique identifier that corresponds to a physical instance ID of the\n resource.

" } }, "ResourceType": { "target": "com.amazonaws.cloudformation#ResourceType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Type of resource. (For more information, go to Amazon Web Services Resource Types\n Reference in the CloudFormation User Guide.)

", + "smithy.api#documentation": "

Type of resource. (For more information, see Amazon Web Services resource and\n property types reference in the CloudFormation User Guide.)

", "smithy.api#required": {} } }, @@ -12666,13 +12666,13 @@ "DriftInformation": { "target": "com.amazonaws.cloudformation#StackResourceDriftInformationSummary", "traits": { - "smithy.api#documentation": "

Information about whether the resource's actual configuration differs, or has drifted, from\n its expected configuration, as defined in the stack template and any values specified as template parameters. For\n more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

" + "smithy.api#documentation": "

Information about whether the resource's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack template\n and any values specified as template parameters. For more information, see Detect\n unmanaged configuration changes to stacks and resources with drift detection.

" } }, "ModuleInfo": { "target": "com.amazonaws.cloudformation#ModuleInfo", "traits": { - "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource was created from a\n module included in the stack template.

" + "smithy.api#documentation": "

Contains information about the module from which the resource was created, if the resource\n was created from a module included in the stack template.

" } } }, @@ -12728,7 +12728,7 @@ "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "

The capabilities that are allowed in the stack set. Some stack set templates might include resources that can\n affect permissions in your Amazon Web Services account—for example, by creating new Identity and Access Management (IAM) users. For more information, see Acknowledging IAM Resources in CloudFormation\n Templates.\n

" + "smithy.api#documentation": "

The capabilities that are allowed in the stack set. Some stack set templates might include resources that can\n affect permissions in your Amazon Web Services account—for example, by creating new Identity and Access Management (IAM) users. For more information, see Acknowledging IAM resources in\n CloudFormation templates.

" } }, "Tags": { @@ -13009,7 +13009,7 @@ "Status": { "target": "com.amazonaws.cloudformation#StackSetOperationStatus", "traits": { - "smithy.api#documentation": "

The status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and CloudFormation cancels the operation in any remaining Regions.
  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the CloudFormation User Guide.
  • RUNNING: The operation is currently being performed.
  • STOPPED: The user has canceled the operation.
  • STOPPING: The operation is in the process of stopping, at user request.
  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.
" + "smithy.api#documentation": "

The status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and CloudFormation cancels the operation in any remaining Regions.
  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the CloudFormation User Guide.
  • RUNNING: The operation is currently being performed.
  • STOPPED: The user has canceled the operation.
  • STOPPING: The operation is in the process of stopping, at user request.
  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.
" } }, "OperationPreferences": { @@ -13057,7 +13057,7 @@ "StackSetDriftDetectionDetails": { "target": "com.amazonaws.cloudformation#StackSetDriftDetectionDetails", "traits": { - "smithy.api#documentation": "

Detailed information about the drift status of the stack set. This includes information about drift operations\n currently being performed on the stack set.

\n

This information will only be present for stack set operations whose Action type is\n DETECT_DRIFT.

\n

For more information, see Detecting Unmanaged Changes in Stack Sets in\n the CloudFormation User Guide.

" + "smithy.api#documentation": "

Detailed information about the drift status of the stack set. This includes information about drift operations\n currently being performed on the stack set.

\n

This information will only be present for stack set operations whose Action type is\n DETECT_DRIFT.

\n

For more information, see Detect stack set drift in\n the CloudFormation User Guide.

" } }, "StatusReason": { @@ -13153,7 +13153,7 @@ } }, "traits": { - "smithy.api#documentation": "

The user-specified preferences for how CloudFormation performs a stack set operation.

\n

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation\n options.

" + "smithy.api#documentation": "

The user-specified preferences for how CloudFormation performs a stack set operation.

\n

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation\n options.

" } }, "com.amazonaws.cloudformation#StackSetOperationResultStatus": { @@ -13323,7 +13323,7 @@ "Status": { "target": "com.amazonaws.cloudformation#StackSetOperationStatus", "traits": { - "smithy.api#documentation": "

The overall status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and CloudFormation cancels the operation in any remaining Regions.
  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the CloudFormation User Guide.
  • RUNNING: The operation is currently being performed.
  • STOPPED: The user has canceled the operation.
  • STOPPING: The operation is in the process of stopping, at user request.
  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.
" + "smithy.api#documentation": "

The overall status of the operation.

  • FAILED: The operation exceeded the specified failure tolerance. The failure tolerance value that you've set for an operation is applied for each Region during stack create and update operations. If the number of failed stacks within a Region exceeds the failure tolerance, the status of the operation in the Region is set to FAILED. This in turn sets the status of the operation as a whole to FAILED, and CloudFormation cancels the operation in any remaining Regions.
  • QUEUED: [Service-managed permissions] For automatic deployments that require a sequence of operations, the operation is queued to be performed. For more information, see the stack set operation status codes in the CloudFormation User Guide.
  • RUNNING: The operation is currently being performed.
  • STOPPED: The user has canceled the operation.
  • STOPPING: The operation is in the process of stopping, at user request.
  • SUCCEEDED: The operation completed creating or updating all the specified stacks without exceeding the failure tolerance for the operation.
" } }, "CreationTimestamp": { @@ -13353,7 +13353,7 @@ "OperationPreferences": { "target": "com.amazonaws.cloudformation#StackSetOperationPreferences", "traits": { - "smithy.api#documentation": "

The user-specified preferences for how CloudFormation performs a stack set operation.

\n

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation\n options.

" + "smithy.api#documentation": "

The user-specified preferences for how CloudFormation performs a stack set operation.

\n

For more information about maximum concurrent accounts and failure tolerance, see Stack set operation\n options.

" } } }, @@ -13638,7 +13638,7 @@ "LastUpdatedTime": { "target": "com.amazonaws.cloudformation#LastUpdatedTime", "traits": { - "smithy.api#documentation": "

The time the stack was last updated. This field will only be returned if the stack has been updated at least\n once.

" + "smithy.api#documentation": "

The time the stack was last updated. This field will only be returned if the stack has been\n updated at least once.

" } }, "DeletionTime": { @@ -13664,19 +13664,19 @@ "ParentId": { "target": "com.amazonaws.cloudformation#StackId", "traits": { - "smithy.api#documentation": "

For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this\n stack. For the first level of nested stacks, the root stack is also the parent stack.

\n

For more information, see Working with Nested Stacks in the\n CloudFormation User Guide.

" + "smithy.api#documentation": "

For nested stacks--stacks created as resources for another stack--the stack ID of the direct\n parent of this stack. For the first level of nested stacks, the root stack is also the parent\n stack.

\n

For more information, see Embed stacks within other\n stacks using nested stacks in the CloudFormation User Guide.

" } }, "RootId": { "target": "com.amazonaws.cloudformation#StackId", "traits": { - "smithy.api#documentation": "

For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which\n the nested stack ultimately belongs.

\n

For more information, see Working with Nested Stacks in the\n CloudFormation User Guide.

" + "smithy.api#documentation": "

For nested stacks--stacks created as resources for another stack--the stack ID of the\n top-level stack to which the nested stack ultimately belongs.

\n

For more information, see Embed stacks within other\n stacks using nested stacks in the CloudFormation User Guide.

" } }, "DriftInformation": { "target": "com.amazonaws.cloudformation#StackDriftInformationSummary", "traits": { - "smithy.api#documentation": "

Summarizes information about whether a stack's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack template and any values\n specified as template parameters. For more information, see Detecting Unregulated Configuration Changes to\n Stacks and Resources.

" + "smithy.api#documentation": "

Summarizes information about whether a stack's actual configuration differs, or has\n drifted, from its expected configuration, as defined in the stack template\n and any values specified as template parameters. For more information, see Detect\n unmanaged configuration changes to stacks and resources with drift detection.

" } } }, @@ -13730,7 +13730,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a scan of the resources in this account in this Region. You can the status of a scan using the\n ListResourceScans API action.

", + "smithy.api#documentation": "

Starts a scan of the resources in this account in this Region. You can check the status of a\n scan using the ListResourceScans API action.

", "smithy.api#examples": [ { "title": "To start a resource scan", @@ -13748,7 +13748,7 @@ "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

A unique identifier for this StartResourceScan request. Specify this token if you plan to retry\n requests so that CloudFormation knows that you're not attempting to start a new resource scan.

" + "smithy.api#documentation": "

A unique identifier for this StartResourceScan request. Specify this token if\n you plan to retry requests so that CloudFormation knows that you're not attempting to start a new\n resource scan.

" } } }, @@ -13762,7 +13762,7 @@ "ResourceScanId": { "target": "com.amazonaws.cloudformation#ResourceScanId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource scan. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:resourceScan/${Id}. An example is\n arn:aws:cloudformation:us-east-1:123456789012:resourceScan/f5b490f7-7ed4-428a-aa06-31ff25db0772\n .

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource scan. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:resourceScan/${Id}. An\n example is\n arn:aws:cloudformation:us-east-1:123456789012:resourceScan/f5b490f7-7ed4-428a-aa06-31ff25db0772\n .

" } } }, @@ -13824,7 +13824,7 @@ "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

  • If you are signed in to the management account, specify SELF.
  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.
    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

  • If you are signed in to the management account, specify SELF.
  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.
    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.
" } } }, @@ -13861,7 +13861,7 @@ "target": "com.amazonaws.cloudformation#TagKey", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

\n Required. A string used to identify this tag. You can specify a maximum of 128 characters\n for a tag key. Tags owned by Amazon Web Services (Amazon Web Services) have the reserved prefix:\n aws:.

", + "smithy.api#documentation": "

\n Required. A string used to identify this tag. You can specify a maximum of\n 128 characters for a tag key. Tags owned by Amazon Web Services have the reserved prefix:\n aws:.

", "smithy.api#required": {} } }, @@ -13869,13 +13869,13 @@ "target": "com.amazonaws.cloudformation#TagValue", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

\n Required. A string containing the value for this tag. You can specify a maximum of 256\n characters for a tag value.

", + "smithy.api#documentation": "

\n Required. A string containing the value for this tag. You can specify a\n maximum of 256 characters for a tag value.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The Tag type enables you to specify a key-value pair that can be used to store information about an CloudFormation stack.

" + "smithy.api#documentation": "

The Tag type enables you to specify a key-value pair that can be used to store information\n about an CloudFormation stack.

" } }, "com.amazonaws.cloudformation#TagKey": { @@ -13922,13 +13922,13 @@ "DeletionPolicy": { "target": "com.amazonaws.cloudformation#GeneratedTemplateDeletionPolicy", "traits": { - "smithy.api#documentation": "

The DeletionPolicy assigned to resources in the generated template. Supported values are:

  • DELETE - delete all resources when the stack is deleted.
  • RETAIN - retain all resources when the stack is deleted.

For more information, see DeletionPolicy attribute in the\n CloudFormation User Guide.

" + "smithy.api#documentation": "

The DeletionPolicy assigned to resources in the generated template. Supported\n values are:

  • DELETE - delete all resources when the stack is deleted.
  • RETAIN - retain all resources when the stack is deleted.

For more information, see DeletionPolicy\n attribute in the CloudFormation User Guide.

" } }, "UpdateReplacePolicy": { "target": "com.amazonaws.cloudformation#GeneratedTemplateUpdateReplacePolicy", "traits": { - "smithy.api#documentation": "

The UpdateReplacePolicy assigned to resources in the generated template. Supported values\n are:

  • DELETE - delete all resources when the resource is replaced during an update operation.
  • RETAIN - retain all resources when the resource is replaced during an update operation.

For more information, see UpdateReplacePolicy\n attribute in the CloudFormation User Guide.

" + "smithy.api#documentation": "

The UpdateReplacePolicy assigned to resources in the generated template.\n Supported values are:

  • DELETE - delete all resources when the resource is replaced during an update operation.
  • RETAIN - retain all resources when the resource is replaced during an update operation.

For more information, see UpdateReplacePolicy attribute in the CloudFormation User Guide.

" } } }, @@ -13974,7 +13974,7 @@ "NoEcho": { "target": "com.amazonaws.cloudformation#NoEcho", "traits": { - "smithy.api#documentation": "

Flag indicating whether the parameter should be displayed as plain text in logs and UIs.

" + "smithy.api#documentation": "

Flag indicating whether the parameter should be displayed as plain text in logs and\n UIs.

" } }, "Description": { @@ -14064,7 +14064,7 @@ "GeneratedTemplateId": { "target": "com.amazonaws.cloudformation#GeneratedTemplateId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the generated template. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For example,\n arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc\n .

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the generated template. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For\n example,\n arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc\n .

" } }, "GeneratedTemplateName": { @@ -14082,7 +14082,7 @@ "StatusReason": { "target": "com.amazonaws.cloudformation#TemplateStatusReason", "traits": { - "smithy.api#documentation": "

The reason for the current template generation status. This will provide more details if a failure\n happened.

" + "smithy.api#documentation": "

The reason for the current template generation status. This will provide more details if a\n failure happened.

" } }, "CreationTime": { @@ -14100,7 +14100,7 @@ "NumberOfResources": { "target": "com.amazonaws.cloudformation#NumberOfResources", "traits": { - "smithy.api#documentation": "

The number of resources in the generated template. This is a total of resources in pending, in-progress,\n completed, and failed states.

" + "smithy.api#documentation": "

The number of resources in the generated template. This is a total of resources in pending,\n in-progress, completed, and failed states.

" } } }, @@ -14114,7 +14114,7 @@ "TreatUnrecognizedResourceTypesAsWarnings": { "target": "com.amazonaws.cloudformation#TreatUnrecognizedResourceTypesAsWarnings", "traits": { - "smithy.api#documentation": "

If set to True, any unrecognized resource types generate warnings and not an error. Any\n unrecognized resource types are returned in the Warnings output parameter.

" + "smithy.api#documentation": "

If set to True, any unrecognized resource types generate warnings and not an\n error. Any unrecognized resource types are returned in the Warnings output\n parameter.

" } } }, @@ -14148,7 +14148,7 @@ } ], "traits": { - "smithy.api#documentation": "

Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.

\n
    \n
  • For resource types, this includes passing all contracts tests defined for the type.
  • For modules, this includes determining if the module's model meets all necessary requirements.

\n

If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region\n for testing.

\n

To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more\n information, see RegisterType.

\n

Once you've initiated testing on an extension using TestType, you can pass the returned\n TypeVersionArn into DescribeType to monitor the current test\n status and test status description for the extension.

\n

An extension must have a test status of PASSED before it can be published. For more information,\n see Publishing\n extensions to make them available for public use in the CloudFormation CLI User\n Guide.

", + "smithy.api#documentation": "

Tests a registered extension to make sure it meets all necessary requirements for being\n published in the CloudFormation registry.

  • For resource types, this includes passing all contracts tests defined for the type.
  • For modules, this includes determining if the module's model meets all necessary requirements.

For more information, see Testing your public extension before publishing in the\n CloudFormation Command Line Interface (CLI) User Guide.

\n

If you don't specify a version, CloudFormation uses the default version of the extension in\n your account and Region for testing.

\n

To perform testing, CloudFormation assumes the execution role specified when the type was\n registered. For more information, see RegisterType.

\n

Once you've initiated testing on an extension using TestType, you can pass\n the returned TypeVersionArn into DescribeType to\n monitor the current test status and test status description for the extension.

\n

An extension must have a test status of PASSED before it can be published.\n For more information, see Publishing extensions\n to make them available for public use in the\n CloudFormation Command Line Interface (CLI) User Guide.

", "smithy.api#idempotent": {} } }, @@ -14158,31 +14158,31 @@ "Arn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the extension.

\n

Conditional: You must specify Arn, or TypeName and\n Type.

" } }, "Type": { "target": "com.amazonaws.cloudformation#ThirdPartyType", "traits": { - "smithy.api#documentation": "

The type of the extension to test.

\n

Conditional: You must specify Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The type of the extension to test.

\n

Conditional: You must specify Arn, or TypeName and\n Type.

" } }, "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension to test.

\n

Conditional: You must specify Arn, or TypeName and Type.

" + "smithy.api#documentation": "

The name of the extension to test.

\n

Conditional: You must specify Arn, or TypeName and\n Type.

" } }, "VersionId": { "target": "com.amazonaws.cloudformation#TypeVersionId", "traits": { - "smithy.api#documentation": "

The version of the extension to test.

\n

You can specify the version id with either Arn, or with TypeName and\n Type.

\n

If you don't specify a version, CloudFormation uses the default version of the extension in this account and Region\n for testing.

" + "smithy.api#documentation": "

The version of the extension to test.

\n

You can specify the version id with either Arn, or with TypeName\n and Type.

\n

If you don't specify a version, CloudFormation uses the default version of the extension in\n this account and Region for testing.

" } }, "LogDeliveryBucket": { "target": "com.amazonaws.cloudformation#S3Bucket", "traits": { - "smithy.api#documentation": "

The S3 bucket to which CloudFormation delivers the contract test execution logs.

\n

CloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a\n test type status of PASSED or FAILED.

\n

The user calling TestType must be able to access items in the specified S3 bucket. Specifically,\n the user needs the following permissions:

  • GetObject
  • PutObject

For more information, see Actions, Resources, and Condition Keys for Amazon S3 in the Amazon Web Services Identity and Access Management User Guide.

" + "smithy.api#documentation": "

The S3 bucket to which CloudFormation delivers the contract test execution logs.

\n

CloudFormation delivers the logs by the time contract testing has completed and the extension\n has been assigned a test type status of PASSED or FAILED.

\n

The user calling TestType must be able to access items in the specified S3\n bucket. Specifically, the user needs the following permissions:

  • GetObject
  • PutObject

For more information, see Actions, Resources, and\n Condition Keys for Amazon S3 in the Identity and Access Management User Guide.

" } } }, @@ -14342,31 +14342,31 @@ "Arn": { "target": "com.amazonaws.cloudformation#TypeConfigurationArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the configuration data, in this account and Region.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the configuration data, in this account and\n Region.

" } }, "Alias": { "target": "com.amazonaws.cloudformation#TypeConfigurationAlias", "traits": { - "smithy.api#documentation": "

The alias specified for this configuration, if one was specified when the configuration was set.

" + "smithy.api#documentation": "

The alias specified for this configuration, if one was specified when the configuration was\n set.

" } }, "Configuration": { "target": "com.amazonaws.cloudformation#TypeConfiguration", "traits": { - "smithy.api#documentation": "

A JSON string specifying the configuration data for the extension, in this account and Region.

\n

If a configuration hasn't been set for a specified extension, CloudFormation returns {}.

" + "smithy.api#documentation": "

A JSON string specifying the configuration data for the extension, in this account and\n Region.

\n

If a configuration hasn't been set for a specified extension, CloudFormation returns\n {}.

" } }, "LastUpdated": { "target": "com.amazonaws.cloudformation#Timestamp", "traits": { - "smithy.api#documentation": "

When the configuration data was last updated for this extension.

\n

If a configuration hasn't been set for a specified extension, CloudFormation returns null.

" + "smithy.api#documentation": "

When the configuration data was last updated for this extension.

\n

If a configuration hasn't been set for a specified extension, CloudFormation returns\n null.

" } }, "TypeArn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

\n

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account\n and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account\n and Region.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

\n

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this\n will be the ARN assigned when you call the RegisterType API\n operation in this account and Region.

" } }, "TypeName": { @@ -14383,7 +14383,7 @@ } }, "traits": { - "smithy.api#documentation": "

Detailed information concerning the specification of a CloudFormation extension in a given account and\n Region.

\n

For more information, see Configuring extensions at\n the account level in the CloudFormation User Guide.

" + "smithy.api#documentation": "

Detailed information concerning the specification of a CloudFormation extension in a given\n account and Region.

\n

For more information, see Edit configuration data\n for extensions in your account in the CloudFormation User Guide.

" } }, "com.amazonaws.cloudformation#TypeConfigurationDetailsList": { @@ -14398,13 +14398,13 @@ "TypeArn": { "target": "com.amazonaws.cloudformation#TypeArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

\n

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account\n and Region. For private extensions, this will be the ARN assigned when you call the RegisterType API operation in this account\n and Region.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the extension, in this account and Region.

\n

For public extensions, this will be the ARN assigned when you call the ActivateType API operation in this account and Region. For private extensions, this\n will be the ARN assigned when you call the RegisterType API\n operation in this account and Region.

" } }, "TypeConfigurationAlias": { "target": "com.amazonaws.cloudformation#TypeConfigurationAlias", "traits": { - "smithy.api#documentation": "

The alias specified for this configuration, if one was specified when the configuration was set.

" + "smithy.api#documentation": "

The alias specified for this configuration, if one was specified when the configuration was\n set.

" } }, "TypeConfigurationArn": { @@ -14447,7 +14447,7 @@ "Message": { "target": "com.amazonaws.cloudformation#ErrorMessage", "traits": { - "smithy.api#documentation": "

An message with details about the error that occurred.

" + "smithy.api#documentation": "

A message with details about the error that occurred.

" } } }, @@ -14467,13 +14467,13 @@ "Category": { "target": "com.amazonaws.cloudformation#Category", "traits": { - "smithy.api#documentation": "

The category of extensions to return.

  • REGISTERED: Private extensions that have been registered for this account and Region.
  • ACTIVATED: Public extensions that have been activated for this account and Region.
  • THIRD_PARTY: Extensions available for use from publishers other than Amazon. This includes:
    • Private extensions registered in the account.
    • Public extensions from publishers other than Amazon, whether activated or not.
  • AWS_TYPES: Extensions available for use from Amazon.
" + "smithy.api#documentation": "

The category of extensions to return.

  • REGISTERED: Private extensions that have been registered for this account and Region.
  • ACTIVATED: Public extensions that have been activated for this account and Region.
  • THIRD_PARTY: Extensions available for use from publishers other than Amazon. This includes:
    • Private extensions registered in the account.
    • Public extensions from publishers other than Amazon, whether activated or not.
  • AWS_TYPES: Extensions available for use from Amazon.
" } }, "PublisherId": { "target": "com.amazonaws.cloudformation#PublisherId", "traits": { - "smithy.api#documentation": "

The id of the publisher of the extension.

\n

Extensions published by Amazon aren't assigned a publisher ID. Use the AWS_TYPES category to\n specify a list of types published by Amazon.

" + "smithy.api#documentation": "

The id of the publisher of the extension.

\n

Extensions published by Amazon aren't assigned a publisher ID. Use the\n AWS_TYPES category to specify a list of types published by Amazon.

" } }, "TypeNamePrefix": { @@ -14516,7 +14516,7 @@ "Message": { "target": "com.amazonaws.cloudformation#ErrorMessage", "traits": { - "smithy.api#documentation": "

An message with details about the error that occurred.

" + "smithy.api#documentation": "

A message with details about the error that occurred.

" } } }, @@ -14557,13 +14557,13 @@ "TypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

The name of the extension.

\n

If you specified a TypeNameAlias when you call the ActivateType API operation in your account\n and Region, CloudFormation considers that alias as the type name.

" + "smithy.api#documentation": "

The name of the extension.

\n

If you specified a TypeNameAlias when you call the ActivateType API\n operation in your account and Region, CloudFormation considers that alias as the type name.

" } }, "DefaultVersionId": { "target": "com.amazonaws.cloudformation#TypeVersionId", "traits": { - "smithy.api#documentation": "

The ID of the default version of the extension. The default version is used when the extension version isn't\n specified.

\n

This applies only to private extensions you have registered in your account. For public extensions, both those\n provided by Amazon and published by third parties, CloudFormation returns null. For more information, see\n RegisterType.

\n

To set the default version of an extension, use SetTypeDefaultVersion.

" + "smithy.api#documentation": "

The ID of the default version of the extension. The default version is used when the\n extension version isn't specified.

\n

This applies only to private extensions you have registered in your account. For public\n extensions, both those provided by Amazon and published by third parties, CloudFormation returns\n null. For more information, see RegisterType.

\n

To set the default version of an extension, use SetTypeDefaultVersion.

" } }, "TypeArn": { @@ -14575,7 +14575,7 @@ "LastUpdated": { "target": "com.amazonaws.cloudformation#Timestamp", "traits": { - "smithy.api#documentation": "

When the specified extension version was registered. This applies only to:

  • Private extensions you have registered in your account. For more information, see RegisterType.
  • Public extensions you have activated in your account with auto-update specified. For more information, see ActivateType.

For all other extension types, CloudFormation returns null.

" + "smithy.api#documentation": "

When the specified extension version was registered. This applies only to:

  • Private extensions you have registered in your account. For more information, see RegisterType.
  • Public extensions you have activated in your account with auto-update specified. For more information, see ActivateType.

For all other extension types, CloudFormation returns null.

" } }, "Description": { @@ -14587,43 +14587,43 @@ "PublisherId": { "target": "com.amazonaws.cloudformation#PublisherId", "traits": { - "smithy.api#documentation": "

The ID of the extension publisher, if the extension is published by a third party. Extensions published by\n Amazon don't return a publisher ID.

" + "smithy.api#documentation": "

The ID of the extension publisher, if the extension is published by a third party.\n Extensions published by Amazon don't return a publisher ID.

" } }, "OriginalTypeName": { "target": "com.amazonaws.cloudformation#TypeName", "traits": { - "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the type name of the public\n extension.

\n

If you specified a TypeNameAlias when enabling the extension in this account and Region, CloudFormation treats that alias as the extension's type name within the account and Region, not the type name of\n the public extension. For more information, see Specifying aliases to\n refer to extensions in the CloudFormation User Guide.

" + "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the type name of\n the public extension.

\n

If you specified a TypeNameAlias when enabling the extension in this account\n and Region, CloudFormation treats that alias as the extension's type name within the account and\n Region, not the type name of the public extension. For more information, see Use\n aliases to refer to extensions in the CloudFormation User Guide.

" } }, "PublicVersionNumber": { "target": "com.amazonaws.cloudformation#PublicVersionNumber", "traits": { - "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the version of the public extension\n to be used for CloudFormation operations in this account and Region.

\n

How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically\n updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the\n CloudFormation User Guide.

" + "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the version of\n the public extension to be used for CloudFormation operations in this account and Region.

\n

How you specified AutoUpdate when enabling the extension affects whether\n CloudFormation automatically updates the extension in this account and Region when a new version is\n released. For more information, see Automatically use new versions of extensions in the\n CloudFormation User Guide.

" } }, "LatestPublicVersion": { "target": "com.amazonaws.cloudformation#PublicVersionNumber", "traits": { - "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the latest version of the public\n extension that is available. For any extensions other than activated third-arty extensions,\n CloudFormation returns null.

\n

How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically\n updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the\n CloudFormation User Guide.

" + "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the latest\n version of the public extension that is available. For any extensions other\n than activated third-party extensions, CloudFormation returns null.

\n

How you specified AutoUpdate when enabling the extension affects whether\n CloudFormation automatically updates the extension in this account and Region when a new version is\n released. For more information, see Automatically use new versions of extensions in the\n CloudFormation User Guide.

" } }, "PublisherIdentity": { "target": "com.amazonaws.cloudformation#IdentityProvider", "traits": { - "smithy.api#documentation": "

The service used to verify the publisher identity.

\n

For more information, see Registering your account to publish CloudFormation extensions in the CFN-CLI User Guide for Extension Development.

" + "smithy.api#documentation": "

The service used to verify the publisher identity.

\n

For more information, see Publishing extensions to make\n them available for public use in the CloudFormation Command Line Interface (CLI) User Guide.

" } }, "PublisherName": { "target": "com.amazonaws.cloudformation#PublisherName", "traits": { - "smithy.api#documentation": "

The publisher name, as defined in the public profile for that publisher in the service used to verify the\n publisher identity.

" + "smithy.api#documentation": "

The publisher name, as defined in the public profile for that publisher in the service used\n to verify the publisher identity.

" } }, "IsActivated": { "target": "com.amazonaws.cloudformation#IsActivated", "traits": { - "smithy.api#documentation": "

Whether the extension is activated for this account and Region.

\n

This applies only to third-party public extensions. Extensions published by Amazon are activated by\n default.

" + "smithy.api#documentation": "

Whether the extension is activated for this account and Region.

\n

This applies only to third-party public extensions. Extensions published by Amazon are\n activated by default.
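The publisher and activation fields described above can be read back from a DescribeType call. The sketch below assumes they surface on Soto's generated `describeType` output with the usual lowerCamelCase member names; the third-party type name is only a placeholder:

```swift
import SotoCloudFormation

// Sketch: inspect publisher and activation details for a public extension.
// Member names assume Soto's usual mapping of the model; values shown are
// optional in the output, so defaults are printed when they are nil.
func inspectExtension() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    let output = try await cfn.describeType(
        CloudFormation.DescribeTypeInput(type: .resource, typeName: "AWSQS::EKS::Cluster")
    )
    print("Publisher ID:       \(output.publisherId ?? "n/a (Amazon-published)")")
    print("Activated:          \(output.isActivated ?? false)")
    print("Active version:     \(output.publicVersionNumber ?? "n/a")")
    print("Latest public:      \(output.latestPublicVersion ?? "n/a")")
    print("Default version ID: \(output.defaultVersionId ?? "n/a (public extension)")")

    try await client.shutdown()
}
```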

" } } }, @@ -14704,13 +14704,13 @@ "VersionId": { "target": "com.amazonaws.cloudformation#TypeVersionId", "traits": { - "smithy.api#documentation": "

The ID of a specific version of the extension. The version ID is the value at the end of the Amazon Resource\n Name (ARN) assigned to the extension version when it's registered.

" + "smithy.api#documentation": "

The ID of a specific version of the extension. The version ID is the value at the end of the\n Amazon Resource Name (ARN) assigned to the extension version when it's registered.

" } }, "IsDefaultVersion": { "target": "com.amazonaws.cloudformation#IsDefaultVersion", "traits": { - "smithy.api#documentation": "

Whether the specified extension version is set as the default version.

\n

This applies only to private extensions you have registered in your account, and extensions published by Amazon.\n For public third-party extensions, CloudFormation returns null.

" + "smithy.api#documentation": "

Whether the specified extension version is set as the default version.

\n

This applies only to private extensions you have registered in your account, and extensions\n published by Amazon. For public third-party extensions, CloudFormation returns\n null.

" } }, "Arn": { @@ -14734,7 +14734,7 @@ "PublicVersionNumber": { "target": "com.amazonaws.cloudformation#PublicVersionNumber", "traits": { - "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the version of the public extension\n to be used for CloudFormation operations in this account and Region. For any extensions other than activated third-arty\n extensions, CloudFormation returns null.

\n

How you specified AutoUpdate when enabling the extension affects whether CloudFormation automatically\n updates the extension in this account and Region when a new version is released. For more information, see Setting CloudFormation to automatically use new versions of extensions in the\n CloudFormation User Guide.

" + "smithy.api#documentation": "

For public extensions that have been activated for this account and Region, the version of\n the public extension to be used for CloudFormation operations in this account and Region. For any\n extensions other than activated third-party extensions, CloudFormation returns\n null.

\n

How you specified AutoUpdate when enabling the extension affects whether\n CloudFormation automatically updates the extension in this account and Region when a new version is\n released. For more information, see Automatically use new versions of extensions in the\n CloudFormation User Guide.

" } } }, @@ -14768,51 +14768,51 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a generated template. This can be used to change the name, add and remove resources, refresh resources,\n and change the DeletionPolicy and UpdateReplacePolicy settings. You can check the status of\n the update to the generated template using the DescribeGeneratedTemplate API action.

", + "smithy.api#documentation": "

Updates a generated template. This can be used to change the name, add and remove\n resources, refresh resources, and change the DeletionPolicy and\n UpdateReplacePolicy settings. You can check the status of the update to the\n generated template using the DescribeGeneratedTemplate API action.
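A minimal Soto sketch of this operation, mirroring the JazzyTemplate/jazz-bucket example embedded in this model; the `UpdateGeneratedTemplateInput` and `ResourceDefinition` member names are assumed to follow the usual Soto mapping of the Smithy shapes:

```swift
import SotoCloudFormation

// Sketch: add resources to a generated template, reusing the "JazzyTemplate"
// example from this model. Member names assume Soto's usual conventions.
func addResourcesToGeneratedTemplate() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    let input = CloudFormation.UpdateGeneratedTemplateInput(
        addResources: [
            .init(resourceIdentifier: ["BucketName": "jazz-bucket"], resourceType: "AWS::S3::Bucket"),
            .init(resourceIdentifier: ["DhcpOptionsId": "random-id123"], resourceType: "AWS::EC2::DHCPOptions")
        ],
        generatedTemplateName: "JazzyTemplate",
        refreshAllResources: true  // optional: also refresh existing resources to their live state
    )
    let output = try await cfn.updateGeneratedTemplate(input)
    print("Generated template ARN: \(output.generatedTemplateId ?? "unknown")")

    try await client.shutdown()
}
```

The status of the update can then be polled with the DescribeGeneratedTemplate call named in the description.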

", "smithy.api#examples": [ { - "title": "To update a generated template's name", - "documentation": "This example updates a generated template with a new name.", + "title": "To add resources to a generated template", + "documentation": "This example adds resources to a generated template", "input": { "GeneratedTemplateName": "JazzyTemplate", - "NewGeneratedTemplateName": "JazzierTemplate" + "AddResources": [ + { + "ResourceType": "AWS::S3::Bucket", + "ResourceIdentifier": { + "BucketName": "jazz-bucket" + } + }, + { + "ResourceType": "AWS::EC2::DHCPOptions", + "ResourceIdentifier": { + "DhcpOptionsId": "random-id123" + } + } + ] }, "output": { "GeneratedTemplateId": "arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/88f09db1-d211-4cb7-964b-434e2b8469ca" } }, { - "title": "To remove resources from a generated template", - "documentation": "This example removes resources from a generated template", + "title": "To update a generated template's name", + "documentation": "This example updates a generated template with a new name.", "input": { "GeneratedTemplateName": "JazzyTemplate", - "RemoveResources": [ - "LogicalResourceId1", - "LogicalResourceId2" - ] + "NewGeneratedTemplateName": "JazzierTemplate" }, "output": { "GeneratedTemplateId": "arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/88f09db1-d211-4cb7-964b-434e2b8469ca" } }, { - "title": "To add resources to a generated template", - "documentation": "This example adds resources to a generated template", + "title": "To remove resources from a generated template", + "documentation": "This example removes resources from a generated template", "input": { "GeneratedTemplateName": "JazzyTemplate", - "AddResources": [ - { - "ResourceType": "AWS::S3::Bucket", - "ResourceIdentifier": { - "BucketName": "jazz-bucket" - } - }, - { - "ResourceType": "AWS::EC2::DHCPOptions", - "ResourceIdentifier": { - "DhcpOptionsId": "random-id123" - } - } + "RemoveResources": [ + "LogicalResourceId1", + "LogicalResourceId2" ] }, "output": { @@ -14854,13 +14854,13 @@ "RefreshAllResources": { "target": "com.amazonaws.cloudformation#RefreshAllResources", "traits": { - "smithy.api#documentation": "

If true, update the resource properties in the generated template with their current live state.\n This feature is useful when the resource properties in your generated a template does not reflect the live state of\n the resource properties. This happens when a user update the resource properties after generating a template.

" + "smithy.api#documentation": "

If true, update the resource properties in the generated template with their current live state. This feature is useful when the resource properties in your generated template do not reflect the live state of the resource properties. This happens when a user updates the resource properties after generating a template.

" } }, "TemplateConfiguration": { "target": "com.amazonaws.cloudformation#TemplateConfiguration", "traits": { - "smithy.api#documentation": "

The configuration details of the generated template, including the DeletionPolicy and\n UpdateReplacePolicy.

" + "smithy.api#documentation": "

The configuration details of the generated template, including the\n DeletionPolicy and UpdateReplacePolicy.

" } } }, @@ -14874,7 +14874,7 @@ "GeneratedTemplateId": { "target": "com.amazonaws.cloudformation#GeneratedTemplateId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the generated template. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For example,\n arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc\n .

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the generated template. The format is\n arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}.\n For example,\n arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc\n .

" } } }, @@ -14899,7 +14899,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a stack as specified in the template. After the call completes successfully, the stack update starts.\n You can check the status of the stack through the DescribeStacks action.

\n

To get a copy of the template for an existing stack, you can use the GetTemplate\n action.

\n

For more information about creating an update template, updating a stack, and monitoring the progress of the\n update, see Updating a Stack.

" + "smithy.api#documentation": "

Updates a stack as specified in the template. After the call completes successfully, the\n stack update starts. You can check the status of the stack through the DescribeStacks action.

\n

To get a copy of the template for an existing stack, you can use the GetTemplate action.

\n

For more information about updating a stack and monitoring the progress of the update, see\n Managing\n Amazon Web Services resources as a single unit with CloudFormation stacks in the\n CloudFormation User Guide.
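A minimal sketch of driving this from the generated Soto client, reusing the stack's current template and then polling DescribeStacks as suggested above; the stack name, parameters, and request token are placeholders, and member names assume the usual Soto conventions:

```swift
import SotoCloudFormation

// Sketch: update a stack by reusing its current template, then check its
// status with DescribeStacks. Names and values are placeholders.
func updateStackReusingTemplate() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    let update = CloudFormation.UpdateStackInput(
        clientRequestToken: "update-001",  // optional idempotency token (placeholder)
        parameters: [
            .init(parameterKey: "InstanceType", parameterValue: "t3.small"),
            .init(parameterKey: "KeyName", usePreviousValue: true)
        ],
        stackName: "my-app-stack",
        usePreviousTemplate: true  // exactly one of templateBody/templateURL/usePreviousTemplate
    )
    _ = try await cfn.updateStack(update)

    let stacks = try await cfn.describeStacks(
        CloudFormation.DescribeStacksInput(stackName: "my-app-stack")
    )
    if let status = stacks.stacks?.first?.stackStatus {
        print("Stack status: \(status)")
    }

    try await client.shutdown()
}
```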

" } }, "com.amazonaws.cloudformation#UpdateStackInput": { @@ -14916,85 +14916,85 @@ "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": { - "smithy.api#documentation": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes.\n (For more information, go to Template Anatomy in the\n CloudFormation User Guide.)

\n

Conditional: You must specify only one of the following parameters: TemplateBody,\n TemplateURL, or set the UsePreviousTemplate to true.

" + "smithy.api#documentation": "

Structure containing the template body with a minimum length of 1 byte and a maximum\n length of 51,200 bytes.

\n

Conditional: You must specify only one of the following parameters:\n TemplateBody, TemplateURL, or set the\n UsePreviousTemplate to true.

" } }, "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": { - "smithy.api#documentation": "

Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the\n CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://.

\n

Conditional: You must specify only one of the following parameters: TemplateBody,\n TemplateURL, or set the UsePreviousTemplate to true.

" + "smithy.api#documentation": "

Location of file containing the template body. The URL must point to a template that's\n located in an Amazon S3 bucket or a Systems Manager document. The location for an Amazon S3 bucket must\n start with https://.

\n

Conditional: You must specify only one of the following parameters:\n TemplateBody, TemplateURL, or set the\n UsePreviousTemplate to true.

" } }, "UsePreviousTemplate": { "target": "com.amazonaws.cloudformation#UsePreviousTemplate", "traits": { - "smithy.api#documentation": "

Reuse the existing template that is associated with the stack that you are updating.

\n

Conditional: You must specify only one of the following parameters: TemplateBody,\n TemplateURL, or set the UsePreviousTemplate to true.

" + "smithy.api#documentation": "

Reuse the existing template that is associated with the stack that you are\n updating.

\n

Conditional: You must specify only one of the following parameters:\n TemplateBody, TemplateURL, or set the\n UsePreviousTemplate to true.

" } }, "StackPolicyDuringUpdateBody": { "target": "com.amazonaws.cloudformation#StackPolicyDuringUpdateBody", "traits": { - "smithy.api#documentation": "

Structure containing the temporary overriding stack policy body. You can specify either the\n StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL parameter, but not\n both.

\n

If you want to update protected resources, specify a temporary overriding stack policy during this update. If\n you don't specify a stack policy, the current policy that is associated with the stack will be used.

" + "smithy.api#documentation": "

Structure containing the temporary overriding stack policy body. You can specify either\n the StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL\n parameter, but not both.

\n

If you want to update protected resources, specify a temporary overriding stack policy\n during this update. If you don't specify a stack policy, the current policy that is associated\n with the stack will be used.
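As a sketch, a temporary overriding policy can be passed inline through `stackPolicyDuringUpdateBody` (assuming the usual Soto member naming); the policy document and stack name below are illustrative only:

```swift
import SotoCloudFormation

// Sketch: allow updates to otherwise protected resources for the duration of
// a single update. The stack's normal policy applies again afterwards.
func updateWithTemporaryStackPolicy() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    // Illustrative stack policy that temporarily allows all update actions.
    let temporaryPolicy = """
    {
      "Statement": [
        { "Effect": "Allow", "Action": "Update:*", "Principal": "*", "Resource": "*" }
      ]
    }
    """

    let input = CloudFormation.UpdateStackInput(
        stackName: "my-app-stack",
        stackPolicyDuringUpdateBody: temporaryPolicy,
        usePreviousTemplate: true
    )
    _ = try await cfn.updateStack(input)

    try await client.shutdown()
}
```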

" } }, "StackPolicyDuringUpdateURL": { "target": "com.amazonaws.cloudformation#StackPolicyDuringUpdateURL", "traits": { - "smithy.api#documentation": "

Location of a file containing the temporary overriding stack policy. The URL must point to a policy (max size:\n 16KB) located in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with\n https://. You can specify either the StackPolicyDuringUpdateBody or the\n StackPolicyDuringUpdateURL parameter, but not both.

\n

If you want to update protected resources, specify a temporary overriding stack policy during this update. If\n you don't specify a stack policy, the current policy that is associated with the stack will be used.

" + "smithy.api#documentation": "

Location of a file containing the temporary overriding stack policy. The URL must point to\n a policy (max size: 16KB) located in an S3 bucket in the same Region as the stack. The\n location for an Amazon S3 bucket must start with https://. You can specify either the\n StackPolicyDuringUpdateBody or the StackPolicyDuringUpdateURL\n parameter, but not both.

\n

If you want to update protected resources, specify a temporary overriding stack policy\n during this update. If you don't specify a stack policy, the current policy that is associated\n with the stack will be used.

" } }, "Parameters": { "target": "com.amazonaws.cloudformation#Parameters", "traits": { - "smithy.api#documentation": "

A list of Parameter structures that specify input parameters for the stack. For more information,\n see the Parameter\n data type.

" + "smithy.api#documentation": "

A list of Parameter structures that specify input parameters for the stack.\n For more information, see the Parameter data\n type.

" } }, "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order\n for CloudFormation to update the stack.

\n
    \n
  • \n

    \n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n

    \n

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account;\n for example, by creating new Identity and Access Management (IAM) users. For those stacks, you must explicitly\n acknowledge this by specifying one of these capabilities.

    \n

    The following IAM resources require you to specify either the CAPABILITY_IAM or\n CAPABILITY_NAMED_IAM capability.

    \n
      \n
    • \n

      If you have IAM resources, you can specify either capability.

      \n
    • \n
    • \n

      If you have IAM resources with custom names, you must specify\n CAPABILITY_NAMED_IAM.

      \n
    • \n
    • \n

      If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.

      \n
    • \n
    \n

    If your stack template contains these resources, we suggest that you review all permissions associated with\n them and edit their permissions if necessary.

    \n \n

    For more information, see Acknowledging IAM Resources in\n CloudFormation Templates.

    \n
  • \n
  • \n

    \n CAPABILITY_AUTO_EXPAND\n

    \n

    Some template contain macros. Macros perform custom processing on templates; this can include simple actions\n like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this,\n users typically create a change set from the processed template, so that they can review the changes resulting from\n the macros before actually updating the stack. If your stack template contains one or more macros, and you choose\n to update a stack directly from the processed template, without first reviewing the resulting changes in a change\n set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which\n are macros hosted by CloudFormation.

    \n

    If you want to update a stack from a stack template that contains macros and nested\n stacks, you must update the stack directly from the template using this capability.

    \n \n

    You should only update stacks directly from a stack template that contains macros if you know what processing\n the macro performs.

    \n

    Each macro relies on an underlying Lambda service function for processing stack templates. Be\n aware that the Lambda function owner can update the function operation without CloudFormation being\n notified.

    \n
    \n

    For more information, see Using CloudFormation Macros to Perform Custom Processing\n on Templates.

    \n
  • \n
\n \n

Only one of the Capabilities and ResourceType parameters can be specified.

\n
" + "smithy.api#documentation": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack.

• CAPABILITY_IAM and CAPABILITY_NAMED_IAM

  Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stacks, you must explicitly acknowledge this by specifying one of these capabilities.

  The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

  • If you have IAM resources, you can specify either capability.

  • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

  • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

  If your stack template contains these resources, we suggest that you review all permissions associated with them and edit their permissions if necessary.

  For more information, see Acknowledging IAM resources in CloudFormation templates.

• CAPABILITY_AUTO_EXPAND

  Some templates contain macros. Macros perform custom processing on templates; this can include simple actions like find-and-replace operations, all the way to extensive transformations of entire templates. Because of this, users typically create a change set from the processed template, so that they can review the changes resulting from the macros before actually updating the stack. If your stack template contains one or more macros, and you choose to update a stack directly from the processed template, without first reviewing the resulting changes in a change set, you must acknowledge this capability. This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.

  If you want to update a stack from a stack template that contains macros and nested stacks, you must update the stack directly from the template using this capability.

  You should only update stacks directly from a stack template that contains macros if you know what processing the macro performs.

  Each macro relies on an underlying Lambda service function for processing stack templates. Be aware that the Lambda function owner can update the function operation without CloudFormation being notified.

  For more information, see Perform custom processing on CloudFormation templates with template macros.

Only one of the Capabilities and ResourceType parameters can be specified.
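A short sketch of acknowledging these capabilities through the generated Soto client; the `.capabilityNamedIam` and `.capabilityAutoExpand` case names assume Soto's usual camelCase mapping of the enum values, and the stack name and template URL are placeholders:

```swift
import SotoCloudFormation

// Sketch: acknowledge capabilities for a template that creates named IAM
// resources and relies on macros/transforms. Names are placeholders.
func updateStackWithCapabilities() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    let input = CloudFormation.UpdateStackInput(
        capabilities: [.capabilityNamedIam, .capabilityAutoExpand],
        stackName: "my-iam-and-macro-stack",
        templateURL: "https://my-templates.s3.amazonaws.com/app.yaml"  // must start with https://
    )
    _ = try await cfn.updateStack(input)

    try await client.shutdown()
}
```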
" } }, "ResourceTypes": { "target": "com.amazonaws.cloudformation#ResourceTypes", "traits": { - "smithy.api#documentation": "

The template resource types that you have permissions to work with for this update stack action, such as\n AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance.

\n

If the list of resource types doesn't include a resource that you're updating, the stack update fails. By\n default, CloudFormation grants permissions to all resource types. Identity and Access Management (IAM) uses this parameter for\n CloudFormation-specific condition keys in IAM policies. For more information, see Controlling Access with Identity and Access Management.

\n \n

Only one of the Capabilities and ResourceType parameters can be specified.

\n
" + "smithy.api#documentation": "

The template resource types that you have permissions to work with for this update stack\n action, such as AWS::EC2::Instance, AWS::EC2::*, or\n Custom::MyCustomInstance.

\n

If the list of resource types doesn't include a resource that you're updating, the stack\n update fails. By default, CloudFormation grants permissions to all resource types. IAM uses this\n parameter for CloudFormation-specific condition keys in IAM policies. For more information, see\n Control access with Identity and Access Management.

\n \n

Only one of the Capabilities and ResourceType parameters can\n be specified.

\n
" } }, "RoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFormation assumes to\n update the stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always uses this role\n for all future operations on the stack. Provided that users have permission to operate on the stack, CloudFormation uses\n this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.

\n

If you don't specify a value, CloudFormation uses the role that was previously associated with the stack. If no role\n is available, CloudFormation uses a temporary session that is generated from your user credentials.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role that CloudFormation assumes to update the\n stack. CloudFormation uses the role's credentials to make calls on your behalf. CloudFormation always\n uses this role for all future operations on the stack. Provided that users have permission to\n operate on the stack, CloudFormation uses this role even if the users don't have permission to\n pass it. Ensure that the role grants least privilege.

\n

If you don't specify a value, CloudFormation uses the role that was previously associated with\n the stack. If no role is available, CloudFormation uses a temporary session that is generated from\n your user credentials.

" } }, "RollbackConfiguration": { "target": "com.amazonaws.cloudformation#RollbackConfiguration", "traits": { - "smithy.api#documentation": "

The rollback triggers for CloudFormation to monitor during stack creation and updating operations, and for the\n specified monitoring period afterwards.

" + "smithy.api#documentation": "

The rollback triggers for CloudFormation to monitor during stack creation and updating\n operations, and for the specified monitoring period afterwards.

" } }, "StackPolicyBody": { "target": "com.amazonaws.cloudformation#StackPolicyBody", "traits": { - "smithy.api#documentation": "

Structure containing a new stack policy body. You can specify either the StackPolicyBody or the\n StackPolicyURL parameter, but not both.

\n

You might update the stack policy, for example, in order to protect a new resource that you created during a\n stack update. If you don't specify a stack policy, the current policy that is associated with the stack is\n unchanged.

" + "smithy.api#documentation": "

Structure containing a new stack policy body. You can specify either the\n StackPolicyBody or the StackPolicyURL parameter, but not\n both.

\n

You might update the stack policy, for example, in order to protect a new resource that\n you created during a stack update. If you don't specify a stack policy, the current policy\n that is associated with the stack is unchanged.

" } }, "StackPolicyURL": { "target": "com.amazonaws.cloudformation#StackPolicyURL", "traits": { - "smithy.api#documentation": "

Location of a file containing the updated stack policy. The URL must point to a policy (max size: 16KB) located\n in an S3 bucket in the same Region as the stack. The location for an Amazon S3 bucket must start with\n https://. You can specify either the StackPolicyBody or the StackPolicyURL\n parameter, but not both.

\n

You might update the stack policy, for example, in order to protect a new resource that you created during a\n stack update. If you don't specify a stack policy, the current policy that is associated with the stack is\n unchanged.

" + "smithy.api#documentation": "

Location of a file containing the updated stack policy. The URL must point to a policy\n (max size: 16KB) located in an S3 bucket in the same Region as the stack. The location for an\n Amazon S3 bucket must start with https://. You can specify either the\n StackPolicyBody or the StackPolicyURL parameter, but not\n both.

\n

You might update the stack policy, for example, in order to protect a new resource that\n you created during a stack update. If you don't specify a stack policy, the current policy\n that is associated with the stack is unchanged.

" } }, "NotificationARNs": { "target": "com.amazonaws.cloudformation#NotificationARNs", "traits": { - "smithy.api#documentation": "

Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that CloudFormation associates with the stack.\n Specify an empty list to remove all notification topics.

" + "smithy.api#documentation": "

Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that CloudFormation\n associates with the stack. Specify an empty list to remove all notification topics.

" } }, "Tags": { "target": "com.amazonaws.cloudformation#Tags", "traits": { - "smithy.api#documentation": "

Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the\n stack. You can specify a maximum number of 50 tags.

\n

If you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify an empty value,\n CloudFormation removes all associated tags.

" + "smithy.api#documentation": "

Key-value pairs to associate with this stack. CloudFormation also propagates these tags to\n supported resources in the stack. You can specify a maximum number of 50 tags.

\n

If you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you\n specify an empty value, CloudFormation removes all associated tags.
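A sketch of the tag behavior through the Soto client; the tag keys and values are placeholders, and member names assume the usual Soto conventions:

```swift
import SotoCloudFormation

// Sketch: set tags during an update. Passing an empty array removes all tags;
// omitting the parameter leaves the existing tags unchanged.
func updateStackTags() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    let input = CloudFormation.UpdateStackInput(
        stackName: "my-app-stack",
        tags: [
            .init(key: "team", value: "platform"),     // placeholder tags
            .init(key: "cost-center", value: "1234")
        ],
        usePreviousTemplate: true
    )
    _ = try await cfn.updateStack(input)

    try await client.shutdown()
}
```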

" } }, "DisableRollback": { @@ -15006,13 +15006,13 @@ "ClientRequestToken": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

A unique identifier for this UpdateStack request. Specify this token if you plan to retry requests\n so that CloudFormation knows that you're not attempting to update a stack with the same name. You might retry\n UpdateStack requests to ensure that CloudFormation successfully received them.

\n

All events triggered by a given stack operation are assigned the same client request token, which you can use to\n track operations. For example, if you execute a CreateStack operation with the token\n token1, then all the StackEvents generated by that operation will have\n ClientRequestToken set as token1.

\n

In the console, stack operations display the client request token on the Events tab. Stack operations that are\n initiated from the console use the token format Console-StackOperation-ID, which helps you\n easily identify the stack operation . For example, if you create a stack using the console, each stack event would be\n assigned the same token in the following format:\n Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" + "smithy.api#documentation": "

A unique identifier for this UpdateStack request. Specify this token if you\n plan to retry requests so that CloudFormation knows that you're not attempting to update a stack\n with the same name. You might retry UpdateStack requests to ensure that\n CloudFormation successfully received them.

\n

All events triggered by a given stack operation are assigned the same client request\n token, which you can use to track operations. For example, if you execute a\n CreateStack operation with the token token1, then all the\n StackEvents generated by that operation will have\n ClientRequestToken set as token1.

\n

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

" } }, "RetainExceptOnCreate": { "target": "com.amazonaws.cloudformation#RetainExceptOnCreate", "traits": { - "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation rolls back. This includes\n newly created resources marked with a deletion policy of Retain.

\n

Default: false\n

" + "smithy.api#documentation": "

When set to true, newly created resources are deleted when the operation\n rolls back. This includes newly created resources marked with a deletion policy of\n Retain.

\n

Default: false\n

" } } }, @@ -15050,7 +15050,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the parameter values for stack instances for the specified accounts, within the specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and Region.

\n

You can only update stack instances in Amazon Web Services Regions and accounts where they already exist; to\n create additional stack instances, use CreateStackInstances.

\n

During stack set updates, any parameters overridden for a stack instance aren't updated, but retain their\n overridden value.

\n

You can only update the parameter values that are specified in the stack set; to add or\n delete a parameter itself, use UpdateStackSet to update the stack set\n template. If you add a parameter to a template, before you can override the parameter value specified in the stack\n set you must first use UpdateStackSet to update all stack\n instances with the updated template and parameter value specified in the stack set. Once a stack instance has been\n updated with the new parameter, you can then override the parameter value using\n UpdateStackInstances.

" + "smithy.api#documentation": "

Updates the parameter values for stack instances for the specified accounts, within the\n specified Amazon Web Services Regions. A stack instance refers to a stack in a specific account and\n Region.

\n

You can only update stack instances in Amazon Web Services Regions and accounts where they already\n exist; to create additional stack instances, use CreateStackInstances.

\n

During stack set updates, any parameters overridden for a stack instance aren't updated,\n but retain their overridden value.

\n

You can only update the parameter values that are specified in the\n stack set; to add or delete a parameter itself, use UpdateStackSet\n to update the stack set template. If you add a parameter to a template, before you can\n override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and\n parameter value specified in the stack set. Once a stack instance has been updated with the\n new parameter, you can then override the parameter value using\n UpdateStackInstances.

" } }, "com.amazonaws.cloudformation#UpdateStackInstancesInput": { @@ -15067,27 +15067,27 @@ "Accounts": { "target": "com.amazonaws.cloudformation#AccountList", "traits": { - "smithy.api#documentation": "

[Self-managed permissions] The names of one or more Amazon Web Services accounts for which you want to update\n parameter values for stack instances. The overridden parameter values will be applied to all stack instances in the\n specified accounts and Amazon Web Services Regions.

\n

You can specify Accounts or DeploymentTargets, but not both.

" + "smithy.api#documentation": "

[Self-managed permissions] The names of one or more Amazon Web Services accounts for which you want to\n update parameter values for stack instances. The overridden parameter values will be applied\n to all stack instances in the specified accounts and Amazon Web Services Regions.

\n

You can specify Accounts or DeploymentTargets, but not\n both.

" } }, "DeploymentTargets": { "target": "com.amazonaws.cloudformation#DeploymentTargets", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] The Organizations accounts for which you want to update parameter values\n for stack instances. If your update targets OUs, the overridden parameter values only apply to the accounts that are\n currently in the target OUs and their child OUs. Accounts added to the target OUs and their child OUs in the future\n won't use the overridden values.

\n

You can specify Accounts or DeploymentTargets, but not both.

" + "smithy.api#documentation": "

[Service-managed permissions] The Organizations accounts for which you want to\n update parameter values for stack instances. If your update targets OUs, the overridden\n parameter values only apply to the accounts that are currently in the target OUs and their\n child OUs. Accounts added to the target OUs and their child OUs in the future won't use the\n overridden values.

\n

You can specify Accounts or DeploymentTargets, but not\n both.

" } }, "Regions": { "target": "com.amazonaws.cloudformation#RegionList", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The names of one or more Amazon Web Services Regions in which you want to update parameter values for stack\n instances. The overridden parameter values will be applied to all stack instances in the specified accounts and\n Amazon Web Services Regions.

", + "smithy.api#documentation": "

The names of one or more Amazon Web Services Regions in which you want to update parameter values for\n stack instances. The overridden parameter values will be applied to all stack instances in the\n specified accounts and Amazon Web Services Regions.

", "smithy.api#required": {} } }, "ParameterOverrides": { "target": "com.amazonaws.cloudformation#Parameters", "traits": { - "smithy.api#documentation": "

A list of input parameters whose values you want to update for the specified stack instances.

\n

Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values\n during stack instance update operations:

\n
    \n
  • \n

    To override the current value for a parameter, include the parameter and specify its value.

    \n
  • \n
  • \n

    To leave an overridden parameter set to its present value, include the parameter and specify\n UsePreviousValue as true. (You can't specify both a value and set\n UsePreviousValue to true.)

    \n
  • \n
  • \n

    To set an overridden parameter back to the value specified in the stack set, specify a parameter list but\n don't include the parameter in the list.

    \n
  • \n
  • \n

    To leave all parameters set to their present values, don't specify this property at all.

    \n
  • \n
\n

During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their\n overridden value.

\n

You can only override the parameter values that are specified in the stack set; to add or\n delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter\n to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to\n update all stack instances with the updated template and parameter value specified in the stack set. Once a stack\n instance has been updated with the new parameter, you can then override the parameter value using\n UpdateStackInstances.

" + "smithy.api#documentation": "

A list of input parameters whose values you want to update for the specified stack instances.

Any overridden parameter values will be applied to all stack instances in the specified accounts and Amazon Web Services Regions. When specifying parameters and their values, be aware of how CloudFormation sets parameter values during stack instance update operations:

• To override the current value for a parameter, include the parameter and specify its value.

• To leave an overridden parameter set to its present value, include the parameter and specify UsePreviousValue as true. (You can't specify both a value and set UsePreviousValue to true.)

• To set an overridden parameter back to the value specified in the stack set, specify a parameter list but don't include the parameter in the list.

• To leave all parameters set to their present values, don't specify this property at all.

During stack set updates, any parameter values overridden for a stack instance aren't updated, but retain their overridden value.

You can only override the parameter values that are specified in the stack set; to add or delete a parameter itself, use UpdateStackSet to update the stack set template. If you add a parameter to a template, before you can override the parameter value specified in the stack set you must first use UpdateStackSet to update all stack instances with the updated template and parameter value specified in the stack set. Once a stack instance has been updated with the new parameter, you can then override the parameter value using UpdateStackInstances.
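A sketch of these override rules through the generated Soto client (self-managed permissions); the account IDs, Region, and names are placeholders, and shape and member names assume the usual Soto mapping:

```swift
import SotoCloudFormation

// Sketch: override one parameter for the stack instances in two accounts and
// one Region, and leave another override at its previous value.
func overrideStackInstanceParameters() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    let input = CloudFormation.UpdateStackInstancesInput(
        accounts: ["111111111111", "222222222222"],   // placeholder account IDs
        parameterOverrides: [
            .init(parameterKey: "LogLevel", parameterValue: "DEBUG"),
            .init(parameterKey: "RetentionDays", usePreviousValue: true)
        ],
        regions: ["us-east-1"],
        stackSetName: "my-stack-set"
    )
    let output = try await cfn.updateStackInstances(input)
    print("Operation ID: \(output.operationId ?? "unknown")")

    try await client.shutdown()
}
```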

" } }, "OperationPreferences": { @@ -15099,14 +15099,14 @@ "OperationId": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

The unique identifier for this stack set operation.

\n

The operation ID also functions as an idempotency token, to ensure that CloudFormation performs the stack set\n operation only once, even if you retry the request multiple times. You might retry stack set operation requests to\n ensure that CloudFormation successfully received them.

\n

If you don't specify an operation ID, the SDK generates one automatically.

", + "smithy.api#documentation": "

The unique identifier for this stack set operation.

\n

The operation ID also functions as an idempotency token, to ensure that CloudFormation\n performs the stack set operation only once, even if you retry the request multiple times. You\n might retry stack set operation requests to ensure that CloudFormation successfully received\n them.

\n

If you don't specify an operation ID, the SDK generates one\n automatically.

", "smithy.api#idempotencyToken": {} } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's\n management account or as a delegated administrator in a member account.

\n

By default, SELF is specified. Use SELF for stack sets with self-managed\n permissions.

\n
    \n
  • \n

    If you are signed in to the management account, specify SELF.

    \n
  • \n
  • \n

    If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    \n

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated\n administrator in the CloudFormation User Guide.

    \n
  • \n
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

• If you are signed in to the management account, specify SELF.

• If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

  Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.
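A sketch of calling as a delegated administrator; the `.delegatedAdmin` case and the `DeploymentTargets` member names assume Soto's usual mapping and are not verified against the generated code, and the OU ID is a placeholder:

```swift
import SotoCloudFormation

// Sketch: update stack instances as a delegated administrator for a
// service-managed stack set. All identifiers are placeholders.
func overrideAsDelegatedAdmin() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    let input = CloudFormation.UpdateStackInstancesInput(
        callAs: .delegatedAdmin,
        deploymentTargets: .init(organizationalUnitIds: ["ou-examplerootid111-exampleouid111"]),
        parameterOverrides: [.init(parameterKey: "LogLevel", parameterValue: "INFO")],
        regions: ["us-east-1"],
        stackSetName: "my-service-managed-stack-set"
    )
    _ = try await cfn.updateStackInstances(input)

    try await client.shutdown()
}
```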
" } } }, @@ -15172,7 +15172,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the stack set, and associated stack instances in the specified accounts and Amazon Web Services Regions.

\n

Even if the stack set operation created by updating the stack set fails (completely or partially, below or above\n a specified failure tolerance), the stack set is updated with your changes. Subsequent CreateStackInstances calls on the specified stack set use the updated stack set.

" + "smithy.api#documentation": "

Updates the stack set, and associated stack instances in the specified accounts and\n Amazon Web Services Regions.

\n

Even if the stack set operation created by updating the stack set fails (completely or\n partially, below or above a specified failure tolerance), the stack set is updated with your\n changes. Subsequent CreateStackInstances calls on the specified stack set\n use the updated stack set.
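A sketch of an in-place stack set update through the generated Soto client, reusing the current template and changing one parameter; the names are placeholders and member names assume the usual Soto conventions:

```swift
import SotoCloudFormation

// Sketch: update a stack set by reusing its current template and changing a
// parameter; the resulting stack set operation rolls the change out to the
// existing stack instances.
func updateStackSetParameters() async throws {
    let client = AWSClient()  // initializer details vary by Soto version
    let cfn = CloudFormation(client: client, region: .useast1)

    let input = CloudFormation.UpdateStackSetInput(
        capabilities: [.capabilityIam],
        parameters: [.init(parameterKey: "LogLevel", parameterValue: "INFO")],
        stackSetName: "my-stack-set",
        usePreviousTemplate: true
    )
    let output = try await cfn.updateStackSet(input)
    print("Operation ID: \(output.operationId ?? "unknown")")

    try await client.shutdown()
}
```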

" } }, "com.amazonaws.cloudformation#UpdateStackSetInput": { @@ -15195,19 +15195,19 @@ "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": { - "smithy.api#documentation": "

The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200\n bytes. For more information, see Template Anatomy in the\n CloudFormation User Guide.

\n

Conditional: You must specify only one of the following parameters: TemplateBody or\n TemplateURL—or set UsePreviousTemplate to true.

" + "smithy.api#documentation": "

The structure that contains the template body, with a minimum length of 1 byte and a\n maximum length of 51,200 bytes.

\n

Conditional: You must specify only one of the following parameters:\n TemplateBody or TemplateURL—or set\n UsePreviousTemplate to true.

" } }, "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": { - "smithy.api#documentation": "

The location of the file that contains the template body. The URL must point to a template (maximum size:\n 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. For more information,\n see Template\n Anatomy in the CloudFormation User Guide.

\n

Conditional: You must specify only one of the following parameters: TemplateBody or\n TemplateURL—or set UsePreviousTemplate to true.

" + "smithy.api#documentation": "

The location of the file that contains the template body. The URL must point to a template\n (maximum size: 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager\n document.

\n

Conditional: You must specify only one of the following parameters:\n TemplateBody or TemplateURL—or set\n UsePreviousTemplate to true.

" } }, "UsePreviousTemplate": { "target": "com.amazonaws.cloudformation#UsePreviousTemplate", "traits": { - "smithy.api#documentation": "

Use the existing template that's associated with the stack set that you're updating.

\n

Conditional: You must specify only one of the following parameters: TemplateBody or\n TemplateURL—or set UsePreviousTemplate to true.

" + "smithy.api#documentation": "

Use the existing template that's associated with the stack set that you're\n updating.

\n

Conditional: You must specify only one of the following parameters:\n TemplateBody or TemplateURL—or set\n UsePreviousTemplate to true.

" } }, "Parameters": { @@ -15219,13 +15219,13 @@ "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order\n for CloudFormation to update the stack set and its associated stack instances.

\n
    \n
  • \n

    \n CAPABILITY_IAM and CAPABILITY_NAMED_IAM\n

    \n

    Some stack templates might include resources that can affect permissions in your Amazon Web Services account;\n for example, by creating new Identity and Access Management (IAM) users. For those stacks sets, you must explicitly\n acknowledge this by specifying one of these capabilities.

    \n

    The following IAM resources require you to specify either the CAPABILITY_IAM or\n CAPABILITY_NAMED_IAM capability.

    \n
      \n
    • \n

      If you have IAM resources, you can specify either capability.

      \n
    • \n
    • \n

      If you have IAM resources with custom names, you must specify\n CAPABILITY_NAMED_IAM.

      \n
    • \n
    • \n

      If you don't specify either of these capabilities, CloudFormation returns an\n InsufficientCapabilities error.

      \n
    • \n
    \n

    If your stack template contains these resources, we recommend that you review all permissions associated with\n them and edit their permissions if necessary.

    \n \n

    For more information, see Acknowledging IAM Resources in\n CloudFormation Templates.

    \n
  • \n
  • \n

    \n CAPABILITY_AUTO_EXPAND\n

    \n

    Some templates reference macros. If your stack set template references one or more macros, you must update the\n stack set directly from the processed template, without first reviewing the resulting changes in a change set. To\n update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to\n Perform Custom Processing on Templates.

    \n \n

    Stack sets with service-managed permissions do not currently support the use of macros in templates. (This\n includes the AWS::Include and AWS::Serverless transforms, which\n are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions,\n if you reference a macro in your template the stack set operation will fail.

    \n
    \n
  • \n
" + "smithy.api#documentation": "

In some cases, you must explicitly acknowledge that your stack template contains certain capabilities in order for CloudFormation to update the stack set and its associated stack instances.

• CAPABILITY_IAM and CAPABILITY_NAMED_IAM

  Some stack templates might include resources that can affect permissions in your Amazon Web Services account, for example, by creating new IAM users. For those stack sets, you must explicitly acknowledge this by specifying one of these capabilities.

  The following IAM resources require you to specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability.

  • If you have IAM resources, you can specify either capability.

  • If you have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM.

  • If you don't specify either of these capabilities, CloudFormation returns an InsufficientCapabilities error.

  If your stack template contains these resources, we recommend that you review all permissions associated with them and edit their permissions if necessary.

  For more information, see Acknowledging IAM resources in CloudFormation templates.

• CAPABILITY_AUTO_EXPAND

  Some templates reference macros. If your stack set template references one or more macros, you must update the stack set directly from the processed template, without first reviewing the resulting changes in a change set. To update the stack set directly, you must acknowledge this capability. For more information, see Using CloudFormation Macros to Perform Custom Processing on Templates.

  Stack sets with service-managed permissions do not currently support the use of macros in templates. (This includes the AWS::Include and AWS::Serverless transforms, which are macros hosted by CloudFormation.) Even if you specify this capability for a stack set with service-managed permissions, if you reference a macro in your template the stack set operation will fail.
" } }, "Tags": { "target": "com.amazonaws.cloudformation#Tags", "traits": { - "smithy.api#documentation": "

The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates these\n tags to supported resources that are created in the stacks. You can specify a maximum number of 50 tags.

\n

If you specify tags for this parameter, those tags replace any list of tags that are currently associated with\n this stack set. This means:

\n
    \n
  • \n

    If you don't specify this parameter, CloudFormation doesn't modify the stack's tags.

    \n
  • \n
  • \n

    If you specify any tags using this parameter, you must specify all\n the tags that you want associated with this stack set, even tags you've specified before (for example, when\n creating the stack set or during a previous update of the stack set.). Any tags that you don't include in the\n updated list of tags are removed from the stack set, and therefore from the stacks and resources as well.

    \n
  • \n
  • \n

    If you specify an empty value, CloudFormation removes all currently associated tags.

    \n
  • \n
\n

If you specify new tags as part of an UpdateStackSet action, CloudFormation checks to see if you have the\n required IAM permission to tag resources. If you omit tags that are currently associated with the stack set from\n the list of tags you specify, CloudFormation assumes that you want to remove those tags from the stack set, and checks to see\n if you have permission to untag resources. If you don't have the necessary permission(s), the entire\n UpdateStackSet action fails with an access denied error, and the stack set is not\n updated.

" + "smithy.api#documentation": "

The key-value pairs to associate with this stack set and the stacks created from it. CloudFormation also propagates these tags to supported resources that are created in the stacks. You can specify a maximum number of 50 tags.

If you specify tags for this parameter, those tags replace any list of tags that are currently associated with this stack set. This means:

• If you don't specify this parameter, CloudFormation doesn't modify the stack's tags.

• If you specify any tags using this parameter, you must specify all the tags that you want associated with this stack set, even tags you've specified before (for example, when creating the stack set or during a previous update of the stack set). Any tags that you don't include in the updated list of tags are removed from the stack set, and therefore from the stacks and resources as well.

• If you specify an empty value, CloudFormation removes all currently associated tags.

If you specify new tags as part of an UpdateStackSet action, CloudFormation checks to see if you have the required IAM permission to tag resources. If you omit tags that are currently associated with the stack set from the list of tags you specify, CloudFormation assumes that you want to remove those tags from the stack set, and checks to see if you have permission to untag resources. If you don't have the necessary permission(s), the entire UpdateStackSet action fails with an access denied error, and the stack set is not updated.

" } }, "OperationPreferences": { @@ -15237,62 +15237,62 @@ "AdministrationRoleARN": { "target": "com.amazonaws.cloudformation#RoleARN", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role to use to update this stack set.

\n

Specify an IAM role only if you are using customized administrator roles to control which users or groups can\n manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack\n Set Operations in the CloudFormation User Guide.

\n

If you specified a customized administrator role when you created the stack set, you must specify a customized\n administrator role, even if it is the same customized administrator role used with this stack set previously.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role to use to update this stack set.

\n

Specify an IAM role only if you are using customized administrator roles to control\n which users or groups can manage specific stack sets within the same administrator account.\n For more information, see Granting Permissions for\n Stack Set Operations in the CloudFormation User Guide.

\n

If you specified a customized administrator role when you created the stack set, you must\n specify a customized administrator role, even if it is the same customized administrator role\n used with this stack set previously.

" } }, "ExecutionRoleName": { "target": "com.amazonaws.cloudformation#ExecutionRoleName", "traits": { - "smithy.api#documentation": "

The name of the IAM execution role to use to update the stack set. If you do not specify an execution role,\n CloudFormation uses the AWSCloudFormationStackSetExecutionRole role for the stack set operation.

\n

Specify an IAM role only if you are using customized execution roles to control which stack resources users\n and groups can include in their stack sets.

\n

If you specify a customized execution role, CloudFormation uses that role to update the stack. If you do not specify a\n customized execution role, CloudFormation performs the update using the role previously associated with the stack set, so long\n as you have permissions to perform operations on the stack set.

" + "smithy.api#documentation": "

The name of the IAM execution role to use to update the stack set. If you do not specify\n an execution role, CloudFormation uses the AWSCloudFormationStackSetExecutionRole\n role for the stack set operation.

\n

Specify an IAM role only if you are using customized execution roles to control which\n stack resources users and groups can include in their stack sets.

\n

If you specify a customized execution role, CloudFormation uses that role to update the stack.\n If you do not specify a customized execution role, CloudFormation performs the update using the\n role previously associated with the stack set, so long as you have permissions to perform\n operations on the stack set.

" } }, "DeploymentTargets": { "target": "com.amazonaws.cloudformation#DeploymentTargets", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] The Organizations accounts in which to update associated stack\n instances.

\n

To update all the stack instances associated with this stack set, do not specify DeploymentTargets\n or Regions.

\n

If the stack set update includes changes to the template (that is, if TemplateBody or\n TemplateURL is specified), or the Parameters, CloudFormation marks all stack instances with a\n status of OUTDATED prior to updating the stack instances in the specified accounts and Amazon Web Services Regions. If the stack set update doesn't include changes to the template or parameters, CloudFormation updates the\n stack instances in the specified accounts and Regions, while leaving all other stack instances with their existing\n stack instance status.

" + "smithy.api#documentation": "

[Service-managed permissions] The Organizations accounts in which to update\n associated stack instances.

\n

To update all the stack instances associated with this stack set, do not specify\n DeploymentTargets or Regions.

\n

If the stack set update includes changes to the template (that is, if\n TemplateBody or TemplateURL is specified), or the\n Parameters, CloudFormation marks all stack instances with a status of\n OUTDATED prior to updating the stack instances in the specified accounts and\n Amazon Web Services Regions. If the stack set update doesn't include changes to the template or parameters,\n CloudFormation updates the stack instances in the specified accounts and Regions, while leaving\n all other stack instances with their existing stack instance status.

" } }, "PermissionModel": { "target": "com.amazonaws.cloudformation#PermissionModels", "traits": { - "smithy.api#documentation": "

Describes how the IAM roles required for stack set operations are created. You cannot modify\n PermissionModel if there are stack instances associated with your stack set.

\n " + "smithy.api#documentation": "

Describes how the IAM roles required for stack set operations are created. You cannot\n modify PermissionModel if there are stack instances associated with your stack\n set.

\n " } }, "AutoDeployment": { "target": "com.amazonaws.cloudformation#AutoDeployment", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Describes whether StackSets automatically deploys to Organizations\n accounts that are added to a target organization or organizational unit (OU).

\n

If you specify AutoDeployment, don't specify DeploymentTargets or\n Regions.

" + "smithy.api#documentation": "

[Service-managed permissions] Describes whether StackSets automatically deploys to Organizations accounts that are added to a target organization or organizational unit\n (OU).

\n

If you specify AutoDeployment, don't specify DeploymentTargets\n or Regions.

" } }, "OperationId": { "target": "com.amazonaws.cloudformation#ClientRequestToken", "traits": { - "smithy.api#documentation": "

The unique ID for this stack set operation.

\n

The operation ID also functions as an idempotency token, to ensure that CloudFormation performs the stack set\n operation only once, even if you retry the request multiple times. You might retry stack set operation requests to\n ensure that CloudFormation successfully received them.

\n

If you don't specify an operation ID, CloudFormation generates one automatically.

\n

Repeating this stack set operation with a new operation ID retries all stack instances whose status is\n OUTDATED.

", + "smithy.api#documentation": "

The unique ID for this stack set operation.

\n

The operation ID also functions as an idempotency token, to ensure that CloudFormation\n performs the stack set operation only once, even if you retry the request multiple times. You\n might retry stack set operation requests to ensure that CloudFormation successfully received\n them.

\n

If you don't specify an operation ID, CloudFormation generates one\n automatically.

\n

Repeating this stack set operation with a new operation ID retries all stack instances\n whose status is OUTDATED.

", "smithy.api#idempotencyToken": {} } }, "Accounts": { "target": "com.amazonaws.cloudformation#AccountList", "traits": { - "smithy.api#documentation": "

[Self-managed permissions] The accounts in which to update associated stack instances. If you specify accounts,\n you must also specify the Amazon Web Services Regions in which to update stack set instances.

\n

To update all the stack instances associated with this stack set, don't specify the\n Accounts or Regions properties.

\n

If the stack set update includes changes to the template (that is, if the TemplateBody or\n TemplateURL properties are specified), or the Parameters property, CloudFormation marks all stack\n instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and\n Amazon Web Services Regions. If the stack set update does not include changes to the template or parameters, CloudFormation\n updates the stack instances in the specified accounts and Amazon Web Services Regions, while leaving all other stack\n instances with their existing stack instance status.

" + "smithy.api#documentation": "

[Self-managed permissions] The accounts in which to update associated stack instances. If\n you specify accounts, you must also specify the Amazon Web Services Regions in which to update stack set\n instances.

\n

To update all the stack instances associated with this stack set,\n don't specify the Accounts or Regions properties.

\n

If the stack set update includes changes to the template (that is, if the\n TemplateBody or TemplateURL properties are specified), or the\n Parameters property, CloudFormation marks all stack instances with a status of\n OUTDATED prior to updating the stack instances in the specified accounts and\n Amazon Web Services Regions. If the stack set update does not include changes to the template or parameters,\n CloudFormation updates the stack instances in the specified accounts and Amazon Web Services Regions, while\n leaving all other stack instances with their existing stack instance status.

" } }, "Regions": { "target": "com.amazonaws.cloudformation#RegionList", "traits": { - "smithy.api#documentation": "

The Amazon Web Services Regions in which to update associated stack instances. If you specify Regions, you must\n also specify accounts in which to update stack set instances.

\n

To update all the stack instances associated with this stack set, do not specify the\n Accounts or Regions properties.

\n

If the stack set update includes changes to the template (that is, if the TemplateBody or\n TemplateURL properties are specified), or the Parameters property, CloudFormation marks all stack\n instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and\n Regions. If the stack set update does not include changes to the template or parameters, CloudFormation updates the stack\n instances in the specified accounts and Regions, while leaving all other stack instances with their existing stack\n instance status.

" + "smithy.api#documentation": "

The Amazon Web Services Regions in which to update associated stack instances. If you specify Regions,\n you must also specify accounts in which to update stack set instances.

\n

To update all the stack instances associated with this stack set, do\n not specify the Accounts or Regions properties.

\n

If the stack set update includes changes to the template (that is, if the\n TemplateBody or TemplateURL properties are specified), or the\n Parameters property, CloudFormation marks all stack instances with a status of\n OUTDATED prior to updating the stack instances in the specified accounts and\n Regions. If the stack set update does not include changes to the template or parameters,\n CloudFormation updates the stack instances in the specified accounts and Regions, while leaving\n all other stack instances with their existing stack instance status.

" } }, "CallAs": { "target": "com.amazonaws.cloudformation#CallAs", "traits": { - "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.
" + "smithy.api#documentation": "

[Service-managed permissions] Specifies whether you are acting as an account administrator in the organization's management account or as a delegated administrator in a member account.

By default, SELF is specified. Use SELF for stack sets with self-managed permissions.

  • If you are signed in to the management account, specify SELF.

  • If you are signed in to a delegated administrator account, specify DELEGATED_ADMIN.

    Your Amazon Web Services account must be registered as a delegated administrator in the management account. For more information, see Register a delegated administrator in the CloudFormation User Guide.
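As a rough illustration of the CallAs and DeploymentTargets parameters documented above, the following hedged Soto sketch updates stack instances in a single organizational unit while signed in as a delegated administrator. The OU ID, Region, and stack set name are placeholders, and the .delegatedAdmin case name is assumed to follow Soto's usual enum-generation pattern for DELEGATED_ADMIN.

```swift
import SotoCloudFormation

// Hypothetical sketch: service-managed permissions, called from a delegated
// administrator account. DeploymentTargets plus regions narrow the update to one OU.
func updateOneOU(_ cloudFormation: CloudFormation) async throws {
    let input = CloudFormation.UpdateStackSetInput(
        callAs: .delegatedAdmin,  // assumed case name generated for DELEGATED_ADMIN
        deploymentTargets: .init(
            organizationalUnitIds: ["ou-examplerootid111-exampleouid111"]  // placeholder OU ID
        ),
        regions: ["us-east-1"],
        stackSetName: "my-service-managed-stack-set",  // placeholder
        usePreviousTemplate: true
    )
    _ = try await cloudFormation.updateStackSet(input)
}
```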
" } }, "ManagedExecution": { "target": "com.amazonaws.cloudformation#ManagedExecution", "traits": { - "smithy.api#documentation": "

Describes whether StackSets performs non-conflicting operations concurrently and queues conflicting\n operations.

" + "smithy.api#documentation": "

Describes whether StackSets performs non-conflicting operations concurrently and queues\n conflicting operations.

" } } }, @@ -15323,7 +15323,7 @@ "target": "com.amazonaws.cloudformation#UpdateTerminationProtectionOutput" }, "traits": { - "smithy.api#documentation": "

Updates termination protection for the specified stack. If a user attempts to delete a stack with termination\n protection enabled, the operation fails and the stack remains unchanged. For more information, see Protecting a Stack From\n Being Deleted in the CloudFormation User Guide.

\n

For nested\n stacks, termination protection is set on the root stack and can't be changed directly on the nested\n stack.

" + "smithy.api#documentation": "

Updates termination protection for the specified stack. If a user attempts to delete a\n stack with termination protection enabled, the operation fails and the stack remains\n unchanged. For more information, see Protect a CloudFormation\n stack from being deleted in the CloudFormation User Guide.

\n

For nested stacks,\n termination protection is set on the root stack and can't be changed directly on the nested\n stack.

" } }, "com.amazonaws.cloudformation#UpdateTerminationProtectionInput": { @@ -15341,7 +15341,7 @@ "target": "com.amazonaws.cloudformation#StackNameOrId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name or unique ID of the stack for which you want to set termination protection.

", + "smithy.api#documentation": "

The name or unique ID of the stack for which you want to set termination\n protection.
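For reference, a minimal Soto sketch (not part of the upstream diff) of the UpdateTerminationProtection call documented above; the stack name is a placeholder and the member names assume Soto's usual generated shapes.

```swift
import SotoCloudFormation

// Minimal sketch: enable termination protection on a (placeholder) root stack.
// For nested stacks this must be set on the root stack, per the documentation above.
func protectStack(_ cloudFormation: CloudFormation) async throws {
    let output = try await cloudFormation.updateTerminationProtection(
        .init(enableTerminationProtection: true, stackName: "my-root-stack")
    )
    print(output.stackId ?? "no stack id returned")
}
```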

", "smithy.api#required": {} } } @@ -15382,7 +15382,7 @@ "target": "com.amazonaws.cloudformation#ValidateTemplateOutput" }, "traits": { - "smithy.api#documentation": "

Validates a specified template. CloudFormation first checks if the template is valid JSON. If it isn't, CloudFormation\n checks if the template is valid YAML. If both these checks fail, CloudFormation returns a template validation\n error.

" + "smithy.api#documentation": "

Validates a specified template. CloudFormation first checks if the template is valid JSON. If\n it isn't, CloudFormation checks if the template is valid YAML. If both these checks fail,\n CloudFormation returns a template validation error.

" } }, "com.amazonaws.cloudformation#ValidateTemplateInput": { @@ -15391,13 +15391,13 @@ "TemplateBody": { "target": "com.amazonaws.cloudformation#TemplateBody", "traits": { - "smithy.api#documentation": "

Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For\n more information, go to Template Anatomy in the CloudFormation User Guide.

\n

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only\n TemplateBody is used.

" + "smithy.api#documentation": "

Structure containing the template body with a minimum length of 1 byte and a maximum\n length of 51,200 bytes.

\n

Conditional: You must pass TemplateURL or TemplateBody. If both\n are passed, only TemplateBody is used.

" } }, "TemplateURL": { "target": "com.amazonaws.cloudformation#TemplateURL", "traits": { - "smithy.api#documentation": "

Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that\n is located in an Amazon S3 bucket or a Systems Manager document. For more information, go to Template Anatomy in the\n CloudFormation User Guide. The location for an Amazon S3 bucket must start with https://.

\n

Conditional: You must pass TemplateURL or TemplateBody. If both are passed, only\n TemplateBody is used.

" + "smithy.api#documentation": "

Location of file containing the template body. The URL must point to a template (max size:\n 460,800 bytes) that is located in an Amazon S3 bucket or a Systems Manager document. The location\n for an Amazon S3 bucket must start with https://.

\n

Conditional: You must pass TemplateURL or TemplateBody. If both\n are passed, only TemplateBody is used.
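A small, hedged Soto sketch of ValidateTemplate with an inline template body (assuming the usual generated input and output shapes); it also surfaces the Capabilities element described next.

```swift
import SotoCloudFormation

// Minimal sketch: validate an inline template body. If both templateBody and
// templateURL were supplied, only templateBody would be used.
func validate(_ cloudFormation: CloudFormation, templateBody: String) async throws {
    let result = try await cloudFormation.validateTemplate(
        .init(templateBody: templateBody)
    )
    // Capabilities (for example CAPABILITY_IAM) that a later CreateStack or
    // UpdateStack call would have to acknowledge, plus the declared parameters.
    print(result.capabilities ?? [])
    print(result.parameters ?? [])
}
```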

" } } }, @@ -15424,13 +15424,13 @@ "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "

The capabilities found within the template. If your template contains IAM resources, you must specify the\n CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use the CreateStack or\n UpdateStack actions with your template; otherwise, those actions return an InsufficientCapabilities\n error.

\n

For more information, see Acknowledging IAM Resources in\n CloudFormation Templates.

" + "smithy.api#documentation": "

The capabilities found within the template. If your template contains IAM resources, you\n must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when you use\n the CreateStack or UpdateStack actions with your template;\n otherwise, those actions return an InsufficientCapabilities error.

\n

For more information, see Acknowledging IAM resources in CloudFormation templates.

" } }, "CapabilitiesReason": { "target": "com.amazonaws.cloudformation#CapabilitiesReason", "traits": { - "smithy.api#documentation": "

The list of resources that generated the values in the Capabilities response element.

" + "smithy.api#documentation": "

The list of resources that generated the values in the Capabilities response\n element.

" } }, "DeclaredTransforms": { @@ -15491,7 +15491,7 @@ "Type": { "target": "com.amazonaws.cloudformation#WarningType", "traits": { - "smithy.api#documentation": "

The type of this warning. For more information, see IaC generator and write-only properties in the CloudFormation User Guide.

  • MUTUALLY_EXCLUSIVE_PROPERTIES - The resource requires mutually-exclusive write-only properties. The IaC generator selects one set of mutually exclusive properties and converts the included properties into parameters. The parameter names have a suffix OneOf and the parameter descriptions indicate that the corresponding property can be replaced with other exclusive properties.

  • UNSUPPORTED_PROPERTIES - Unsupported properties are present in the resource. One example of unsupported properties would be a required write-only property that is an array, because a parameter cannot be an array. Another example is an optional write-only property.

  • MUTUALLY_EXCLUSIVE_TYPES - One or more required write-only properties are found in the resource, and the type of that property can be any of several types.

Currently the resource and property reference documentation does not indicate if a property uses a type of oneOf or anyOf. You need to look at the resource provider schema.
" + "smithy.api#documentation": "

The type of this warning. For more information, see Resolve write-only properties in the CloudFormation User Guide.

  • MUTUALLY_EXCLUSIVE_PROPERTIES - The resource requires mutually-exclusive write-only properties. The IaC generator selects one set of mutually exclusive properties and converts the included properties into parameters. The parameter names have a suffix OneOf and the parameter descriptions indicate that the corresponding property can be replaced with other exclusive properties.

  • UNSUPPORTED_PROPERTIES - Unsupported properties are present in the resource. One example of unsupported properties would be a required write-only property that is an array, because a parameter cannot be an array. Another example is an optional write-only property.

  • MUTUALLY_EXCLUSIVE_TYPES - One or more required write-only properties are found in the resource, and the type of that property can be any of several types.

Currently the resource and property reference documentation does not indicate if a property uses a type of oneOf or anyOf. You need to look at the resource provider schema.
" } }, "Properties": { @@ -15523,7 +15523,7 @@ "PropertyPath": { "target": "com.amazonaws.cloudformation#PropertyPath", "traits": { - "smithy.api#documentation": "

The path of the property. For example, if this is for the S3Bucket member of the Code\n property, the property path would be Code/S3Bucket.

" + "smithy.api#documentation": "

The path of the property. For example, if this is for the S3Bucket member of\n the Code property, the property path would be Code/S3Bucket.

" } }, "Required": { @@ -15572,7 +15572,7 @@ "UnrecognizedResourceTypes": { "target": "com.amazonaws.cloudformation#ResourceTypes", "traits": { - "smithy.api#documentation": "

A list of all of the unrecognized resource types. This is only returned if the\n TemplateSummaryConfig parameter has the TreatUnrecognizedResourceTypesAsWarning\n configuration set to True.

" + "smithy.api#documentation": "

A list of all of the unrecognized resource types. This is only returned if the\n TemplateSummaryConfig parameter has the\n TreatUnrecognizedResourceTypesAsWarning configuration set to\n True.
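To show where UnrecognizedResourceTypes comes from, here is a hedged Soto sketch of a GetTemplateSummary call that opts into the TreatUnrecognizedResourceTypesAsWarning behaviour; the shape and member names are assumed to follow the generated CloudFormation module.

```swift
import SotoCloudFormation

// Hypothetical sketch: summarize a template and report unrecognized resource types
// as warnings instead of failing the call.
func summarize(_ cloudFormation: CloudFormation, templateBody: String) async throws {
    let summary = try await cloudFormation.getTemplateSummary(
        .init(
            templateBody: templateBody,
            templateSummaryConfig: .init(treatUnrecognizedResourceTypesAsWarning: true)
        )
    )
    // Only populated when TreatUnrecognizedResourceTypesAsWarning is set to true.
    print(summary.warnings?.unrecognizedResourceTypes ?? [])
}
```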

" } } }, diff --git a/models/cloudtrail.json b/models/cloudtrail.json index a14db05d24..cb7e5725ad 100644 --- a/models/cloudtrail.json +++ b/models/cloudtrail.json @@ -251,7 +251,7 @@ } }, "traits": { - "smithy.api#documentation": "

Advanced event selectors let you create fine-grained selectors for CloudTrail management and data events. They help you control costs by logging only those events that are important to you. For more information about advanced event selectors, see Logging management events and Logging data events in the CloudTrail User Guide.

You cannot apply both event selectors and advanced event selectors to a trail.

Supported CloudTrail event record fields for management events

  • eventCategory (required)
  • eventSource
  • readOnly

Supported CloudTrail event record fields for data events

  • eventCategory (required)
  • resources.type (required)
  • readOnly
  • eventName
  • resources.ARN

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.
" + "smithy.api#documentation": "

Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those events that are important to you. For more information about configuring advanced event selectors, see the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.

You cannot apply both event selectors and advanced event selectors to a trail.

Supported CloudTrail event record fields for management events

  • eventCategory (required)
  • eventSource
  • readOnly

Supported CloudTrail event record fields for data events

  • eventCategory (required)
  • resources.type (required)
  • readOnly
  • eventName
  • resources.ARN

Supported CloudTrail event record fields for network activity events

Network activity events is in preview release for CloudTrail and is subject to change.

  • eventCategory (required)
  • eventSource (required)
  • eventName
  • errorCode - The only valid value for errorCode is VpceAccessDenied.
  • vpcEndpointId

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.
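As a concrete, hypothetical illustration of the field names listed above, the following Soto sketch attaches one advanced event selector that logs read-only S3 object-level data events for a single placeholder bucket. It assumes the generated SotoCloudTrail shapes expose these members as plain strings and string arrays; the trail name and bucket ARN are placeholders.

```swift
import SotoCloudTrail

// Hypothetical sketch: log read-only S3 object data events for one bucket.
// "my-trail" and the bucket ARN are placeholders.
func logS3ReadsOnly(_ cloudTrail: CloudTrail) async throws {
    let selector = CloudTrail.AdvancedEventSelector(
        fieldSelectors: [
            .init(equals: ["Data"], field: "eventCategory"),
            .init(equals: ["AWS::S3::Object"], field: "resources.type"),
            .init(equals: ["true"], field: "readOnly"),
            // Trailing slash is intentional: match every object in the bucket.
            .init(field: "resources.ARN", startsWith: ["arn:aws:s3:::amzn-s3-demo-bucket1/"]),
        ],
        name: "Read-only S3 object events"
    )
    _ = try await cloudTrail.putEventSelectors(
        .init(advancedEventSelectors: [selector], trailName: "my-trail")
    )
}
```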
" } }, "com.amazonaws.cloudtrail#AdvancedEventSelectors": { @@ -266,7 +266,7 @@ "Field": { "target": "com.amazonaws.cloudtrail#SelectorField", "traits": { - "smithy.api#documentation": "

A field in a CloudTrail event record on which to filter events to be logged. For\n event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for\n selecting events as filtering is not supported.

\n

For CloudTrail management events, supported fields include readOnly,\n eventCategory, and eventSource.

\n

For CloudTrail data events, supported fields include readOnly,\n eventCategory, eventName, resources.type, and resources.ARN.

\n

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is\n eventCategory.

\n
    \n
  • \n

    \n \n readOnly\n - Optional. Can be set to\n Equals a value of true or false. If you do\n not add this field, CloudTrail logs both read and\n write events. A value of true logs only\n read events. A value of false logs only\n write events.

    \n
  • \n
  • \n

    \n \n eventSource\n - For filtering\n management events only. This can be set to NotEquals\n kms.amazonaws.com or NotEquals\n rdsdata.amazonaws.com.

    \n
  • \n
  • \n

    \n \n eventName\n - Can use any operator.\n You can use it to filter in or filter out any data event logged to CloudTrail,\n such as PutBucket or GetSnapshotBlock. You can have\n multiple values for this field, separated by commas.

    \n
  • \n
  • \n

    \n \n eventCategory\n - This is required and\n must be set to Equals. \n

    \n
      \n
    • \n

      \n For CloudTrail management events, the value\n must be Management. \n

      \n
    • \n
    • \n

      \n For CloudTrail data events, the value\n must be Data. \n

      \n
    • \n
    \n

    The following are used only for event data stores:

    \n
      \n
    • \n

      \n For CloudTrail Insights events, the value\n must be Insight. \n

      \n
    • \n
    • \n

      \n For Config\n configuration items, the value must be ConfigurationItem.\n

      \n
    • \n
    • \n

      \n For Audit Manager evidence, the value must be Evidence.\n

      \n
    • \n
    • \n

      \n For non-Amazon Web Services events, the value must be ActivityAuditLog.\n

      \n
    • \n
    \n
  • \n
  • \n

    \n \n resources.type\n - This field is\n required for CloudTrail data events. resources.type can only\n use the Equals operator, and the value can be one of the\n following:

    \n
      \n
    • \n

      \n AWS::DynamoDB::Table\n

      \n
    • \n
    • \n

      \n AWS::Lambda::Function\n

      \n
    • \n
    • \n

      \n AWS::S3::Object\n

      \n
    • \n
    • \n

      \n AWS::AppConfig::Configuration\n

      \n
    • \n
    • \n

      \n AWS::B2BI::Transformer\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::AgentAlias\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::KnowledgeBase\n

      \n
    • \n
    • \n

      \n AWS::Cassandra::Table\n

      \n
    • \n
    • \n

      \n AWS::CloudFront::KeyValueStore\n

      \n
    • \n
    • \n

      \n AWS::CloudTrail::Channel\n

      \n
    • \n
    • \n

      \n AWS::CodeWhisperer::Customization\n

      \n
    • \n
    • \n

      \n AWS::CodeWhisperer::Profile\n

      \n
    • \n
    • \n

      \n AWS::Cognito::IdentityPool\n

      \n
    • \n
    • \n

      \n AWS::DynamoDB::Stream\n

      \n
    • \n
    • \n

      \n AWS::EC2::Snapshot\n

      \n
    • \n
    • \n

      \n AWS::EMRWAL::Workspace\n

      \n
    • \n
    • \n

      \n AWS::FinSpace::Environment\n

      \n
    • \n
    • \n

      \n AWS::Glue::Table\n

      \n
    • \n
    • \n

      \n AWS::GreengrassV2::ComponentVersion\n

      \n
    • \n
    • \n

      \n AWS::GreengrassV2::Deployment\n

      \n
    • \n
    • \n

      \n AWS::GuardDuty::Detector\n

      \n
    • \n
    • \n

      \n AWS::IoT::Certificate\n

      \n
    • \n
    • \n

      \n AWS::IoT::Thing\n

      \n
    • \n
    • \n

      \n AWS::IoTSiteWise::Asset\n

      \n
    • \n
    • \n

      \n AWS::IoTSiteWise::TimeSeries\n

      \n
    • \n
    • \n

      \n AWS::IoTTwinMaker::Entity\n

      \n
    • \n
    • \n

      \n AWS::IoTTwinMaker::Workspace\n

      \n
    • \n
    • \n

      \n AWS::KendraRanking::ExecutionPlan\n

      \n
    • \n
    • \n

      \n AWS::KinesisVideo::Stream\n

      \n
    • \n
    • \n

      \n AWS::ManagedBlockchain::Network\n

      \n
    • \n
    • \n

      \n AWS::ManagedBlockchain::Node\n

      \n
    • \n
    • \n

      \n AWS::MedicalImaging::Datastore\n

      \n
    • \n
    • \n

      \n AWS::NeptuneGraph::Graph\n

      \n
    • \n
    • \n

      \n AWS::PCAConnectorAD::Connector\n

      \n
    • \n
    • \n

      \n AWS::QApps:QApp\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::Application\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::DataSource\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::Index\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::WebExperience\n

      \n
    • \n
    • \n

      \n AWS::RDS::DBCluster\n

      \n
    • \n
    • \n

      \n AWS::S3::AccessPoint\n

      \n
    • \n
    • \n

      \n AWS::S3ObjectLambda::AccessPoint\n

      \n
    • \n
    • \n

      \n AWS::S3Outposts::Object\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::Endpoint\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::ExperimentTrialComponent\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::FeatureGroup\n

      \n
    • \n
    • \n

      \n AWS::ServiceDiscovery::Namespace \n

      \n
    • \n
    • \n

      \n AWS::ServiceDiscovery::Service\n

      \n
    • \n
    • \n

      \n AWS::SCN::Instance\n

      \n
    • \n
    • \n

      \n AWS::SNS::PlatformEndpoint\n

      \n
    • \n
    • \n

      \n AWS::SNS::Topic\n

      \n
    • \n
    • \n

      \n AWS::SQS::Queue\n

      \n
    • \n
    • \n

      \n AWS::SSM::ManagedNode\n

      \n
    • \n
    • \n

      \n AWS::SSMMessages::ControlChannel\n

      \n
    • \n
    • \n

      \n AWS::SWF::Domain\n

      \n
    • \n
    • \n

      \n AWS::ThinClient::Device\n

      \n
    • \n
    • \n

      \n AWS::ThinClient::Environment\n

      \n
    • \n
    • \n

      \n AWS::Timestream::Database\n

      \n
    • \n
    • \n

      \n AWS::Timestream::Table\n

      \n
    • \n
    • \n

      \n AWS::VerifiedPermissions::PolicyStore\n

      \n
    • \n
    • \n

      \n AWS::XRay::Trace\n

      \n
    • \n
    \n

    You can have only one resources.type field per selector. To log data\n events on more than one resource type, add another selector.

    \n
  • \n
  • \n

    \n \n resources.ARN\n - You can use any\n operator with resources.ARN, but if you use Equals or\n NotEquals, the value must exactly match the ARN of a valid resource\n of the type you've specified in the template as the value of resources.type.

    \n \n

    You can't use the resources.ARN field to filter resource types that do not have ARNs.

    \n
    \n

    The resources.ARN field can be set one of the following.

    \n

    If resources.type equals AWS::S3::Object, the ARN must be in\n one of the following formats. To log all data events for all objects in a specific S3\n bucket, use the StartsWith operator, and include only the bucket ARN as\n the matching value.

    \n

    The trailing slash is intentional; do not exclude it. Replace the text between\n less than and greater than symbols (<>) with resource-specific information.

    \n
      \n
    • \n

      \n arn::s3:::/\n

      \n
    • \n
    • \n

      \n arn::s3::://\n

      \n
    • \n
    \n

    When resources.type equals AWS::DynamoDB::Table, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::dynamodb:::table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Lambda::Function, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::lambda:::function:\n

      \n
    • \n
    \n

    When resources.type equals AWS::AppConfig::Configuration, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::appconfig:::application//environment//configuration/\n

      \n
    • \n
    \n

    When resources.type equals AWS::B2BI::Transformer, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::b2bi:::transformer/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Bedrock::AgentAlias, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::bedrock:::agent-alias//\n

      \n
    • \n
    \n

    When resources.type equals AWS::Bedrock::KnowledgeBase, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::bedrock:::knowledge-base/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Cassandra::Table, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cassandra:::/keyspace//table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CloudFront::KeyValueStore, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cloudfront:::key-value-store/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CloudTrail::Channel, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cloudtrail:::channel/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CodeWhisperer::Customization, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::codewhisperer:::customization/\n

      \n
    • \n
    \n

    When resources.type equals AWS::CodeWhisperer::Profile, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::codewhisperer:::profile/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Cognito::IdentityPool, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::cognito-identity:::identitypool/\n

      \n
    • \n
    \n

    When resources.type equals AWS::DynamoDB::Stream, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::dynamodb:::table//stream/\n

      \n
    • \n
    \n

    When resources.type equals AWS::EC2::Snapshot, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::ec2:::snapshot/\n

      \n
    • \n
    \n

    When resources.type equals AWS::EMRWAL::Workspace, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::emrwal:::workspace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::FinSpace::Environment,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::finspace:::environment/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Glue::Table, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::glue:::table//\n

      \n
    • \n
    \n

    When resources.type equals AWS::GreengrassV2::ComponentVersion, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::greengrass:::components/\n

      \n
    • \n
    \n

    When resources.type equals AWS::GreengrassV2::Deployment, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::greengrass:::deployments/\n

      \n
    • \n
    \n

    When resources.type equals AWS::GuardDuty::Detector, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::guardduty:::detector/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoT::Certificate,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iot:::cert/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoT::Thing,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iot:::thing/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTSiteWise::Asset,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iotsitewise:::asset/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTSiteWise::TimeSeries,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iotsitewise:::timeseries/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTTwinMaker::Entity,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iottwinmaker:::workspace//entity/\n

      \n
    • \n
    \n

    When resources.type equals AWS::IoTTwinMaker::Workspace,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::iottwinmaker:::workspace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::KendraRanking::ExecutionPlan, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::kendra-ranking:::rescore-execution-plan/\n

      \n
    • \n
    \n

    When resources.type equals AWS::KinesisVideo::Stream, and the\n operator is set to Equals or NotEquals, the ARN must be in\n the following format:

    \n
      \n
    • \n

      \n arn::kinesisvideo:::stream//\n

      \n
    • \n
    \n

    When resources.type equals AWS::ManagedBlockchain::Network,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::managedblockchain:::networks/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ManagedBlockchain::Node,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::managedblockchain:::nodes/\n

      \n
    • \n
    \n

    When resources.type equals AWS::MedicalImaging::Datastore,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::medical-imaging:::datastore/\n

      \n
    • \n
    \n

    When resources.type equals AWS::NeptuneGraph::Graph,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::neptune-graph:::graph/\n

      \n
    • \n
    \n

    When resources.type equals AWS::PCAConnectorAD::Connector,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::pca-connector-ad:::connector/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QApps:QApp,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qapps:::application//qapp/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::Application,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::DataSource,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//index//data-source/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::Index,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//index/\n

      \n
    • \n
    \n

    When resources.type equals AWS::QBusiness::WebExperience,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::qbusiness:::application//web-experience/\n

      \n
    • \n
    \n

    When resources.type equals AWS::RDS::DBCluster,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::rds:::cluster/\n

      \n
    • \n
    \n

    When resources.type equals AWS::S3::AccessPoint, and the\n operator is set to Equals or NotEquals, the ARN must be in\n one of the following formats. To log events on all objects in an S3 access point, we\n recommend that you use only the access point ARN, don’t include the object path, and\n use the StartsWith or NotStartsWith operators.

    \n
      \n
    • \n

      \n arn::s3:::accesspoint/\n

      \n
    • \n
    • \n

      \n arn::s3:::accesspoint//object/\n

      \n
    • \n
    \n

    When resources.type equals\n AWS::S3ObjectLambda::AccessPoint, and the operator is set to\n Equals or NotEquals, the ARN must be in the following\n format:

    \n
      \n
    • \n

      \n arn::s3-object-lambda:::accesspoint/\n

      \n
    • \n
    \n

    When resources.type equals AWS::S3Outposts::Object, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::s3-outposts:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::Endpoint, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::endpoint/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::ExperimentTrialComponent, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::experiment-trial-component/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SageMaker::FeatureGroup, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::sagemaker:::feature-group/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SCN::Instance, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::scn:::instance/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ServiceDiscovery::Namespace, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::servicediscovery:::namespace/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ServiceDiscovery::Service, and the operator is set to\n Equals or NotEquals, the ARN must be in the following format:

    \n
      \n
    • \n

      \n arn::servicediscovery:::service/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SNS::PlatformEndpoint,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sns:::endpoint///\n

      \n
    • \n
    \n

    When resources.type equals AWS::SNS::Topic,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sns:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SQS::Queue,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::sqs:::\n

      \n
    • \n
    \n

    When resources.type equals AWS::SSM::ManagedNode, and\n the operator is set to Equals or NotEquals, the ARN must be\n in one of the following formats:

    \n
      \n
    • \n

      \n arn::ssm:::managed-instance/\n

      \n
    • \n
    • \n

      \n arn::ec2:::instance/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SSMMessages::ControlChannel, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::ssmmessages:::control-channel/\n

      \n
    • \n
    \n

    When resources.type equals AWS::SWF::Domain,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::swf:::domain/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ThinClient::Device, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::thinclient:::device/\n

      \n
    • \n
    \n

    When resources.type equals AWS::ThinClient::Environment, and\n the operator is set to Equals or NotEquals, the ARN must be\n in the following format:

    \n
      \n
    • \n

      \n arn::thinclient:::environment/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Timestream::Database,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::timestream:::database/\n

      \n
    • \n
    \n

    When resources.type equals AWS::Timestream::Table,\n and the operator is set to Equals or NotEquals, the ARN\n must be in the following format:

    \n
      \n
    • \n

      \n arn::timestream:::database//table/\n

      \n
    • \n
    \n

    When resources.type equals AWS::VerifiedPermissions::PolicyStore, and the operator is\n set to Equals or NotEquals, the ARN must be in the\n following format:

    \n
      \n
    • \n

      \n arn::verifiedpermissions:::policy-store/\n

      \n
    • \n
    \n
  • \n
", + "smithy.api#documentation": "

A field in a CloudTrail event record on which to filter events to be logged. For\n event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for\n selecting events as filtering is not supported.

\n

For CloudTrail management events, supported fields include eventCategory (required), eventSource, and readOnly.

\n

For CloudTrail data events, supported fields include eventCategory (required), resources.type (required), eventName, readOnly,\n and resources.ARN.

\n

For CloudTrail network activity events, supported fields include eventCategory (required), eventSource (required), eventName,\n errorCode, and vpcEndpointId.

\n

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is\n eventCategory.

\n
    \n
  • \n

    \n \n readOnly\n - This is an optional field that is only used for management events and data events. This field can be set to\n Equals with a value of true or false. If you do\n not add this field, CloudTrail logs both read and\n write events. A value of true logs only\n read events. A value of false logs only\n write events.

    \n
  • \n
  • \n

    \n \n eventSource\n - This field is only used for management events and network activity events.

    \n

    For management events, this is an optional field that can be set to NotEquals\n kms.amazonaws.com to exclude KMS management events, or NotEquals\n rdsdata.amazonaws.com to exclude RDS management events.

    \n

    For network activity events, this is a required field that only uses the\n Equals operator. Set this field to the event source for which you want to\n log network activity events. If you want to log network activity events for multiple\n event sources, you must create a separate field selector for each event\n source.

    \n

    The following are valid values for network activity events:

    \n
      \n
    • \n

      \n cloudtrail.amazonaws.com\n

      \n
    • \n
    • \n

      \n ec2.amazonaws.com\n

      \n
    • \n
    • \n

      \n kms.amazonaws.com\n

      \n
    • \n
    • \n

      \n secretsmanager.amazonaws.com\n

      \n
    • \n
    \n
  • \n
  • \n

    \n \n eventName\n - This is an optional field that is only used for data events and network activity events. You can use any operator with \n eventName. You can use it to filter in or filter out specific events. You can have\n multiple values for this field, separated by commas.

    \n
  • \n
  • \n

    \n \n eventCategory\n - This field is required and\n must be set to Equals. \n

    \n
      \n
    • \n

      \n For CloudTrail management events, the value\n must be Management. \n

      \n
    • \n
    • \n

      \n For CloudTrail data events, the value\n must be Data. \n

      \n
    • \n
    • \n

      \n For CloudTrail network activity events, the value\n must be NetworkActivity. \n

      \n
    • \n
    \n

    The following are used only for event data stores:

    \n
      \n
    • \n

      \n For CloudTrail Insights events, the value\n must be Insight. \n

      \n
    • \n
    • \n

      \n For Config\n configuration items, the value must be ConfigurationItem.\n

      \n
    • \n
    • \n

      \n For Audit Manager evidence, the value must be Evidence.\n

      \n
    • \n
    • \n

      \n For non-Amazon Web Services events, the value must be ActivityAuditLog.\n

      \n
    • \n
    \n
  • \n
  • \n

    \n \n errorCode\n - This field is only used to filter CloudTrail network activity events\n and is optional. This is the error code to filter on. Currently, the only valid errorCode is VpceAccessDenied. \n errorCode can only use the Equals operator.

    \n
  • \n
  • \n

    \n \n resources.type\n - This field is\n required for CloudTrail data events. resources.type can only\n use the Equals operator.

    \n

    The value can be one of the following:

    \n
      \n
    • \n

      \n AWS::AppConfig::Configuration\n

      \n
    • \n
    • \n

      \n AWS::B2BI::Transformer\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::AgentAlias\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::FlowAlias\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::Guardrail\n

      \n
    • \n
    • \n

      \n AWS::Bedrock::KnowledgeBase\n

      \n
    • \n
    • \n

      \n AWS::Cassandra::Table\n

      \n
    • \n
    • \n

      \n AWS::CloudFront::KeyValueStore\n

      \n
    • \n
    • \n

      \n AWS::CloudTrail::Channel\n

      \n
    • \n
    • \n

      \n AWS::CloudWatch::Metric\n

      \n
    • \n
    • \n

      \n AWS::CodeWhisperer::Customization\n

      \n
    • \n
    • \n

      \n AWS::CodeWhisperer::Profile\n

      \n
    • \n
    • \n

      \n AWS::Cognito::IdentityPool\n

      \n
    • \n
    • \n

      \n AWS::DynamoDB::Stream\n

      \n
    • \n
    • \n

      \n AWS::DynamoDB::Table\n

      \n
    • \n
    • \n

      \n AWS::EC2::Snapshot\n

      \n
    • \n
    • \n

      \n AWS::EMRWAL::Workspace\n

      \n
    • \n
    • \n

      \n AWS::FinSpace::Environment\n

      \n
    • \n
    • \n

      \n AWS::Glue::Table\n

      \n
    • \n
    • \n

      \n AWS::GreengrassV2::ComponentVersion\n

      \n
    • \n
    • \n

      \n AWS::GreengrassV2::Deployment\n

      \n
    • \n
    • \n

      \n AWS::GuardDuty::Detector\n

      \n
    • \n
    • \n

      \n AWS::IoT::Certificate\n

      \n
    • \n
    • \n

      \n AWS::IoT::Thing\n

      \n
    • \n
    • \n

      \n AWS::IoTSiteWise::Asset\n

      \n
    • \n
    • \n

      \n AWS::IoTSiteWise::TimeSeries\n

      \n
    • \n
    • \n

      \n AWS::IoTTwinMaker::Entity\n

      \n
    • \n
    • \n

      \n AWS::IoTTwinMaker::Workspace\n

      \n
    • \n
    • \n

      \n AWS::KendraRanking::ExecutionPlan\n

      \n
    • \n
    • \n

      \n AWS::Kinesis::Stream\n

      \n
    • \n
    • \n

      \n AWS::Kinesis::StreamConsumer\n

      \n
    • \n
    • \n

      \n AWS::KinesisVideo::Stream\n

      \n
    • \n
    • \n

      \n AWS::Lambda::Function\n

      \n
    • \n
    • \n

      \n AWS::MachineLearning::MlModel\n

      \n
    • \n
    • \n

      \n AWS::ManagedBlockchain::Network\n

      \n
    • \n
    • \n

      \n AWS::ManagedBlockchain::Node\n

      \n
    • \n
    • \n

      \n AWS::MedicalImaging::Datastore\n

      \n
    • \n
    • \n

      \n AWS::NeptuneGraph::Graph\n

      \n
    • \n
    • \n

      \n AWS::One::UKey\n

      \n
    • \n
    • \n

      \n AWS::One::User\n

      \n
    • \n
    • \n

      \n AWS::PaymentCryptography::Alias\n

      \n
    • \n
    • \n

      \n AWS::PaymentCryptography::Key\n

      \n
    • \n
    • \n

      \n AWS::PCAConnectorAD::Connector\n

      \n
    • \n
    • \n

      \n AWS::PCAConnectorSCEP::Connector\n

      \n
    • \n
    • \n

      \n AWS::QApps:QApp\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::Application\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::DataSource\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::Index\n

      \n
    • \n
    • \n

      \n AWS::QBusiness::WebExperience\n

      \n
    • \n
    • \n

      \n AWS::RDS::DBCluster\n

      \n
    • \n
    • \n

      \n AWS::RUM::AppMonitor\n

      \n
    • \n
    • \n

      \n AWS::S3::AccessPoint\n

      \n
    • \n
    • \n

      \n AWS::S3::Object\n

      \n
    • \n
    • \n

      \n AWS::S3Express::Object\n

      \n
    • \n
    • \n

      \n AWS::S3ObjectLambda::AccessPoint\n

      \n
    • \n
    • \n

      \n AWS::S3Outposts::Object\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::Endpoint\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::ExperimentTrialComponent\n

      \n
    • \n
    • \n

      \n AWS::SageMaker::FeatureGroup\n

      \n
    • \n
    • \n

      \n AWS::ServiceDiscovery::Namespace \n

      \n
    • \n
    • \n

      \n AWS::ServiceDiscovery::Service\n

      \n
    • \n
    • \n

      \n AWS::SCN::Instance\n

      \n
    • \n
    • \n

      \n AWS::SNS::PlatformEndpoint\n

      \n
    • \n
    • \n

      \n AWS::SNS::Topic\n

      \n
    • \n
    • \n

      \n AWS::SQS::Queue\n

      \n
    • \n
    • \n

      \n AWS::SSM::ManagedNode\n

      \n
    • \n
    • \n

      \n AWS::SSMMessages::ControlChannel\n

      \n
    • \n
    • \n

      \n AWS::StepFunctions::StateMachine\n

      \n
    • \n
    • \n

      \n AWS::SWF::Domain\n

      \n
    • \n
    • \n

      \n AWS::ThinClient::Device\n

      \n
    • \n
    • \n

      \n AWS::ThinClient::Environment\n

      \n
    • \n
    • \n

      \n AWS::Timestream::Database\n

      \n
    • \n
    • \n

      \n AWS::Timestream::Table\n

      \n
    • \n
    • \n

      \n AWS::VerifiedPermissions::PolicyStore\n

      \n
    • \n
    • \n

      \n AWS::XRay::Trace\n

      \n
    • \n
    \n

    You can have only one resources.type field per selector. To log events on more than one resource type, add another selector.

  • resources.ARN - The resources.ARN is an optional field for data events. You can use any operator with resources.ARN, but if you use Equals or NotEquals, the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the StartsWith operator, and include only the bucket ARN as the matching value.

    For information about filtering data events on the resources.ARN field, see Filtering data events by resources.ARN in the CloudTrail User Guide.

    You can't use the resources.ARN field to filter resource types that do not have ARNs.

  • vpcEndpointId - This field is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with vpcEndpointId.
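A minimal, hypothetical Soto sketch of these advanced event selector fields is shown below; the Swift member names (fieldSelectors, startsWith, and so on) are assumed from the Smithy shapes in this diff rather than checked against the generated code.

```swift
import SotoCloudTrail

// Assumed-name sketch: log S3 object-level data events, narrowed with
// StartsWith on resources.ARN so only one bucket's objects are logged.
let s3DataSelector = CloudTrail.AdvancedEventSelector(
    fieldSelectors: [
        .init(equals: ["Data"], field: "eventCategory"),
        .init(equals: ["AWS::S3::Object"], field: "resources.type"),
        .init(field: "resources.ARN", startsWith: ["arn:aws:s3:::amzn-s3-demo-bucket1/"])
    ],
    name: "S3 data events for one bucket"
)
```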
", "smithy.api#required": {} } }, @@ -2551,12 +2551,12 @@ "Values": { "target": "com.amazonaws.cloudtrail#DataResourceValues", "traits": { - "smithy.api#documentation": "

An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.

  • To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3. This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account.

  • To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs data events for all objects in this S3 bucket.

  • To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::bucket-1/example-images. The trail logs data events for objects in this S3 bucket that match the prefix.

  • To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda. This also enables logging of Invoke activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account.

  • To log data events for a specific Lambda function, specify the function ARN. Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2.

  • To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb.
" + "smithy.api#documentation": "

An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.

  • To log data events for all objects in all S3 buckets in your Amazon Web Services account, specify the prefix as arn:aws:s3. This also enables logging of data event activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a bucket that belongs to another Amazon Web Services account.

  • To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as arn:aws:s3:::amzn-s3-demo-bucket1/. The trail logs data events for all objects in this S3 bucket.

  • To log data events for specific objects, specify the S3 bucket and object prefix such as arn:aws:s3:::amzn-s3-demo-bucket1/example-images. The trail logs data events for objects in this S3 bucket that match the prefix.

  • To log data events for all Lambda functions in your Amazon Web Services account, specify the prefix as arn:aws:lambda. This also enables logging of Invoke activity performed by any user or role in your Amazon Web Services account, even if that activity is performed on a function that belongs to another Amazon Web Services account.

  • To log data events for a specific Lambda function, specify the function ARN. Lambda function ARNs are exact. For example, if you specify a function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2.

  • To log data events for all DynamoDB tables in your Amazon Web Services account, specify the prefix as arn:aws:dynamodb.
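As a small illustration from the Swift side, a basic DataResource matching the second bullet above might look like the following sketch; the member names are assumed from the model and not verified against the generated Soto code.

```swift
import SotoCloudTrail

// Assumed-name sketch: log data events for every object under one bucket
// by pairing the S3 object resource type with an empty object prefix.
let bucketObjects = CloudTrail.DataResource(
    type: "AWS::S3::Object",
    values: ["arn:aws:s3:::amzn-s3-demo-bucket1/"]
)
```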
" } } }, "traits": { - "smithy.api#documentation": "

Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.

Configure the DataResource to specify the resource type and resource ARNs for which you want to log data events.

You can specify the following resource types in your event selectors for your trail:

  • AWS::DynamoDB::Table
  • AWS::Lambda::Function
  • AWS::S3::Object

The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors for the trail.

If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500.

The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read and Write data events.

  1. A user uploads an image file to bucket-1.
  2. The PutObject API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.
  3. A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2.
  4. The PutObject API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn't log the event.

The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named MyLambdaFunction, but not for all Lambda functions.

  1. A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.
  2. The Invoke API operation on MyLambdaFunction is a Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.
  3. The Invoke API operation on MyOtherLambdaFunction is a Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn't log the event.
" + "smithy.api#documentation": "

You can configure the DataResource in an EventSelector to log data events for the following three resource types:

  • AWS::DynamoDB::Table
  • AWS::Lambda::Function
  • AWS::S3::Object

To log data events for all other resource types including objects stored in directory buckets, you must use AdvancedEventSelectors. You must also use AdvancedEventSelectors if you want to filter on the eventName field.

Configure the DataResource to specify the resource type and resource ARNs for which you want to log data events.

The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors for the trail.

The following example demonstrates how logging works when you configure logging of all data events for a general purpose bucket named amzn-s3-demo-bucket1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read and Write data events.

  1. A user uploads an image file to amzn-s3-demo-bucket1.
  2. The PutObject API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.
  3. A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::amzn-s3-demo-bucket1.
  4. The PutObject API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn't log the event.

The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named MyLambdaFunction, but not for all Lambda functions.

  1. A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.
  2. The Invoke API operation on MyLambdaFunction is a Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.
  3. The Invoke API operation on MyOtherLambdaFunction is a Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn't log the event.
" } }, "com.amazonaws.cloudtrail#DataResourceValues": { @@ -3867,7 +3867,7 @@ "DataResources": { "target": "com.amazonaws.cloudtrail#DataResources", "traits": { - "smithy.api#documentation": "

CloudTrail supports data event logging for Amazon S3 objects, Lambda functions, and Amazon DynamoDB tables with basic event selectors.\n You can specify up to 250 resources for an individual event selector, but the total number\n of data resources cannot exceed 250 across all event selectors in a trail. This limit does\n not apply if you configure resource logging for all data events.

\n

For more information, see Data\n Events and Limits in CloudTrail in the CloudTrail User\n Guide.

" + "smithy.api#documentation": "

CloudTrail supports data event logging for Amazon S3 objects in standard S3 buckets, Lambda functions, and Amazon DynamoDB tables with basic event selectors.\n You can specify up to 250 resources for an individual event selector, but the total number\n of data resources cannot exceed 250 across all event selectors in a trail. This limit does\n not apply if you configure resource logging for all data events.

\n

For more information, see Data\n Events and Limits in CloudTrail in the CloudTrail User\n Guide.

\n \n

To log data events for all other resource types including objects stored in \n directory buckets, you must use AdvancedEventSelectors. You must also \n use AdvancedEventSelectors if you want to filter on the eventName field.

\n
" } }, "ExcludeManagementEventSources": { @@ -4209,7 +4209,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following:

  • If your event selector includes read-only events, write-only events, or all events. This applies to both management events and data events.

  • If your event selector includes management events.

  • If your event selector includes data events, the resources on which you are logging data events.

For more information about logging management and data events, see the following topics in the CloudTrail User Guide:

\n ", + "smithy.api#documentation": "

Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following:

  • If your event selector includes read-only events, write-only events, or all events. This applies to management events, data events, and network activity events.

  • If your event selector includes management events.

  • If your event selector includes network activity events, the event sources for which you are logging network activity events.

  • If your event selector includes data events, the resources on which you are logging data events.

For more information about logging management, data, and network activity events, see the following topics in the CloudTrail User Guide:

\n ", "smithy.api#idempotent": {} } }, @@ -7366,7 +7366,7 @@ } ], "traits": { - "smithy.api#documentation": "

Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.

When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

Example

  1. You create an event selector for a trail and specify that you want write-only events.
  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.
  3. CloudTrail evaluates whether the events match your event selectors.
  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.
  5. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown.

You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide.

You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.

", + "smithy.api#documentation": "

Configures event selectors (also referred to as basic event selectors) or advanced event selectors for your trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten.

You can use AdvancedEventSelectors to log management events, data events for all resource types, and network activity events.

You can use EventSelectors to log management events and data events for the following resource types:

  • AWS::DynamoDB::Table
  • AWS::Lambda::Function
  • AWS::S3::Object

You can't use EventSelectors to log network activity events.

If you want your trail to log Insights events, be sure the event selector or advanced event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events or network activity events.

When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

Example

  1. You create an event selector for a trail and specify that you want to log write-only events.
  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.
  3. CloudTrail evaluates whether the events match your event selectors.
  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.
  5. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown.

You can configure up to five event selectors for each trail.

You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. For more information, see Logging management events, Logging data events, Logging network activity events, and Quotas in CloudTrail in the CloudTrail User Guide.
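For orientation, here is a hypothetical Soto sketch of the call described above; the operation and member names (putEventSelectors, readWriteType, and so on) and the async form are assumptions based on this model rather than values taken from the generated API.

```swift
import SotoCloudTrail

// Assumed-name sketch: apply a basic event selector that logs write-only
// management events plus S3 object data events for one bucket prefix.
func configureTrail(using cloudTrail: CloudTrail) async throws {
    let selector = CloudTrail.EventSelector(
        dataResources: [
            .init(type: "AWS::S3::Object", values: ["arn:aws:s3:::amzn-s3-demo-bucket1/"])
        ],
        includeManagementEvents: true,
        readWriteType: .writeOnly
    )
    _ = try await cloudTrail.putEventSelectors(
        .init(eventSelectors: [selector], trailName: "my-trail")
    )
}
```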

", "smithy.api#idempotent": {} } }, @@ -7383,13 +7383,13 @@ "EventSelectors": { "target": "com.amazonaws.cloudtrail#EventSelectors", "traits": { - "smithy.api#documentation": "

Specifies the settings for your event selectors. You can configure up to five event\n selectors for a trail. You can use either EventSelectors or\n AdvancedEventSelectors in a PutEventSelectors request, but not\n both. If you apply EventSelectors to a trail, any existing\n AdvancedEventSelectors are overwritten.

" + "smithy.api#documentation": "

Specifies the settings for your event selectors. You can use event selectors to log management events and data events for the following resource types:

  • AWS::DynamoDB::Table
  • AWS::Lambda::Function
  • AWS::S3::Object

You can't use event selectors to log network activity events.

You can configure up to five event selectors for a trail. You can use either EventSelectors or AdvancedEventSelectors in a PutEventSelectors request, but not both. If you apply EventSelectors to a trail, any existing AdvancedEventSelectors are overwritten.

" } }, "AdvancedEventSelectors": { "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", "traits": { - "smithy.api#documentation": "

Specifies the settings for advanced event selectors. You can add advanced event\n selectors, and conditions for your advanced event selectors, up to a maximum of 500 values\n for all conditions and selectors on a trail. You can use either\n AdvancedEventSelectors or EventSelectors, but not both. If you\n apply AdvancedEventSelectors to a trail, any existing\n EventSelectors are overwritten. For more information about advanced event\n selectors, see Logging data events in the CloudTrail User Guide.

" + "smithy.api#documentation": "

Specifies the settings for advanced event selectors. You can use advanced event selectors to \n log management events, data events for all resource types, and network activity events.

\n

You can add advanced event\n selectors, and conditions for your advanced event selectors, up to a maximum of 500 values\n for all conditions and selectors on a trail. You can use either\n AdvancedEventSelectors or EventSelectors, but not both. If you\n apply AdvancedEventSelectors to a trail, any existing\n EventSelectors are overwritten. For more information about advanced event\n selectors, see Logging data events and \n Logging network activity events\n in the CloudTrail User Guide.

" } } }, @@ -8518,7 +8518,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION \n and the eventCategory must be Management, Data, or ConfigurationItem.

" + "smithy.api#documentation": "

Starts the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To start ingestion, the event data store Status must be STOPPED_INGESTION \n and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem.

" } }, "com.amazonaws.cloudtrail#StartEventDataStoreIngestionRequest": { @@ -8903,7 +8903,7 @@ } ], "traits": { - "smithy.api#documentation": "

Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To stop ingestion, the event data store Status must be ENABLED \n and the eventCategory must be Management, Data, or ConfigurationItem.

" + "smithy.api#documentation": "

Stops the ingestion of live events on an event data store specified as either an ARN or the ID portion of the ARN. To stop ingestion, the event data store Status must be ENABLED \n and the eventCategory must be Management, Data, NetworkActivity, or ConfigurationItem.

" } }, "com.amazonaws.cloudtrail#StopEventDataStoreIngestionRequest": { @@ -9621,7 +9621,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates an event data store. The required EventDataStore value is an ARN or\n the ID portion of the ARN. Other parameters are optional, but at least one optional\n parameter must be specified, or CloudTrail throws an error.\n RetentionPeriod is in days, and valid values are integers between 7 and\n 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, TerminationProtection is enabled.

\n

For event data stores for CloudTrail events, AdvancedEventSelectors\n includes or excludes management or data events in your event data store. For more\n information about AdvancedEventSelectors, see AdvancedEventSelectors.

\n

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or non-Amazon Web Services events,\n AdvancedEventSelectors includes events of that type in your event data store.

", + "smithy.api#documentation": "

Updates an event data store. The required EventDataStore value is an ARN or\n the ID portion of the ARN. Other parameters are optional, but at least one optional\n parameter must be specified, or CloudTrail throws an error.\n RetentionPeriod is in days, and valid values are integers between 7 and\n 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, TerminationProtection is enabled.

\n

For event data stores for CloudTrail events, AdvancedEventSelectors\n includes or excludes management, data, or network activity events in your event data store. For more\n information about AdvancedEventSelectors, see AdvancedEventSelectors.

\n

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or non-Amazon Web Services events,\n AdvancedEventSelectors includes events of that type in your event data store.

", "smithy.api#idempotent": {} } }, diff --git a/models/codeartifact.json b/models/codeartifact.json index 7605c3772a..487ad11dc4 100644 --- a/models/codeartifact.json +++ b/models/codeartifact.json @@ -3182,6 +3182,23 @@ "target": "com.amazonaws.codeartifact#DomainSummary" } }, + "com.amazonaws.codeartifact#EndpointType": { + "type": "enum", + "members": { + "DUALSTACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "dualstack" + } + }, + "IPV4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ipv4" + } + } + } + }, "com.amazonaws.codeartifact#ErrorMessage": { "type": "string" }, @@ -3817,6 +3834,13 @@ "smithy.api#httpQuery": "format", "smithy.api#required": {} } + }, + "endpointType": { + "target": "com.amazonaws.codeartifact#EndpointType", + "traits": { + "smithy.api#documentation": "

A string that specifies the type of endpoint.

", + "smithy.api#httpQuery": "endpointType" + } } }, "traits": { @@ -6990,7 +7014,7 @@ "packageFormat": { "target": "com.amazonaws.codeartifact#PackageFormat", "traits": { - "smithy.api#documentation": "

The package format associated with a repository's external connection. The valid package formats are:

  • npm: A Node Package Manager (npm) package.
  • pypi: A Python Package Index (PyPI) package.
  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.
  • nuget: A NuGet package.
" + "smithy.api#documentation": "

The package format associated with a repository's external connection. The valid package formats are:

  • npm: A Node Package Manager (npm) package.
  • pypi: A Python Package Index (PyPI) package.
  • maven: A Maven package that contains compiled code in a distributable format, such as a JAR file.
  • nuget: A NuGet package.
  • generic: A generic package.
  • ruby: A Ruby package.
  • swift: A Swift package.
  • cargo: A Cargo package.
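A hypothetical Soto sketch of requesting a repository endpoint with the new endpointType query parameter follows; the member names and enum cases (.dualstack, .npm) are assumptions based on this model, not verified against the generated code.

```swift
import SotoCodeArtifact

// Assumed-name sketch: ask for the npm endpoint of a repository over the
// dual-stack endpoint type added in this change.
func npmEndpoint(using codeArtifact: CodeArtifact) async throws -> String? {
    let response = try await codeArtifact.getRepositoryEndpoint(
        .init(domain: "my-domain", endpointType: .dualstack, format: .npm, repository: "my-repo")
    )
    return response.repositoryEndpoint
}
```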
" } }, "status": { diff --git a/models/codebuild.json b/models/codebuild.json index fa7f301f26..9a44cf5d3b 100644 --- a/models/codebuild.json +++ b/models/codebuild.json @@ -2902,6 +2902,12 @@ "vpcConfig": { "target": "com.amazonaws.codebuild#VpcConfig" }, + "proxyConfiguration": { + "target": "com.amazonaws.codebuild#ProxyConfiguration", + "traits": { + "smithy.api#documentation": "

The proxy configuration of the compute fleet.

" + } + }, "imageId": { "target": "com.amazonaws.codebuild#NonEmptyString", "traits": { @@ -4145,6 +4151,12 @@ "vpcConfig": { "target": "com.amazonaws.codebuild#VpcConfig" }, + "proxyConfiguration": { + "target": "com.amazonaws.codebuild#ProxyConfiguration", + "traits": { + "smithy.api#documentation": "

The proxy configuration of the compute fleet.

" + } + }, "imageId": { "target": "com.amazonaws.codebuild#NonEmptyString", "traits": { @@ -4262,6 +4274,110 @@ } } }, + "com.amazonaws.codebuild#FleetProxyRule": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.codebuild#FleetProxyRuleType", + "traits": { + "smithy.api#documentation": "

The type of proxy rule.

", + "smithy.api#required": {} + } + }, + "effect": { + "target": "com.amazonaws.codebuild#FleetProxyRuleEffectType", + "traits": { + "smithy.api#documentation": "

The behavior of the proxy rule.

", + "smithy.api#required": {} + } + }, + "entities": { + "target": "com.amazonaws.codebuild#FleetProxyRuleEntities", + "traits": { + "smithy.api#documentation": "

The destination of the proxy rule.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the proxy rule for your reserved capacity instances.

" + } + }, + "com.amazonaws.codebuild#FleetProxyRuleBehavior": { + "type": "enum", + "members": { + "ALLOW_ALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALLOW_ALL" + } + }, + "DENY_ALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DENY_ALL" + } + } + } + }, + "com.amazonaws.codebuild#FleetProxyRuleEffectType": { + "type": "enum", + "members": { + "ALLOW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ALLOW" + } + }, + "DENY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DENY" + } + } + } + }, + "com.amazonaws.codebuild#FleetProxyRuleEntities": { + "type": "list", + "member": { + "target": "com.amazonaws.codebuild#String" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.codebuild#FleetProxyRuleType": { + "type": "enum", + "members": { + "DOMAIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DOMAIN" + } + }, + "IP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IP" + } + } + } + }, + "com.amazonaws.codebuild#FleetProxyRules": { + "type": "list", + "member": { + "target": "com.amazonaws.codebuild#FleetProxyRule" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, "com.amazonaws.codebuild#FleetScalingMetricType": { "type": "enum", "members": { @@ -6565,6 +6681,26 @@ "target": "com.amazonaws.codebuild#Project" } }, + "com.amazonaws.codebuild#ProxyConfiguration": { + "type": "structure", + "members": { + "defaultBehavior": { + "target": "com.amazonaws.codebuild#FleetProxyRuleBehavior", + "traits": { + "smithy.api#documentation": "

The default behavior of outgoing traffic.

" + } + }, + "orderedProxyRules": { + "target": "com.amazonaws.codebuild#FleetProxyRules", + "traits": { + "smithy.api#documentation": "

An array of FleetProxyRule objects that represent the specified destination domains or IPs to allow or deny network access control to.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the proxy configurations that apply network access control to your reserved capacity instances.
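To show how the new shapes compose, here is a hypothetical Soto sketch of a fleet proxy configuration that denies all egress except one domain; the member names and enum cases are assumed from the Smithy definitions in this diff.

```swift
import SotoCodeBuild

// Assumed-name sketch: deny all outgoing traffic by default, then allow
// a single destination domain for the reserved capacity fleet.
let proxyConfiguration = CodeBuild.ProxyConfiguration(
    defaultBehavior: .denyAll,
    orderedProxyRules: [
        .init(effect: .allow, entities: ["github.com"], type: .domain)
    ]
)
```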

" + } + }, "com.amazonaws.codebuild#PutResourcePolicy": { "type": "operation", "input": { @@ -7455,20 +7591,20 @@ "name": { "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "

The name of either the enterprise or organization that will send webhook events to CodeBuild, depending on if the webhook is a global or organization webhook respectively.

", + "smithy.api#documentation": "

The name of either the group, enterprise, or organization that will send webhook events to CodeBuild, depending on the type of webhook.

", "smithy.api#required": {} } }, "domain": { "target": "com.amazonaws.codebuild#String", "traits": { - "smithy.api#documentation": "

The domain of the GitHub Enterprise organization. Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE

" + "smithy.api#documentation": "

The domain of the GitHub Enterprise organization or the GitLab Self Managed group. Note that this parameter is only required if your project's source type is GITHUB_ENTERPRISE or GITLAB_SELF_MANAGED.

" } }, "scope": { "target": "com.amazonaws.codebuild#WebhookScopeType", "traits": { - "smithy.api#documentation": "

The type of scope for a GitHub webhook.

", + "smithy.api#documentation": "

The type of scope for a GitHub or GitLab webhook.

", "smithy.api#required": {} } } @@ -8579,6 +8715,12 @@ "vpcConfig": { "target": "com.amazonaws.codebuild#VpcConfig" }, + "proxyConfiguration": { + "target": "com.amazonaws.codebuild#ProxyConfiguration", + "traits": { + "smithy.api#documentation": "

The proxy configuration of the compute fleet.

" + } + }, "imageId": { "target": "com.amazonaws.codebuild#NonEmptyString", "traits": { @@ -9189,6 +9331,12 @@ "traits": { "smithy.api#enumValue": "RELEASE_NAME" } + }, + "REPOSITORY_NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPOSITORY_NAME" + } } } }, @@ -9206,6 +9354,12 @@ "traits": { "smithy.api#enumValue": "GITHUB_GLOBAL" } + }, + "GITLAB_GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GITLAB_GROUP" + } } } }, diff --git a/models/codeconnections.json b/models/codeconnections.json index 84bc2a569b..b98ddba7ac 100644 --- a/models/codeconnections.json +++ b/models/codeconnections.json @@ -917,7 +917,7 @@ "ConnectionArn": { "target": "com.amazonaws.codeconnections#ConnectionArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection\n reference when the connection is shared between Amazon Web Services.

\n \n

The ARN is never reused if the connection is deleted.

\n
" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connection. The ARN is used as the connection reference when the connection is shared between Amazon Web Services services.

\n \n

The ARN is never reused if the connection is deleted.

\n
" } }, "ProviderType": { @@ -1335,6 +1335,12 @@ "traits": { "smithy.api#documentation": "

When to trigger Git sync to begin the stack update.

" } + }, + "PullRequestComment": { + "target": "com.amazonaws.codeconnections#PullRequestComment", + "traits": { + "smithy.api#documentation": "

A toggle that specifies whether to enable or disable pull request comments for the sync configuration to be created.

" + } } }, "traits": { @@ -2717,6 +2723,23 @@ } } }, + "com.amazonaws.codeconnections#PullRequestComment": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.codeconnections#RepositoryLinkArn": { "type": "string", "traits": { @@ -3463,6 +3486,12 @@ "traits": { "smithy.api#documentation": "

When to trigger Git sync to begin the stack update.

" } + }, + "PullRequestComment": { + "target": "com.amazonaws.codeconnections#PullRequestComment", + "traits": { + "smithy.api#documentation": "

A toggle that specifies whether to enable or disable pull request comments for the sync configuration to be created.

" + } } }, "traits": { @@ -4071,6 +4100,12 @@ "traits": { "smithy.api#documentation": "

When to trigger Git sync to begin the stack update.

" } + }, + "PullRequestComment": { + "target": "com.amazonaws.codeconnections#PullRequestComment", + "traits": { + "smithy.api#documentation": "

A toggle that specifies whether to enable or disable pull request comments for the sync configuration to be updated.

" + } } }, "traits": { diff --git a/models/codepipeline.json b/models/codepipeline.json index ba65ae5316..b82aaef79f 100644 --- a/models/codepipeline.json +++ b/models/codepipeline.json @@ -250,6 +250,12 @@ "traits": { "smithy.api#enumValue": "Approval" } + }, + "Compute": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Compute" + } } } }, @@ -446,6 +452,12 @@ "smithy.api#documentation": "

The action's configuration. These are key-value pairs that specify input values for\n an action. For more information, see Action Structure Requirements in CodePipeline. For the list of\n configuration properties for the CloudFormation action type in CodePipeline, see Configuration Properties Reference in the CloudFormation\n User Guide. For template snippets with examples, see Using Parameter Override Functions with CodePipeline Pipelines in the\n CloudFormation User Guide.

\n

The values can be represented in either JSON or YAML format. For example, the JSON\n configuration item format is as follows:

\n

\n JSON:\n

\n

\n \"Configuration\" : { Key : Value },\n

" } }, + "commands": { + "target": "com.amazonaws.codepipeline#CommandList", + "traits": { + "smithy.api#documentation": "

The shell commands to run with your compute action in CodePipeline. All commands\n are supported except multi-line formats. While CodeBuild logs and permissions\n are used, you do not need to create any resources in CodeBuild.

\n \n

Using compute time for this action will incur separate charges in CodeBuild.

\n
" + } + }, "outputArtifacts": { "target": "com.amazonaws.codepipeline#OutputArtifactList", "traits": { @@ -458,6 +470,12 @@ "smithy.api#documentation": "

The name or ID of the artifact consumed by the action, such as a test or build\n artifact.

" } }, + "outputVariables": { + "target": "com.amazonaws.codepipeline#OutputVariableList", + "traits": { + "smithy.api#documentation": "

The list of variables that are to be exported from the compute action. These are specifically the CodeBuild environment variables used for that action.
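The sketch below shows how the new commands, output artifact files, and outputVariables fields might combine in a compute action declaration; the Soto member names, enum cases, and the Commands provider name are assumptions, not verified values.

```swift
import SotoCodePipeline

// Assumed-name sketch: a Compute-category action that runs shell commands,
// exports one CodeBuild environment variable, and emits a single output file.
let computeAction = CodePipeline.ActionDeclaration(
    actionTypeId: .init(category: .compute, owner: .aws, provider: "Commands", version: "1"),
    commands: ["npm ci", "npm test"],
    name: "RunCommands",
    outputArtifacts: [.init(files: ["reports/results.txt"], name: "CommandOutput")],
    outputVariables: ["AWS_DEFAULT_REGION"]
)
```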

" + } + }, "roleArn": { "target": "com.amazonaws.codepipeline#RoleArn", "traits": { @@ -1916,7 +1934,7 @@ "name": "codepipeline" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "CodePipeline\n

Overview

This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide.

You can use the CodePipeline API to work with pipelines, stages, actions, and transitions.

Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions.

You can work with pipelines by calling:

  • CreatePipeline, which creates a uniquely named pipeline.
  • DeletePipeline, which deletes the specified pipeline.
  • GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN).
  • GetPipelineExecution, which returns information about a specific execution of a pipeline.
  • GetPipelineState, which returns information about the current state of the stages and actions of a pipeline.
  • ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details.
  • ListPipelines, which gets a summary of all of the pipelines associated with your account.
  • ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline.
  • StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline.
  • StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline.
  • UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline.

Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference.

Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are:

  • Source
  • Build
  • Test
  • Deploy
  • Approval
  • Invoke

Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

You can work with transitions by calling:

  • DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline.
  • EnableStageTransition, which enables transition of artifacts between stages in a pipeline.

Using the API to integrate with CodePipeline

For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items:

Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

You can work with jobs by calling:

  • AcknowledgeJob, which confirms whether a job worker has received the specified job.
  • GetJobDetails, which returns the details of a job.
  • PollForJobs, which determines whether there are any jobs to act on.
  • PutJobFailureResult, which provides details of a job failure.
  • PutJobSuccessResult, which provides details of a job success.

Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network.

You can work with third party jobs by calling:

\n ", + "smithy.api#documentation": "CodePipeline\n

Overview

This is the CodePipeline API Reference. This guide provides descriptions of the actions and data types for CodePipeline. Some functionality for your pipeline can only be configured through the API. For more information, see the CodePipeline User Guide.

You can use the CodePipeline API to work with pipelines, stages, actions, and transitions.

Pipelines are models of automated release processes. Each pipeline is uniquely named, and consists of stages, actions, and transitions.

You can work with pipelines by calling:

  • CreatePipeline, which creates a uniquely named pipeline.
  • DeletePipeline, which deletes the specified pipeline.
  • GetPipeline, which returns information about the pipeline structure and pipeline metadata, including the pipeline Amazon Resource Name (ARN).
  • GetPipelineExecution, which returns information about a specific execution of a pipeline.
  • GetPipelineState, which returns information about the current state of the stages and actions of a pipeline.
  • ListActionExecutions, which returns action-level details for past executions. The details include full stage and action-level details, including individual action duration, status, any errors that occurred during the execution, and input and output artifact location details.
  • ListPipelines, which gets a summary of all of the pipelines associated with your account.
  • ListPipelineExecutions, which gets a summary of the most recent executions for a pipeline.
  • StartPipelineExecution, which runs the most recent revision of an artifact through the pipeline.
  • StopPipelineExecution, which stops the specified pipeline execution from continuing through the pipeline.
  • UpdatePipeline, which updates a pipeline with edits or changes to the structure of the pipeline.

Pipelines include stages. Each stage contains one or more actions that must complete before the next stage begins. A stage results in success or failure. If a stage fails, the pipeline stops at that stage and remains stopped until either a new version of an artifact appears in the source location, or a user takes action to rerun the most recent artifact through the pipeline. You can call GetPipelineState, which displays the status of a pipeline, including the status of stages in the pipeline, or GetPipeline, which returns the entire structure of the pipeline, including the stages of that pipeline. For more information about the structure of stages and actions, see CodePipeline Pipeline Structure Reference.

Pipeline stages include actions that are categorized into categories such as source or build actions performed in a stage of a pipeline. For example, you can use a source action to import artifacts into a pipeline from a source such as Amazon S3. Like stages, you do not work with actions directly in most cases, but you do define and interact with actions when working with pipeline operations such as CreatePipeline and GetPipelineState. Valid action categories are:

  • Source
  • Build
  • Test
  • Deploy
  • Approval
  • Invoke
  • Compute

Pipelines also include transitions, which allow the transition of artifacts from one stage to the next in a pipeline after the actions in one stage complete.

You can work with transitions by calling:

  • DisableStageTransition, which prevents artifacts from transitioning to the next stage in a pipeline.
  • EnableStageTransition, which enables transition of artifacts between stages in a pipeline.

Using the API to integrate with CodePipeline

For third-party integrators or developers who want to create their own integrations with CodePipeline, the expected sequence varies from the standard API user. To integrate with CodePipeline, developers need to work with the following items:

Jobs, which are instances of an action. For example, a job for a source action might import a revision of an artifact from a source.

You can work with jobs by calling:

  • AcknowledgeJob, which confirms whether a job worker has received the specified job.
  • GetJobDetails, which returns the details of a job.
  • PollForJobs, which determines whether there are any jobs to act on.
  • PutJobFailureResult, which provides details of a job failure.
  • PutJobSuccessResult, which provides details of a job success.

Third party jobs, which are instances of an action created by a partner action and integrated into CodePipeline. Partner actions are created by members of the Amazon Web Services Partner Network.

You can work with third party jobs by calling:

\n ", "smithy.api#title": "AWS CodePipeline", "smithy.api#xmlNamespace": { "uri": "http://codepipeline.amazonaws.com/doc/2015-07-09/" @@ -2863,6 +2881,27 @@ } } }, + "com.amazonaws.codepipeline#Command": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.codepipeline#CommandList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#Command" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, "com.amazonaws.codepipeline#ConcurrentModificationException": { "type": "structure", "members": { @@ -2893,7 +2932,7 @@ "result": { "target": "com.amazonaws.codepipeline#Result", "traits": { - "smithy.api#documentation": "

The action to be done when the condition is met. For example, rolling back an execution for a failure condition.

" + "smithy.api#documentation": "

The action to be done when the condition is met. For example, rolling back an\n execution for a failure condition.

" } }, "rules": { @@ -2904,7 +2943,7 @@ } }, "traits": { - "smithy.api#documentation": "

The condition for the stage. A condition is made up of the rules and the result for the condition.

" + "smithy.api#documentation": "

The condition for the stage. A condition is made up of the rules and the result for\n the condition.

" } }, "com.amazonaws.codepipeline#ConditionExecution": { @@ -3840,6 +3879,12 @@ "smithy.api#documentation": "

The specified result for when the failure conditions are met, such as rolling back the\n stage.

" } }, + "retryConfiguration": { + "target": "com.amazonaws.codepipeline#RetryConfiguration", + "traits": { + "smithy.api#documentation": "

The retry configuration specifies automatic retry for a failed stage, along with the\n configured retry mode.

" + } + }, "conditions": { "target": "com.amazonaws.codepipeline#ConditionList", "traits": { @@ -3920,6 +3965,27 @@ } } }, + "com.amazonaws.codepipeline#FilePath": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.codepipeline#FilePathList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#FilePath" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.codepipeline#GetActionType": { "type": "operation", "input": { @@ -5454,7 +5520,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists the rule executions that have occurred in a pipeline configured for conditions with rules.

", + "smithy.api#documentation": "

Lists the rule executions that have occurred in a pipeline configured for conditions\n with rules.

", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -5488,7 +5554,7 @@ "nextToken": { "target": "com.amazonaws.codepipeline#NextToken", "traits": { - "smithy.api#documentation": "

The token that was returned from the previous ListRuleExecutions\n call, which can be used to return the next set of rule executions in the\n list.

" + "smithy.api#documentation": "

The token that was returned from the previous ListRuleExecutions call,\n which can be used to return the next set of rule executions in the list.

" } } }, @@ -5508,7 +5574,7 @@ "nextToken": { "target": "com.amazonaws.codepipeline#NextToken", "traits": { - "smithy.api#documentation": "

A token that can be used in the next ListRuleExecutions call. To\n view all items in the list, continue to call this operation with each subsequent token\n until no more nextToken values are returned.

" + "smithy.api#documentation": "

A token that can be used in the next ListRuleExecutions call. To view all\n items in the list, continue to call this operation with each subsequent token until no\n more nextToken values are returned.

" } } }, @@ -5894,6 +5960,12 @@ "smithy.api#documentation": "

The name of the output of an artifact, such as \"My App\".

\n

The input artifact of an action must exactly match the output artifact declared in\n a preceding action, but the input artifact does not have to be the next action in strict\n sequence from the action that provided the output artifact. Actions in parallel can\n declare different output artifacts, which are in turn consumed by different following\n actions.

\n

Output artifact names must be unique within a pipeline.

", "smithy.api#required": {} } + }, + "files": { + "target": "com.amazonaws.codepipeline#FilePathList", + "traits": { + "smithy.api#documentation": "

The files that you want to associate with the output artifact that will be exported\n from the compute action.

" + } } }, "traits": { @@ -5906,6 +5978,27 @@ "target": "com.amazonaws.codepipeline#OutputArtifact" } }, + "com.amazonaws.codepipeline#OutputVariable": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.codepipeline#OutputVariableList": { + "type": "list", + "member": { + "target": "com.amazonaws.codepipeline#OutputVariable" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 15 + } + } + }, "com.amazonaws.codepipeline#OutputVariablesKey": { "type": "string", "traits": { @@ -5998,7 +6091,7 @@ "conditionType": { "target": "com.amazonaws.codepipeline#ConditionType", "traits": { - "smithy.api#documentation": "

The type of condition to override for the stage, such as entry conditions, failure conditions, or success conditions.

", + "smithy.api#documentation": "

The type of condition to override for the stage, such as entry conditions, failure\n conditions, or success conditions.

", "smithy.api#required": {} } } @@ -7517,9 +7610,43 @@ "traits": { "smithy.api#enumValue": "FAIL" } + }, + "RETRY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RETRY" + } + }, + "SKIP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIP" + } } } }, + "com.amazonaws.codepipeline#RetryAttempt": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.codepipeline#RetryConfiguration": { + "type": "structure", + "members": { + "retryMode": { + "target": "com.amazonaws.codepipeline#StageRetryMode", + "traits": { + "smithy.api#documentation": "

The method that you want to configure for automatic stage retry on stage failure. You can specify whether to retry only the failed actions in the stage or all actions in the stage.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The retry configuration specifies automatic retry for a failed stage, along with the\n configured retry mode.
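As a rough sketch, the new retry configuration might be attached to a stage's failure conditions as follows; the structure name FailureConditions and the .failedActions case are assumed from context rather than confirmed against the generated Soto code.

```swift
import SotoCodePipeline

// Assumed-name sketch: on stage failure, automatically retry only the failed
// actions (one automatic retry attempt is allowed per stage).
let onFailure = CodePipeline.FailureConditions(
    retryConfiguration: .init(retryMode: .failedActions)
)
```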

" + } + }, "com.amazonaws.codepipeline#RetryStageExecution": { "type": "operation", "input": { @@ -7607,6 +7734,49 @@ "smithy.api#output": {} } }, + "com.amazonaws.codepipeline#RetryStageMetadata": { + "type": "structure", + "members": { + "autoStageRetryAttempt": { + "target": "com.amazonaws.codepipeline#RetryAttempt", + "traits": { + "smithy.api#documentation": "

The number of attempts for a specific stage with automatic retry on stage failure. One attempt is allowed for automatic stage retry on failure.

" + } + }, + "manualStageRetryAttempt": { + "target": "com.amazonaws.codepipeline#RetryAttempt", + "traits": { + "smithy.api#documentation": "

The number of attempts for a specific stage where manual retries have been made upon stage failure.

" + } + }, + "latestRetryTrigger": { + "target": "com.amazonaws.codepipeline#RetryTrigger", + "traits": { + "smithy.api#documentation": "

The latest trigger for a specific stage where manual or automatic retries have been made upon stage failure.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a specific automatic retry on stage failure, including the attempt number and trigger.

" + } + }, + "com.amazonaws.codepipeline#RetryTrigger": { + "type": "enum", + "members": { + "AutomatedStageRetry": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AutomatedStageRetry" + } + }, + "ManualStageRetry": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ManualStageRetry" + } + } + } + }, "com.amazonaws.codepipeline#Revision": { "type": "string", "traits": { @@ -7796,7 +7966,7 @@ "target": "com.amazonaws.codepipeline#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Indicates whether the property can be queried.

\n

If you create a pipeline with a condition and rule, and that rule contains a queryable property, the value for that configuration property is subject to other\n restrictions. The value must be less than or equal to twenty (20) characters. The value\n can contain only alphanumeric characters, underscores, and hyphens.

" + "smithy.api#documentation": "

Indicates whether the property can be queried.

\n

If you create a pipeline with a condition and rule, and that rule contains a\n queryable property, the value for that configuration property is subject to other\n restrictions. The value must be less than or equal to twenty (20) characters. The value\n can contain only alphanumeric characters, underscores, and hyphens.

" } }, "description": { @@ -7866,14 +8036,14 @@ "name": { "target": "com.amazonaws.codepipeline#RuleName", "traits": { - "smithy.api#documentation": "

The name of the rule that is created for the condition, such as CheckAllResults.

", + "smithy.api#documentation": "

The name of the rule that is created for the condition, such as\n CheckAllResults.

", "smithy.api#required": {} } }, "ruleTypeId": { "target": "com.amazonaws.codepipeline#RuleTypeId", "traits": { - "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

", + "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner,\n provider, and version.

", "smithy.api#required": {} } }, @@ -7886,7 +8056,7 @@ "inputArtifacts": { "target": "com.amazonaws.codepipeline#InputArtifactList", "traits": { - "smithy.api#documentation": "

The input artifacts fields for the rule, such as specifying an input file for the rule.

" + "smithy.api#documentation": "

The input artifacts fields for the rule, such as specifying an input file for the\n rule.

" } }, "roleArn": { @@ -7909,7 +8079,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents information about the rule to be created for an associated condition. An example would be creating a new rule for an entry condition, such as a rule that checks for a test result before allowing the run to enter the deployment stage.

" + "smithy.api#documentation": "

Represents information about the rule to be created for an associated condition. An\n example would be creating a new rule for an entry condition, such as a rule that checks\n for a test result before allowing the run to enter the deployment stage.

" } }, "com.amazonaws.codepipeline#RuleDeclarationList": { @@ -7980,7 +8150,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents information about each time a rule is run as part of the pipeline execution for a pipeline configured with conditions.

" + "smithy.api#documentation": "

Represents information about each time a rule is run as part of the pipeline execution\n for a pipeline configured with conditions.

" } }, "com.amazonaws.codepipeline#RuleExecutionDetail": { @@ -8037,7 +8207,7 @@ "status": { "target": "com.amazonaws.codepipeline#RuleExecutionStatus", "traits": { - "smithy.api#documentation": "

The status of the rule execution. Status categories are InProgress,\n Succeeded, and Failed.\n

" + "smithy.api#documentation": "

The status of the rule execution. Status categories are InProgress,\n Succeeded, and Failed.

" } }, "input": { @@ -8054,7 +8224,7 @@ } }, "traits": { - "smithy.api#documentation": "

The details of the runs for a rule and the results produced on an artifact as it passes\n through stages in the pipeline.

" + "smithy.api#documentation": "

The details of the runs for a rule and the results produced on an artifact as it\n passes through stages in the pipeline.

" } }, "com.amazonaws.codepipeline#RuleExecutionDetailList": { @@ -8095,13 +8265,13 @@ "ruleTypeId": { "target": "com.amazonaws.codepipeline#RuleTypeId", "traits": { - "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

" + "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner,\n provider, and version.

" } }, "configuration": { "target": "com.amazonaws.codepipeline#RuleConfigurationMap", "traits": { - "smithy.api#documentation": "

Configuration data for a rule execution, such as the resolved values for that run.

" + "smithy.api#documentation": "

Configuration data for a rule execution, such as the resolved values for that\n run.

" } }, "resolvedConfiguration": { @@ -8125,7 +8295,7 @@ "inputArtifacts": { "target": "com.amazonaws.codepipeline#ArtifactDetailList", "traits": { - "smithy.api#documentation": "

Details of input artifacts of the rule that correspond to the rule \n execution.

" + "smithy.api#documentation": "

Details of input artifacts of the rule that correspond to the rule execution.

" } } }, @@ -8139,12 +8309,12 @@ "executionResult": { "target": "com.amazonaws.codepipeline#RuleExecutionResult", "traits": { - "smithy.api#documentation": "

Execution result information listed in the output details for a rule\n execution.

" + "smithy.api#documentation": "

Execution result information listed in the output details for a rule execution.

" } } }, "traits": { - "smithy.api#documentation": "

Output details listed for a rule execution, such as the rule execution\n result.

" + "smithy.api#documentation": "

Output details listed for a rule execution, such as the rule execution result.

" } }, "com.amazonaws.codepipeline#RuleExecutionResult": { @@ -8252,21 +8422,21 @@ "revisionId": { "target": "com.amazonaws.codepipeline#Revision", "traits": { - "smithy.api#documentation": "

The system-generated unique ID that identifies the revision number of the\n rule.

", + "smithy.api#documentation": "

The system-generated unique ID that identifies the revision number of the rule.

", "smithy.api#required": {} } }, "revisionChangeId": { "target": "com.amazonaws.codepipeline#RevisionChangeIdentifier", "traits": { - "smithy.api#documentation": "

The unique identifier of the change that set the state to this revision (for\n example, a deployment ID or timestamp).

", + "smithy.api#documentation": "

The unique identifier of the change that set the state to this revision (for example,\n a deployment ID or timestamp).

", "smithy.api#required": {} } }, "created": { "target": "com.amazonaws.codepipeline#Timestamp", "traits": { - "smithy.api#documentation": "

The date and time when the most recent version of the rule was created, in\n timestamp format.

", + "smithy.api#documentation": "

The date and time when the most recent version of the rule was created, in timestamp\n format.

", "smithy.api#required": {} } } @@ -8299,7 +8469,7 @@ "entityUrl": { "target": "com.amazonaws.codepipeline#Url", "traits": { - "smithy.api#documentation": "

A URL link for more information about the state of the action, such as a details page.

" + "smithy.api#documentation": "

A URL link for more information about the state of the action, such as a details\n page.

" } }, "revisionUrl": { @@ -8310,7 +8480,7 @@ } }, "traits": { - "smithy.api#documentation": "

Returns information about the state of a rule.

\n \n

Values returned in the revisionId field indicate the rule revision information, such as the commit ID, for the current state.

\n
" + "smithy.api#documentation": "

Returns information about the state of a rule.

\n \n

Values returned in the revisionId field indicate the rule revision\n information, such as the commit ID, for the current state.

\n
" } }, "com.amazonaws.codepipeline#RuleStateList": { @@ -8358,7 +8528,7 @@ } }, "traits": { - "smithy.api#documentation": "

The rule type, which is made up of the combined values for category, owner, provider, and version.

" + "smithy.api#documentation": "

The rule type, which is made up of the combined values for category, owner, provider,\n and version.

" } }, "com.amazonaws.codepipeline#RuleTypeId": { @@ -8367,14 +8537,14 @@ "category": { "target": "com.amazonaws.codepipeline#RuleCategory", "traits": { - "smithy.api#documentation": "

A category defines what kind of rule can be run in the stage, and constrains the provider\n type for the rule. The valid category is Rule.

", + "smithy.api#documentation": "

A category defines what kind of rule can be run in the stage, and constrains the\n provider type for the rule. The valid category is Rule.

", "smithy.api#required": {} } }, "owner": { "target": "com.amazonaws.codepipeline#RuleOwner", "traits": { - "smithy.api#documentation": "

The creator of the rule being called. The valid value for the\n Owner field in the rule category is AWS.

" + "smithy.api#documentation": "

The creator of the rule being called. The valid value for the Owner field\n in the rule category is AWS.

" } }, "provider": { @@ -8392,7 +8562,7 @@ } }, "traits": { - "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner, provider, and version.

" + "smithy.api#documentation": "

The ID for the rule type, which is made up of the combined values for category, owner,\n provider, and version.

" } }, "com.amazonaws.codepipeline#RuleTypeList": { @@ -8407,7 +8577,7 @@ "thirdPartyConfigurationUrl": { "target": "com.amazonaws.codepipeline#Url", "traits": { - "smithy.api#documentation": "

The URL of a sign-up page where users can sign up for an external service and\n perform initial configuration of the action provided by that service.

" + "smithy.api#documentation": "

The URL of a sign-up page where users can sign up for an external service and perform\n initial configuration of the action provided by that service.

" } }, "entityUrlTemplate": { @@ -8585,7 +8755,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list that allows you to specify, or override, the source revision for a pipeline\n execution that's being started. A source revision is the version with all the changes to\n your application code, or source artifact, for the pipeline execution.

\n \n

For the S3_OBJECT_VERSION_ID and S3_OBJECT_KEY types of source revisions, either\n of the types can be used independently, or they can be used together to override the\n source with a specific ObjectKey and VersionID.

\n
" + "smithy.api#documentation": "

A list that allows you to specify, or override, the source revision for a pipeline\n execution that's being started. A source revision is the version with all the changes to\n your application code, or source artifact, for the pipeline execution.

\n \n

For the S3_OBJECT_VERSION_ID and S3_OBJECT_KEY types of\n source revisions, either of the types can be used independently, or they can be used\n together to override the source with a specific ObjectKey and VersionID.

\n
" } }, "com.amazonaws.codepipeline#SourceRevisionOverrideList": { @@ -8727,13 +8897,13 @@ "onSuccess": { "target": "com.amazonaws.codepipeline#SuccessConditions", "traits": { - "smithy.api#documentation": "

The method to use when a stage has succeeded. For example,\n configuring this field for conditions will allow the stage to succeed when the conditions are met.

" + "smithy.api#documentation": "

The method to use when a stage has succeeded. For example, configuring this field for\n conditions will allow the stage to succeed when the conditions are met.

" } }, "beforeEntry": { "target": "com.amazonaws.codepipeline#BeforeEntryConditions", "traits": { - "smithy.api#documentation": "

The method to use when a stage allows entry. For example, configuring this field for conditions will allow entry to the stage when the conditions are met.

" + "smithy.api#documentation": "

The method to use when a stage allows entry. For example, configuring this field for\n conditions will allow entry to the stage when the conditions are met.

" } } }, @@ -8813,6 +8983,12 @@ "traits": { "smithy.api#enumValue": "Succeeded" } + }, + "Skipped": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Skipped" + } } } }, @@ -8926,6 +9102,12 @@ "traits": { "smithy.api#documentation": "

The state of the failure conditions for a stage.

" } + }, + "retryStageMetadata": { + "target": "com.amazonaws.codepipeline#RetryStageMetadata", + "traits": { + "smithy.api#documentation": "

The details of a specific automatic retry on stage failure, including the attempt number and trigger.

" + } } }, "traits": { diff --git a/models/connect.json b/models/connect.json index eecb6ace4e..2f5c1aea08 100644 --- a/models/connect.json +++ b/models/connect.json @@ -1454,6 +1454,9 @@ { "target": "com.amazonaws.connect#StartContactStreaming" }, + { + "target": "com.amazonaws.connect#StartOutboundChatContact" + }, { "target": "com.amazonaws.connect#StartOutboundVoiceContact" }, @@ -2708,7 +2711,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Associates the specified dataset for a Amazon Connect instance with the target account.\n You can associate only one dataset in a single call.

", + "smithy.api#documentation": "

Associates the specified dataset for a Amazon Connect instance with the target account.\n You can associate only one dataset in a single call.

", "smithy.api#http": { "method": "PUT", "uri": "/analytics-data/instance/{InstanceId}/association", @@ -4274,7 +4277,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Associates a list of analytics datasets for a given Amazon Connect instance to a target\n account. You can associate multiple datasets in a single call.

", + "smithy.api#documentation": "

Associates a list of analytics datasets for a given Amazon Connect instance to a target\n account. You can associate multiple datasets in a single call.

", "smithy.api#http": { "method": "PUT", "uri": "/analytics-data/instance/{InstanceId}/associations", @@ -4357,7 +4360,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Removes a list of analytics datasets associated with a given Amazon Connect instance.\n You can disassociate multiple datasets in a single call.

", + "smithy.api#documentation": "

Removes a list of analytics datasets associated with a given Amazon Connect instance.\n You can disassociate multiple datasets in a single call.

", "smithy.api#http": { "method": "POST", "uri": "/analytics-data/instance/{InstanceId}/associations", @@ -11304,7 +11307,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Describes the specified contact.

\n \n

Contact information remains available in Amazon Connect for 24 months, and then it is\n deleted.

\n

Only data from November 12, 2021, and later is returned by this\n API.

\n
", + "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Describes the specified contact.

\n \n

Contact information remains available in Amazon Connect for 24 months from the\n InitiationTimestamp, and then it is deleted. Only contact information that is available in\n Amazon Connect is returned by this API.

\n
", "smithy.api#http": { "method": "GET", "uri": "/contacts/{InstanceId}/{ContactId}", @@ -13124,7 +13127,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Removes the dataset ID associated with a given Amazon Connect instance.

", + "smithy.api#documentation": "

Removes the dataset ID associated with a given Amazon Connect instance.

", "smithy.api#http": { "method": "POST", "uri": "/analytics-data/instance/{InstanceId}/association", @@ -14234,6 +14237,12 @@ "traits": { "smithy.api#enumValue": "CONTACT_FLOW" } + }, + "CONNECT_PHONENUMBER_ARN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CONNECT_PHONENUMBER_ARN" + } } } }, @@ -16934,7 +16943,7 @@ "Metrics": { "target": "com.amazonaws.connect#MetricsV2", "traits": { - "smithy.api#documentation": "

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.

\n
\n
ABANDONMENT_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Abandonment rate\n

\n
\n
AGENT_ADHERENT_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherent time\n

\n
\n
AGENT_ANSWER_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent answer rate\n

\n
\n
AGENT_NON_ADHERENT_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-adherent time\n

\n
\n
AGENT_NON_RESPONSE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent\n non-response\n

\n
\n
AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

\n

UI name: Agent non-response without customer abandons\n

\n
\n
AGENT_OCCUPANCY
\n
\n

Unit: Percentage

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Occupancy\n

\n
\n
AGENT_SCHEDULE_ADHERENCE
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherence\n

\n
\n
AGENT_SCHEDULED_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Scheduled time\n

\n
\n
AVG_ABANDON_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue abandon time\n

\n
\n
AVG_ACTIVE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average active time\n

\n
\n
AVG_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average after contact work time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_AGENT_CONNECTING_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. For now, this metric only\n supports the following as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
AVG_AGENT_PAUSE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average agent pause time\n

\n
\n
AVG_CASE_RELATED_CONTACTS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average contacts per case\n

\n
\n
AVG_CASE_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average case resolution time\n

\n
\n
AVG_CONTACT_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average contact duration\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_CONVERSATION_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average conversation duration\n

\n
\n
AVG_DIALS_PER_MINUTE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent, Queue, Routing Profile

\n

UI name: Average dials per minute\n

\n
\n
AVG_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Average flow time\n

\n
\n
AVG_GREETING_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent greeting time\n

\n
\n
AVG_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Average handle time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME_ALL_CONTACTS
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time all contacts\n

\n
\n
AVG_HOLDS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average holds\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction and customer hold time\n

\n
\n
AVG_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERRUPTIONS_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruptions\n

\n
\n
AVG_INTERRUPTION_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruption time\n

\n
\n
AVG_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average non-talk time\n

\n
\n
AVG_QUEUE_ANSWER_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue answer time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Average resolution time\n

\n
\n
AVG_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average talk time\n

\n
\n
AVG_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent talk time\n

\n
\n
AVG_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer talk time\n

\n
\n
AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Campaign

\n

UI name: Average wait time after customer connection\n

\n
\n
CAMPAIGN_CONTACTS_ABANDONED_AFTER_X
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter GT (for\n Greater than).

\n

UI name: Campaign contacts abandoned after X\n

\n
\n
CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Percent

\n

Valid groupings and filters: Campaign, Agent

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter GT (for\n Greater than).

\n

UI name: Campaign contacts abandoned after X rate\n

\n
\n
CASES_CREATED
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases created\n

\n
\n
CONTACTS_CREATED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts created\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: API contacts handled\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts handled (connected to agent timestamp)\n

\n
\n
CONTACTS_HOLD_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts hold disconnect\n

\n
\n
CONTACTS_ON_HOLD_AGENT_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold agent disconnect\n

\n
\n
CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold customer disconnect\n

\n
\n
CONTACTS_PUT_ON_HOLD
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts put on hold\n

\n
\n
CONTACTS_TRANSFERRED_OUT_EXTERNAL
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out external\n

\n
\n
CONTACTS_TRANSFERRED_OUT_INTERNAL
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out internal\n

\n
\n
CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts queued\n

\n
\n
CONTACTS_QUEUED_BY_ENQUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts queued (enqueue timestamp)\n

\n
\n
CONTACTS_REMOVED_FROM_QUEUE_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts removed from queue in X seconds\n

\n
\n
CONTACTS_RESOLVED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts resolved in X\n

\n
\n
CONTACTS_TRANSFERRED_OUT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_TRANSFERRED_OUT_BY_AGENT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out by agent\n

\n
\n
CONTACTS_TRANSFERRED_OUT_FROM_QUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out queue\n

\n
\n
CURRENT_CASES
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Current cases\n

\n
\n
DELIVERY_ATTEMPTS
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status,\n Disconnect Reason

\n

UI name: Delivery attempts\n

\n
\n
DELIVERY_ATTEMPT_DISPOSITION_RATE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns analytics, and\n with the answering machine detection enabled.

\n

Unit: Percent

\n

Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason

\n \n

Answering Machine Detection Status and Disconnect Reason are valid filters but not valid\n groupings.

\n
\n

UI name: Delivery attempt disposition rate\n

\n
\n
FLOWS_OUTCOME
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome\n

\n
\n
FLOWS_STARTED
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows started\n

\n
\n
HUMAN_ANSWERED_CALLS
\n
\n

This metric is available only for contacts analyzed by outbound campaigns analytics, and\n with the answering machine detection enabled.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent

\n

UI name: Human answered\n

\n
\n
MAX_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Maximum flow time\n

\n
\n
MAX_QUEUED_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Maximum queued time\n

\n
\n
MIN_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Minimum flow time\n

\n
\n
PERCENT_CASES_FIRST_CONTACT_RESOLVED
\n
\n

Unit: Percent

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved on first contact\n

\n
\n
PERCENT_CONTACTS_STEP_EXPIRED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
PERCENT_CONTACTS_STEP_JOINED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
PERCENT_FLOWS_OUTCOME
\n
\n

Unit: Percent

\n

Valid metric filter key: FLOWS_OUTCOME_TYPE\n

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome percentage.

\n \n

The FLOWS_OUTCOME_TYPE is not a valid grouping.

\n
\n
\n
PERCENT_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Non-talk\n time percent\n

\n
\n
PERCENT_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Talk time\n percent\n

\n
\n
PERCENT_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Agent\n talk time percent\n

\n
\n
PERCENT_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Customer talk time percent\n

\n
\n
REOPENED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases reopened\n

\n
\n
RESOLVED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved\n

\n
\n
SERVICE_LEVEL
\n
\n

You can include up to 20 SERVICE_LEVEL metrics in a request.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Service level X\n

\n
\n
STEP_CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in Real-time Metrics UI but not on the Historical\n Metrics UI.

\n
\n
SUM_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: After\n contact work time\n

\n
\n
SUM_CONNECTING_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. This metric only supports the\n following filter keys as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
CONTACTS_ABANDONED
\n
\n

Unit: Count

\n

Metric filter:

\n
    \n
  • \n

    Valid values: API| Incoming | Outbound |\n Transfer | Callback | Queue_Transfer|\n Disconnect\n

    \n
  • \n
\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: Contact abandoned\n

\n
\n
SUM_CONTACTS_ABANDONED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts abandoned in X seconds\n

\n
\n
SUM_CONTACTS_ANSWERED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter LT (for\n \"Less than\").

\n

UI name: Contacts answered in X seconds\n

\n
\n
SUM_CONTACT_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact flow time\n

\n
\n
SUM_CONTACT_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent on contact time\n

\n
\n
SUM_CONTACTS_DISCONNECTED
\n
\n

Valid metric filter key: DISCONNECT_REASON\n

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contact disconnected\n

\n
\n
SUM_ERROR_STATUS_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Error status time\n

\n
\n
SUM_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact handle time\n

\n
\n
SUM_HOLD_TIME
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Customer hold time\n

\n
\n
SUM_IDLE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent idle time\n

\n
\n
SUM_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Agent interaction and hold time\n

\n
\n
SUM_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent interaction time\n

\n
\n
SUM_NON_PRODUCTIVE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-Productive Time\n

\n
\n
SUM_ONLINE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Online time\n

\n
\n
SUM_RETRY_CALLBACK_ATTEMPTS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Callback attempts\n

\n
\n
", + "smithy.api#documentation": "

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The\n following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator\n Guide.

\n
\n
ABANDONMENT_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Abandonment rate\n

\n
\n
AGENT_ADHERENT_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherent time\n

\n
\n
AGENT_ANSWER_RATE
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent answer rate\n

\n
\n
AGENT_NON_ADHERENT_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-adherent time\n

\n
\n
AGENT_NON_RESPONSE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent\n non-response\n

\n
\n
AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

\n

UI name: Agent non-response without customer abandons\n

\n
\n
AGENT_OCCUPANCY
\n
\n

Unit: Percentage

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Occupancy\n

\n
\n
AGENT_SCHEDULE_ADHERENCE
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Adherence\n

\n
\n
AGENT_SCHEDULED_TIME
\n
\n

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Scheduled time\n

\n
\n
AVG_ABANDON_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue abandon time\n

\n
\n
AVG_ACTIVE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average active time\n

\n
\n
AVG_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average after contact work time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_AGENT_CONNECTING_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. For now, this metric only\n supports the following as INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Average agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
AVG_AGENT_PAUSE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Average agent pause time\n

\n
\n
AVG_CASE_RELATED_CONTACTS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average contacts per case\n

\n
\n
AVG_CASE_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Average case resolution time\n

\n
\n
AVG_CONTACT_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average contact duration\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_CONVERSATION_DURATION
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average conversation duration\n

\n
\n
AVG_DIALS_PER_MINUTE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent, Queue, Routing Profile

\n

UI name: Average dials per minute\n

\n
\n
AVG_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Average flow time\n

\n
\n
AVG_GREETING_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent greeting time\n

\n
\n
AVG_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression

\n

UI name: Average handle time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_HOLD_TIME_ALL_CONTACTS
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer hold time all contacts\n

\n
\n
AVG_HOLDS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average holds\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction and customer hold time\n

\n
\n
AVG_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interaction time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_INTERRUPTIONS_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruptions\n

\n
\n
AVG_INTERRUPTION_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent interruption time\n

\n
\n
AVG_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average non-talk time\n

\n
\n
AVG_QUEUE_ANSWER_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average queue answer time\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
AVG_RESOLUTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Average resolution time\n

\n
\n
AVG_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average talk time\n

\n
\n
AVG_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average agent talk time\n

\n
\n
AVG_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Average customer talk time\n

\n
\n
AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Seconds

\n

Valid groupings and filters: Campaign

\n

UI name: Average wait time after customer connection\n

\n
\n
CAMPAIGN_CONTACTS_ABANDONED_AFTER_X
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter GT (for\n Greater than).

\n

UI name: Campaign contacts abandoned after X\n

\n
\n
CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Percent

\n

Valid groupings and filters: Campaign, Agent

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you must enter GT (for\n Greater than).

\n

UI name: Campaign contacts abandoned after X rate\n

\n
\n
CASES_CREATED
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases created\n

\n
\n
CONTACTS_CREATED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts created\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: API contacts handled\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT
\n
\n

Unit: Count

\n

Valid metric filter key: INITIATION_METHOD\n

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts handled (connected to agent timestamp)\n

\n
\n
CONTACTS_HOLD_ABANDONS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts hold disconnect\n

\n
\n
CONTACTS_ON_HOLD_AGENT_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold agent disconnect\n

\n
\n
CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts hold customer disconnect\n

\n
\n
CONTACTS_PUT_ON_HOLD
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts put on hold\n

\n
\n
CONTACTS_TRANSFERRED_OUT_EXTERNAL
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out external\n

\n
\n
CONTACTS_TRANSFERRED_OUT_INTERNAL
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contacts transferred out internal\n

\n
\n
CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts queued\n

\n
\n
CONTACTS_QUEUED_BY_ENQUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype

\n

UI name: Contacts queued (enqueue timestamp)\n

\n
\n
CONTACTS_REMOVED_FROM_QUEUE_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you can use LT (for \"Less\n than\") or LTE (for \"Less than equal\").

\n

UI name: Contacts removed from queue in X seconds\n

\n
\n
CONTACTS_RESOLVED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you can use LT (for \"Less\n than\") or LTE (for \"Less than equal\").

\n

UI name: Contacts resolved in X\n

\n
\n
CONTACTS_TRANSFERRED_OUT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out\n

\n \n

Feature is a valid filter but not a valid grouping.

\n
\n
\n
CONTACTS_TRANSFERRED_OUT_BY_AGENT
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out by agent\n

\n
\n
CONTACTS_TRANSFERRED_OUT_FROM_QUEUE
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contacts transferred out queue\n

\n
\n
CURRENT_CASES
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Current cases\n

\n
\n
DELIVERY_ATTEMPTS
\n
\n

This metric is available only for contacts analyzed by outbound campaigns\n analytics.

\n

Unit: Count

\n

Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status,\n Disconnect Reason

\n

UI name: Delivery attempts\n

\n
\n
DELIVERY_ATTEMPT_DISPOSITION_RATE
\n
\n

This metric is available only for contacts analyzed by outbound campaigns analytics, and\n with answering machine detection enabled.

\n

Unit: Percent

\n

Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS,\n DISCONNECT_REASON\n

\n

Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason

\n \n

Answering Machine Detection Status and Disconnect Reason are valid filters but not valid\n groupings.

\n
\n

UI name: Delivery attempt disposition rate\n

\n
\n
FLOWS_OUTCOME
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome\n

\n
\n
FLOWS_STARTED
\n
\n

Unit: Count

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows started\n

\n
\n
HUMAN_ANSWERED_CALLS
\n
\n

This metric is available only for contacts analyzed by outbound campaigns analytics, and\n with answering machine detection enabled.

\n

Unit: Count

\n

Valid groupings and filters: Campaign, Agent

\n

UI name: Human answered\n

\n
\n
MAX_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Maximum flow time\n

\n
\n
MAX_QUEUED_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Maximum queued time\n

\n
\n
MIN_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Minimum flow time\n

\n
\n
PERCENT_CASES_FIRST_CONTACT_RESOLVED
\n
\n

Unit: Percent

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved on first contact\n

\n
\n
PERCENT_CONTACTS_STEP_EXPIRED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in the Real-time Metrics UI but not in the Historical\n Metrics UI.

\n
\n
PERCENT_CONTACTS_STEP_JOINED
\n
\n

Unit: Percent

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in the Real-time Metrics UI but not in the Historical\n Metrics UI.

\n
\n
PERCENT_FLOWS_OUTCOME
\n
\n

Unit: Percent

\n

Valid metric filter key: FLOWS_OUTCOME_TYPE\n

\n

Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module\n resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows\n resource ID, Initiation method, Resource published timestamp

\n

UI name: Flows outcome percentage.

\n \n

The FLOWS_OUTCOME_TYPE is not a valid grouping.

\n
\n
\n
PERCENT_NON_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Non-talk\n time percent\n

\n
\n
PERCENT_TALK_TIME
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Talk time\n percent\n

\n
\n
PERCENT_TALK_TIME_AGENT
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Agent\n talk time percent\n

\n
\n
PERCENT_TALK_TIME_CUSTOMER
\n
\n

This metric is available only for contacts analyzed by Contact Lens conversational\n analytics.

\n

Unit: Percentage

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Customer talk time percent\n

\n
\n
REOPENED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases reopened\n

\n
\n
RESOLVED_CASE_ACTIONS
\n
\n

Unit: Count

\n

Required filter key: CASE_TEMPLATE_ARN

\n

Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS

\n

UI name: Cases resolved\n

\n
\n
SERVICE_LEVEL
\n
\n

You can include up to 20 SERVICE_LEVEL metrics in a request.

\n

Unit: Percent

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you can use LT (for \"Less\n than\") or LTE (for \"Less than or equal to\").

\n

UI name: Service level X\n

\n
\n
STEP_CONTACTS_QUEUED
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, RoutingStepExpression

\n

UI name: This metric is available in the Real-time Metrics UI but not in the Historical\n Metrics UI.

\n
\n
SUM_AFTER_CONTACT_WORK_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: After\n contact work time\n

\n
\n
SUM_CONNECTING_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid metric filter key: INITIATION_METHOD. This metric supports only the\n following values for INITIATION_METHOD: INBOUND |\n OUTBOUND | CALLBACK | API\n

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent API connecting time\n

\n \n

The Negate key in Metric Level Filters is not applicable for this\n metric.

\n
\n
\n
CONTACTS_ABANDONED
\n
\n

Unit: Count

\n

Metric filter:

\n
    \n
  • \n

Valid values: API | Incoming | Outbound |\n Transfer | Callback | Queue_Transfer |\n Disconnect\n

    \n
  • \n
\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect

\n

UI name: Contact abandoned\n

\n
\n
SUM_CONTACTS_ABANDONED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you can use LT (for \"Less\n than\") or LTE (for \"Less than or equal to\").

\n

UI name: Contacts abandoned in X seconds\n

\n
\n
SUM_CONTACTS_ANSWERED_IN_X
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

Threshold: For ThresholdValue, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison, you can use LT (for \"Less\n than\") or LTE (for \"Less than or equal to\").

\n

UI name: Contacts answered in X seconds\n

\n
\n
SUM_CONTACT_FLOW_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact flow time\n

\n
\n
SUM_CONTACT_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent on contact time\n

\n
\n
SUM_CONTACTS_DISCONNECTED
\n
\n

Valid metric filter key: DISCONNECT_REASON\n

\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy,\n contact/segmentAttributes/connect:Subtype, Q in Connect

\n

UI name: Contact disconnected\n

\n
\n
SUM_ERROR_STATUS_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Error status time\n

\n
\n
SUM_HANDLE_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Contact handle time\n

\n
\n
SUM_HOLD_TIME
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Customer hold time\n

\n
\n
SUM_IDLE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent idle time\n

\n
\n
SUM_INTERACTION_AND_HOLD_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect

\n

UI name: Agent interaction and hold time\n

\n
\n
SUM_INTERACTION_TIME
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

\n

UI name: Agent interaction time\n

\n
\n
SUM_NON_PRODUCTIVE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Non-Productive Time\n

\n
\n
SUM_ONLINE_TIME_AGENT
\n
\n

Unit: Seconds

\n

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

\n

UI name: Online time\n

\n
\n
SUM_RETRY_CALLBACK_ATTEMPTS
\n
\n

Unit: Count

\n

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype,\n Q in Connect

\n

UI name: Callback attempts\n

\n
\n
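For orientation, the metric definitions above are consumed through the Connect GetMetricDataV2 operation. Below is a minimal, hedged Soto sketch; the instance ARN, queue ID, and time range are placeholders, and the exact generated member names can vary slightly between Soto releases.

```swift
// Sketch only: request the SERVICE_LEVEL metric documented above, grouped by queue,
// with an LT threshold of 20 seconds. All identifiers are placeholders.
import Foundation
import SotoConnect

// Client setup differs by Soto version, for example:
//   let awsClient = AWSClient()                                  // Soto 7.x
//   let awsClient = AWSClient(httpClientProvider: .createNew)    // Soto 6.x
//   let connect = Connect(client: awsClient, region: .uswest2)

func fetchServiceLevel(connect: Connect) async throws {
    let request = Connect.GetMetricDataV2Request(
        endTime: Date(),
        filters: [.init(filterKey: "QUEUE", filterValues: ["12345678-1234-1234-1234-123456789012"])],
        groupings: ["QUEUE"],
        metrics: [
            .init(
                name: "SERVICE_LEVEL",
                // Threshold: whole number of seconds from 1 to 604800, compared with LT or LTE.
                threshold: [.init(comparison: "LT", thresholdValue: 20)]
            )
        ],
        resourceArn: "arn:aws:connect:us-west-2:111122223333:instance/EXAMPLE-INSTANCE-ID",
        startTime: Date().addingTimeInterval(-3600)
    )
    let response = try await connect.getMetricDataV2(request)
    print(response.metricResults ?? [])
}
```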
", "smithy.api#required": {} } }, @@ -17362,7 +17371,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 3 + "max": 4 } } }, @@ -19599,7 +19608,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API is in preview release for Amazon Connect and is subject to change.

\n

Lists the association status of requested dataset ID for a given Amazon Connect\n instance.

", + "smithy.api#documentation": "

Lists the association status of the requested dataset ID for a given Amazon Connect\n instance.

", "smithy.api#http": { "method": "GET", "uri": "/analytics-data/instance/{InstanceId}/association", @@ -24195,7 +24204,7 @@ "MetricFilterValues": { "target": "com.amazonaws.connect#MetricFilterValueList", "traits": { - "smithy.api#documentation": "

The values to use for filtering data.

\n

Valid metric filter values for INITIATION_METHOD: INBOUND |\n OUTBOUND | TRANSFER | QUEUE_TRANSFER |\n CALLBACK | API\n

\n

Valid metric filter values for DISCONNECT_REASON:\n CUSTOMER_DISCONNECT | AGENT_DISCONNECT |\n THIRD_PARTY_DISCONNECT | TELECOM_PROBLEM | BARGED |\n CONTACT_FLOW_DISCONNECT | OTHER | EXPIRED |\n API\n

" + "smithy.api#documentation": "

The values to use for filtering data.

\n

Valid metric filter values for INITIATION_METHOD: INBOUND |\n OUTBOUND | TRANSFER | QUEUE_TRANSFER |\n CALLBACK | API | WEBRTC_API | MONITOR |\n DISCONNECT | EXTERNAL_OUTBOUND\n

\n

Valid metric filter values for DISCONNECT_REASON:\n CUSTOMER_DISCONNECT | AGENT_DISCONNECT |\n THIRD_PARTY_DISCONNECT | TELECOM_PROBLEM | BARGED |\n CONTACT_FLOW_DISCONNECT | OTHER | EXPIRED |\n API\n
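As a hedged illustration of the filter values listed above, a metric-level filter can be attached to a metric roughly as follows; member names follow Soto's generated Connect types and may differ slightly between releases.

```swift
// Sketch: count handled contacts whose initiation method was INBOUND or CALLBACK.
import SotoConnect

let handledInbound = Connect.MetricV2(
    metricFilters: [
        .init(
            metricFilterKey: "INITIATION_METHOD",
            metricFilterValues: ["INBOUND", "CALLBACK"],
            negate: false  // set to true to exclude these values instead
        )
    ],
    name: "CONTACTS_HANDLED"
)
```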

" } }, "Negate": { @@ -24744,7 +24753,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about the property value used in automation of a numeric questions. Label values\n are associated with minimum and maximum values for the numeric question.

\n
    \n
  • \n

    Sentiment scores have a minimum value of -5 and maximum value of 5.

    \n
  • \n
  • \n

    Duration labels, such as NON_TALK_TIME, CONTACT_DURATION,\n AGENT_INTERACTION_DURATION, CUSTOMER_HOLD_TIME have a minimum value\n of 0 and maximum value of 28800.

    \n
  • \n
  • \n

    Percentages have a minimum value of 0 and maximum value of 100.

    \n
  • \n
  • \n

    \n NUMBER_OF_INTERRUPTIONS has a minimum value of 0 and maximum value of\n 1000.

    \n
  • \n
" + "smithy.api#documentation": "

Information about the property value used in automation of a numeric question. Label values\n are associated with minimum and maximum values for the numeric question.

\n
    \n
  • \n

    Sentiment scores have a minimum value of -5 and maximum value of 5.

    \n
  • \n
  • \n

    Duration labels, such as NON_TALK_TIME, CONTACT_DURATION,\n AGENT_INTERACTION_DURATION, CUSTOMER_HOLD_TIME have a minimum value\n of 0 and maximum value of 63072000.

    \n
  • \n
  • \n

    Percentages have a minimum value of 0 and maximum value of 100.

    \n
  • \n
  • \n

    \n NUMBER_OF_INTERRUPTIONS has a minimum value of 0 and maximum value of\n 1000.

    \n
  • \n
" } }, "com.amazonaws.connect#OperatingSystem": { @@ -33666,6 +33675,138 @@ "smithy.api#output": {} } }, + "com.amazonaws.connect#StartOutboundChatContact": { + "type": "operation", + "input": { + "target": "com.amazonaws.connect#StartOutboundChatContactRequest" + }, + "output": { + "target": "com.amazonaws.connect#StartOutboundChatContactResponse" + }, + "errors": [ + { + "target": "com.amazonaws.connect#AccessDeniedException" + }, + { + "target": "com.amazonaws.connect#ConflictException" + }, + { + "target": "com.amazonaws.connect#InternalServiceException" + }, + { + "target": "com.amazonaws.connect#InvalidRequestException" + }, + { + "target": "com.amazonaws.connect#LimitExceededException" + }, + { + "target": "com.amazonaws.connect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.connect#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Initiates a new outbound SMS contact to a customer. Response of this API provides the\n ContactId of the outbound SMS contact created.

\n

\n SourceEndpoint only supports Endpoints with\n CONNECT_PHONENUMBER_ARN as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER as\n Type. ContactFlowId initiates the flow to manage the new SMS\n contact created.

\n

This API can be used to initiate outbound SMS contacts for an agent, or to deflect\n an ongoing contact to an outbound SMS contact by using the StartOutboundChatContact Flow Action.

\n

For more information about using SMS in Amazon Connect, see the following topics in the\n Amazon Connect Administrator Guide:

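A minimal, hedged sketch of calling this operation through Soto once the model is regenerated; every identifier below is a placeholder, and the generated member and enum spellings may differ slightly.

```swift
// Sketch: start an outbound SMS contact. Per the documentation above, SourceEndpoint must be a
// CONNECT_PHONENUMBER_ARN endpoint and DestinationEndpoint a TELEPHONE_NUMBER endpoint.
import SotoConnect

func startOutboundSms(connect: Connect) async throws -> String? {
    let request = Connect.StartOutboundChatContactRequest(
        chatDurationInMinutes: 1440,  // optional; defaults to 25 hours when omitted
        contactFlowId: "123ec456-a007-89c0-1234-EXAMPLE",
        destinationEndpoint: .init(address: "+14255550100", type: .telephoneNumber),
        instanceId: "12345678-1234-1234-1234-123456789012",
        // The channel subtype travels in segment attributes, e.g. connect:SMS.
        segmentAttributes: ["connect:Subtype": .init(valueString: "connect:SMS")],
        sourceEndpoint: .init(
            address: "arn:aws:connect:us-west-2:111122223333:phone-number/EXAMPLE",
            type: .connectPhonenumberArn  // assumed enum case name for CONNECT_PHONENUMBER_ARN
        ),
        supportedMessagingContentTypes: ["text/plain", "text/markdown"]
    )
    let response = try await connect.startOutboundChatContact(request)
    return response.contactId
}
```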
\n ", + "smithy.api#http": { + "method": "PUT", + "uri": "/contact/outbound-chat", + "code": 200 + } + } + }, + "com.amazonaws.connect#StartOutboundChatContactRequest": { + "type": "structure", + "members": { + "SourceEndpoint": { + "target": "com.amazonaws.connect#Endpoint", + "traits": { + "smithy.api#required": {} + } + }, + "DestinationEndpoint": { + "target": "com.amazonaws.connect#Endpoint", + "traits": { + "smithy.api#required": {} + } + }, + "InstanceId": { + "target": "com.amazonaws.connect#InstanceId", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instance ID in the\n Amazon Resource Name (ARN) of the instance.

", + "smithy.api#required": {} + } + }, + "SegmentAttributes": { + "target": "com.amazonaws.connect#SegmentAttributes", + "traits": { + "smithy.api#documentation": "

A set of system defined key-value pairs stored on individual contact segments using an\n attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in\n flows.

\n
    \n
  • \n

    Attribute keys can include only alphanumeric, -, and _.

    \n
  • \n
  • \n

    This field can be used to show channel subtype, such as connect:Guide and\n connect:SMS.

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "Attributes": { + "target": "com.amazonaws.connect#Attributes", + "traits": { + "smithy.api#documentation": "

A custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in flows just like any other contact attributes.

" + } + }, + "ContactFlowId": { + "target": "com.amazonaws.connect#ContactFlowId", + "traits": { + "smithy.api#documentation": "

The identifier of the flow for the call. To see the ContactFlowId in the Amazon Connect\n console user interface, on the navigation menu go to Routing, Contact\n Flows. Choose the flow. On the flow page, under the name of the flow, choose\n Show additional flow information. The ContactFlowId is the last\n part of the ARN, shown here in bold:

\n
    \n
  • \n

    arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/123ec456-a007-89c0-1234-xxxxxxxxxxxx\n

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "ChatDurationInMinutes": { + "target": "com.amazonaws.connect#ChatDurationInMinutes", + "traits": { + "smithy.api#documentation": "

The total duration of the newly started chat session. If not specified, the chat session\n duration defaults to 25 hours. The minimum configurable time is 60 minutes. The maximum\n configurable time is 10,080 minutes (7 days).

" + } + }, + "ParticipantDetails": { + "target": "com.amazonaws.connect#ParticipantDetails" + }, + "InitialSystemMessage": { + "target": "com.amazonaws.connect#ChatMessage" + }, + "RelatedContactId": { + "target": "com.amazonaws.connect#ContactId", + "traits": { + "smithy.api#documentation": "

The unique identifier for an Amazon Connect contact. This identifier is related to the\n contact starting.

" + } + }, + "SupportedMessagingContentTypes": { + "target": "com.amazonaws.connect#SupportedMessagingContentTypes", + "traits": { + "smithy.api#documentation": "

The supported chat message content types. Supported types are:

\n
    \n
  • \n

    \n text/plain\n

    \n
  • \n
  • \n

    \n text/markdown\n

    \n
  • \n
  • \n

    \n application/json,\n application/vnd.amazonaws.connect.message.interactive\n

    \n
  • \n
  • \n

    \n application/vnd.amazonaws.connect.message.interactive.response\n

    \n
  • \n
\n

Content types must always contain text/plain. You can then put any other\n supported type in the list. For example, all the following lists are valid because they contain\n text/plain:

\n
    \n
  • \n

    \n [text/plain, text/markdown, application/json]\n

    \n
  • \n
  • \n

    \n [text/markdown, text/plain]\n

    \n
  • \n
  • \n

    \n [text/plain, application/json,\n application/vnd.amazonaws.connect.message.interactive.response]\n

    \n
  • \n
" + } + }, + "ClientToken": { + "target": "com.amazonaws.connect#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the AWS SDK populates this field. For more information about\n idempotency, see Making\n retries safe with idempotent APIs. The token is valid for 7 days after creation. If a\n contact is already started, the contact ID is returned.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.connect#StartOutboundChatContactResponse": { + "type": "structure", + "members": { + "ContactId": { + "target": "com.amazonaws.connect#ContactId", + "traits": { + "smithy.api#documentation": "

The identifier of this contact within the Amazon Connect instance.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.connect#StartOutboundVoiceContact": { "type": "operation", "input": { @@ -35445,7 +35586,7 @@ "Comparison": { "target": "com.amazonaws.connect#ResourceArnOrId", "traits": { - "smithy.api#documentation": "

The type of comparison. Only \"less than\" (LT) and \"greater than\" (GT) comparisons are\n supported.

" + "smithy.api#documentation": "

The type of comparison. Currently, \"less than\" (LT), \"less than equal\" (LTE), and \"greater\n than\" (GT) comparisons are supported.

" } }, "ThresholdValue": { diff --git a/models/cost-explorer.json b/models/cost-explorer.json index 2800a9bc06..a70595c61b 100644 --- a/models/cost-explorer.json +++ b/models/cost-explorer.json @@ -1020,7 +1020,7 @@ "DimensionValue": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "

The dimension for the anomaly (for example, an Amazon Web Service in a service\n monitor).

" + "smithy.api#documentation": "

The dimension for the anomaly (for example, an Amazon Web Services service in a service\n monitor).

" } }, "RootCauses": { @@ -3021,6 +3021,26 @@ "smithy.api#documentation": "

The field that contains a list of disk (local storage) metrics that are associated\n with the current instance.

" } }, + "com.amazonaws.costexplorer#DynamoDBCapacityDetails": { + "type": "structure", + "members": { + "CapacityUnits": { + "target": "com.amazonaws.costexplorer#GenericString", + "traits": { + "smithy.api#documentation": "

The capacity unit of the recommended reservation.

" + } + }, + "Region": { + "target": "com.amazonaws.costexplorer#GenericString", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region of the recommended reservation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The DynamoDB reservations that Amazon Web Services recommends that you purchase.

" + } + }, "com.amazonaws.costexplorer#EBSResourceUtilization": { "type": "structure", "members": { @@ -4398,7 +4418,7 @@ "Context": { "target": "com.amazonaws.costexplorer#Context", "traits": { - "smithy.api#documentation": "

The context for the call to GetDimensionValues. This can be\n RESERVATIONS or COST_AND_USAGE. The default value is\n COST_AND_USAGE. If the context is set to RESERVATIONS, the\n resulting dimension values can be used in the GetReservationUtilization\n operation. If the context is set to COST_AND_USAGE, the resulting dimension\n values can be used in the GetCostAndUsage operation.

\n

If you set the context to COST_AND_USAGE, you can use the following\n dimensions for searching:

\n
    \n
  • \n

    AZ - The Availability Zone. An example is us-east-1a.

    \n
  • \n
  • \n

    BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible\n values are the following:

    \n

    - Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Services.

    \n

    - AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting\n reseller for Amazon Web Services in India.

    \n

    - Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on\n Amazon Web Services by third-party software providers.

    \n
  • \n
  • \n

    CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or\n Linux.

    \n
  • \n
  • \n

    DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments.\n Valid values are SingleAZ and MultiAZ.

    \n
  • \n
  • \n

    DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are\n Aurora or MySQL.

    \n
  • \n
  • \n

    INSTANCE_TYPE - The type of Amazon EC2 instance. An example is\n m4.xlarge.

    \n
  • \n
  • \n

    INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use\n cases. Examples are Compute Optimized (for example, C4,\n C5, C6g, and C7g), Memory\n Optimization (for example, R4, R5n, R5b,\n and R6g).

    \n
  • \n
  • \n

    INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services\n invoice.

    \n
  • \n
  • \n

    LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services\n services, such as Amazon Web Services.

    \n
  • \n
  • \n

    LINKED_ACCOUNT - The description in the attribute map that includes the full name\n of the member account. The value field contains the Amazon Web Services ID of the member\n account.

    \n
  • \n
  • \n

    OPERATING_SYSTEM - The operating system. Examples are Windows or Linux.

    \n
  • \n
  • \n

    OPERATION - The action performed. Examples include RunInstance and\n CreateBucket.

    \n
  • \n
  • \n

    PLATFORM - The Amazon EC2 operating system. Examples are Windows or\n Linux.

    \n
  • \n
  • \n

    PURCHASE_TYPE - The reservation type of the purchase that this usage is related to.\n Examples include On-Demand Instances and Standard Reserved Instances.

    \n
  • \n
  • \n

    RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation\n Instance.

    \n
  • \n
  • \n

    SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.

    \n
  • \n
  • \n

    SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute).

    \n
  • \n
  • \n

    SERVICE - The Amazon Web Services service such as Amazon DynamoDB.

    \n
  • \n
  • \n

    TENANCY - The tenancy of a resource. Examples are shared or dedicated.

    \n
  • \n
  • \n

    USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response\n for the GetDimensionValues operation includes a unit attribute. Examples\n include GB and Hrs.

    \n
  • \n
  • \n

    USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2:\n CloudWatch – Alarms. The response for this operation includes a unit attribute.

    \n
  • \n
  • \n

    REGION - The Amazon Web Services Region.

    \n
  • \n
  • \n

    RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees,\n usage costs, tax refunds, and credits.

    \n
  • \n
  • \n

    RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in\n feature only available for last 14 days for EC2-Compute Service.

    \n
  • \n
\n

If you set the context to RESERVATIONS, you can use the following\n dimensions for searching:

\n
    \n
  • \n

    AZ - The Availability Zone. An example is us-east-1a.

    \n
  • \n
  • \n

    CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or\n Linux.

    \n
  • \n
  • \n

    DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments.\n Valid values are SingleAZ and MultiAZ.

    \n
  • \n
  • \n

    INSTANCE_TYPE - The type of Amazon EC2 instance. An example is\n m4.xlarge.

    \n
  • \n
  • \n

    LINKED_ACCOUNT - The description in the attribute map that includes the full name\n of the member account. The value field contains the Amazon Web Services ID of the member\n account.

    \n
  • \n
  • \n

    PLATFORM - The Amazon EC2 operating system. Examples are Windows or\n Linux.

    \n
  • \n
  • \n

    REGION - The Amazon Web Services Region.

    \n
  • \n
  • \n

    SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are\n regional or a single Availability Zone.

    \n
  • \n
  • \n

    TAG (Coverage only) - The tags that are associated with a Reserved Instance\n (RI).

    \n
  • \n
  • \n

    TENANCY - The tenancy of a resource. Examples are shared or dedicated.

    \n
  • \n
\n

If you set the context to SAVINGS_PLANS, you can use the following\n dimensions for searching:

\n
    \n
  • \n

    SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)

    \n
  • \n
  • \n

    PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All\n Upfront)

    \n
  • \n
  • \n

    REGION - The Amazon Web Services Region.

    \n
  • \n
  • \n

    INSTANCE_TYPE_FAMILY - The family of instances (For example,\n m5)

    \n
  • \n
  • \n

    LINKED_ACCOUNT - The description in the attribute map that includes the full name\n of the member account. The value field contains the Amazon Web Services ID of the member\n account.

    \n
  • \n
  • \n

    SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.

    \n
  • \n
" + "smithy.api#documentation": "

The context for the call to GetDimensionValues. This can be\n RESERVATIONS or COST_AND_USAGE. The default value is\n COST_AND_USAGE. If the context is set to RESERVATIONS, the\n resulting dimension values can be used in the GetReservationUtilization\n operation. If the context is set to COST_AND_USAGE, the resulting dimension\n values can be used in the GetCostAndUsage operation.

\n

If you set the context to COST_AND_USAGE, you can use the following\n dimensions for searching:

\n
    \n
  • \n

    AZ - The Availability Zone. An example is us-east-1a.

    \n
  • \n
  • \n

    BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible\n values are the following:

    \n

- Amazon Web Services (Amazon Web Services): The entity that sells Amazon Web Services services.

    \n

- AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that's an acting\n reseller for Amazon Web Services services in India.

    \n

    - Amazon Web Services Marketplace: The entity that supports the sale of solutions that are built on\n Amazon Web Services by third-party software providers.

    \n
  • \n
  • \n

    CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or\n Linux.

    \n
  • \n
  • \n

    DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments.\n Valid values are SingleAZ and MultiAZ.

    \n
  • \n
  • \n

    DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are\n Aurora or MySQL.

    \n
  • \n
  • \n

    INSTANCE_TYPE - The type of Amazon EC2 instance. An example is\n m4.xlarge.

    \n
  • \n
  • \n

    INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use\n cases. Examples are Compute Optimized (for example, C4,\n C5, C6g, and C7g), Memory\n Optimization (for example, R4, R5n, R5b,\n and R6g).

    \n
  • \n
  • \n

    INVOICING_ENTITY - The name of the entity that issues the Amazon Web Services\n invoice.

    \n
  • \n
  • \n

    LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services\n services, such as Amazon Web Services.

    \n
  • \n
  • \n

    LINKED_ACCOUNT - The description in the attribute map that includes the full name\n of the member account. The value field contains the Amazon Web Services ID of the member\n account.

    \n
  • \n
  • \n

    OPERATING_SYSTEM - The operating system. Examples are Windows or Linux.

    \n
  • \n
  • \n

    OPERATION - The action performed. Examples include RunInstance and\n CreateBucket.

    \n
  • \n
  • \n

    PLATFORM - The Amazon EC2 operating system. Examples are Windows or\n Linux.

    \n
  • \n
  • \n

    PURCHASE_TYPE - The reservation type of the purchase that this usage is related to.\n Examples include On-Demand Instances and Standard Reserved Instances.

    \n
  • \n
  • \n

    RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation\n Instance.

    \n
  • \n
  • \n

    SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.

    \n
  • \n
  • \n

    SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute).

    \n
  • \n
  • \n

    SERVICE - The Amazon Web Services service such as Amazon DynamoDB.

    \n
  • \n
  • \n

    TENANCY - The tenancy of a resource. Examples are shared or dedicated.

    \n
  • \n
  • \n

    USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response\n for the GetDimensionValues operation includes a unit attribute. Examples\n include GB and Hrs.

    \n
  • \n
  • \n

    USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2:\n CloudWatch – Alarms. The response for this operation includes a unit attribute.

    \n
  • \n
  • \n

    REGION - The Amazon Web Services Region.

    \n
  • \n
  • \n

    RECORD_TYPE - The different types of charges such as Reserved Instance (RI) fees,\n usage costs, tax refunds, and credits.

    \n
  • \n
  • \n

    RESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in\n feature only available for last 14 days for EC2-Compute Service.

    \n
  • \n
\n

If you set the context to RESERVATIONS, you can use the following\n dimensions for searching:

\n
    \n
  • \n

    AZ - The Availability Zone. An example is us-east-1a.

    \n
  • \n
  • \n

    CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or\n Linux.

    \n
  • \n
  • \n

    DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments.\n Valid values are SingleAZ and MultiAZ.

    \n
  • \n
  • \n

    INSTANCE_TYPE - The type of Amazon EC2 instance. An example is\n m4.xlarge.

    \n
  • \n
  • \n

    LINKED_ACCOUNT - The description in the attribute map that includes the full name\n of the member account. The value field contains the Amazon Web Services ID of the member\n account.

    \n
  • \n
  • \n

    PLATFORM - The Amazon EC2 operating system. Examples are Windows or\n Linux.

    \n
  • \n
  • \n

    REGION - The Amazon Web Services Region.

    \n
  • \n
  • \n

    SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are\n regional or a single Availability Zone.

    \n
  • \n
  • \n

    TAG (Coverage only) - The tags that are associated with a Reserved Instance\n (RI).

    \n
  • \n
  • \n

    TENANCY - The tenancy of a resource. Examples are shared or dedicated.

    \n
  • \n
\n

If you set the context to SAVINGS_PLANS, you can use the following\n dimensions for searching:

\n
    \n
  • \n

    SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)

    \n
  • \n
  • \n

    PAYMENT_OPTION - The payment option for the given Savings Plans (for example, All\n Upfront)

    \n
  • \n
  • \n

    REGION - The Amazon Web Services Region.

    \n
  • \n
  • \n

    INSTANCE_TYPE_FAMILY - The family of instances (For example,\n m5)

    \n
  • \n
  • \n

    LINKED_ACCOUNT - The description in the attribute map that includes the full name\n of the member account. The value field contains the Amazon Web Services ID of the member\n account.

    \n
  • \n
  • \n

    SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.

    \n
  • \n
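To make the context/dimension pairing above concrete, here is a hedged Soto sketch that lists SERVICE dimension values in the COST_AND_USAGE context; the dates are placeholders and the generated member and enum case names may vary slightly between releases.

```swift
// Sketch: enumerate SERVICE dimension values for September 2024 usage.
import SotoCostExplorer

func listServiceDimensionValues(costExplorer: CostExplorer) async throws {
    let request = CostExplorer.GetDimensionValuesRequest(
        context: .costAndUsage,
        dimension: .service,
        timePeriod: .init(end: "2024-10-01", start: "2024-09-01")
    )
    let response = try await costExplorer.getDimensionValues(request)
    for value in response.dimensionValues ?? [] {
        print(value.value ?? "")
    }
}
```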
" } }, "Filter": { @@ -7304,19 +7324,19 @@ "AverageUtilization": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "

The average utilization of your instances. Amazon Web Services uses this to calculate\n your recommended reservation purchases.

" + "smithy.api#documentation": "

The average utilization of your recommendations. Amazon Web Services uses this to\n calculate your recommended reservation purchases.

" } }, "EstimatedBreakEvenInMonths": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "

How long Amazon Web Services estimates that it takes for this instance to start saving\n you money, in months.

" + "smithy.api#documentation": "

How long Amazon Web Services estimates that it takes for this recommendation to start\n saving you money, in months.

" } }, "CurrencyCode": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "

The currency code that Amazon Web Services used to calculate the costs for this\n instance.

" + "smithy.api#documentation": "

The currency code that Amazon Web Services used to calculate the costs for this\n recommendation.

" } }, "EstimatedMonthlySavingsAmount": { @@ -7346,13 +7366,43 @@ "UpfrontCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "

How much purchasing this instance costs you upfront.

" + "smithy.api#documentation": "

How much purchasing this recommendation costs you upfront.

" } }, "RecurringStandardMonthlyCost": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "

How much purchasing this instance costs you on a monthly basis.

" + "smithy.api#documentation": "

How much purchasing this recommendation costs you on a monthly basis.

" + } + }, + "ReservedCapacityDetails": { + "target": "com.amazonaws.costexplorer#ReservedCapacityDetails", + "traits": { + "smithy.api#documentation": "

Details about the reservations that Amazon Web Services recommends that you\n purchase.

" + } + }, + "RecommendedNumberOfCapacityUnitsToPurchase": { + "target": "com.amazonaws.costexplorer#GenericString", + "traits": { + "smithy.api#documentation": "

The number of reserved capacity units that Amazon Web Services recommends that you\n purchase.

" + } + }, + "MinimumNumberOfCapacityUnitsUsedPerHour": { + "target": "com.amazonaws.costexplorer#GenericString", + "traits": { + "smithy.api#documentation": "

The minimum number of provisioned capacity units that you used in an hour during the\n historical period. Amazon Web Services uses this to calculate your recommended\n reservation purchases.

" + } + }, + "MaximumNumberOfCapacityUnitsUsedPerHour": { + "target": "com.amazonaws.costexplorer#GenericString", + "traits": { + "smithy.api#documentation": "

The maximum number of provisioned capacity units that you used in an hour during the\n historical period. Amazon Web Services uses this to calculate your recommended\n reservation purchases.

" + } + }, + "AverageNumberOfCapacityUnitsUsedPerHour": { + "target": "com.amazonaws.costexplorer#GenericString", + "traits": { + "smithy.api#documentation": "

The average number of provisioned capacity units that you used in an hour during the\n historical period. Amazon Web Services uses this to calculate your recommended\n reservation purchases.

" } } }, @@ -7462,6 +7512,20 @@ "target": "com.amazonaws.costexplorer#ReservationUtilizationGroup" } }, + "com.amazonaws.costexplorer#ReservedCapacityDetails": { + "type": "structure", + "members": { + "DynamoDBCapacityDetails": { + "target": "com.amazonaws.costexplorer#DynamoDBCapacityDetails", + "traits": { + "smithy.api#documentation": "

The DynamoDB reservations that Amazon Web Services recommends that you purchase.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the reservations that Amazon Web Services recommends that you\n purchase.

" + } + }, "com.amazonaws.costexplorer#ReservedHours": { "type": "string" }, @@ -7777,7 +7841,7 @@ "Service": { "target": "com.amazonaws.costexplorer#GenericString", "traits": { - "smithy.api#documentation": "

The Amazon Web Service name that's associated with the cost anomaly.

" + "smithy.api#documentation": "

The Amazon Web Services service name that's associated with the cost anomaly.

" } }, "Region": { @@ -7806,7 +7870,7 @@ } }, "traits": { - "smithy.api#documentation": "

The combination of Amazon Web Service, linked account, linked account name,\n Region, and usage type where a cost anomaly is observed. The linked account name will\n only be available when the account name can be identified.

" + "smithy.api#documentation": "

The combination of Amazon Web Services service, linked account, linked account name,\n Region, and usage type where a cost anomaly is observed. The linked account name will\n only be available when the account name can be identified.

" } }, "com.amazonaws.costexplorer#RootCauses": { diff --git a/models/customer-profiles.json b/models/customer-profiles.json index 4776891979..3c6672f0a1 100644 --- a/models/customer-profiles.json +++ b/models/customer-profiles.json @@ -1624,7 +1624,7 @@ "name": "profile" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon Connect Customer Profiles\n

Amazon Connect Customer Profiles is a unified customer profile for your contact\n center that has pre-built connectors powered by AppFlow that make it easy to combine\n customer information from third party applications, such as Salesforce (CRM), ServiceNow\n (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.

\n

For more information about the Amazon Connect Customer Profiles feature, see Use Customer\n Profiles in the Amazon Connect Administrator's Guide.

", + "smithy.api#documentation": "Amazon Connect Customer Profiles\n \n

Amazon Connect Customer Profiles is a unified customer profile for your contact\n center that has pre-built connectors powered by AppFlow that make it easy to combine\n customer information from third party applications, such as Salesforce (CRM), ServiceNow\n (ITSM), and your enterprise resource planning (ERP), with contact history from your Amazon Connect contact center.

\n

For more information about the Amazon Connect Customer Profiles feature, see Use Customer\n Profiles in the Amazon Connect Administrator's Guide.

", "smithy.api#title": "Amazon Connect Customer Profiles", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -4524,6 +4524,12 @@ "traits": { "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in\n Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in\n flowDefinition.

" } + }, + "RoleArn": { + "target": "com.amazonaws.customerprofiles#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make\n Customer Profiles requests on your behalf.

" + } } }, "traits": { @@ -6200,6 +6206,12 @@ "traits": { "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in\n Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in\n flowDefinition.

" } + }, + "RoleArn": { + "target": "com.amazonaws.customerprofiles#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make\n Customer Profiles requests on your behalf.

" + } } }, "traits": { @@ -7922,6 +7934,12 @@ "traits": { "smithy.api#documentation": "

A map in which each key is an event type from an external application such as Segment or Shopify, and each value is an ObjectTypeName (template) used to ingest the event.\nIt supports the following event types: SegmentIdentify, ShopifyCreateCustomers, ShopifyUpdateCustomers, ShopifyCreateDraftOrders, \nShopifyUpdateDraftOrders, ShopifyCreateOrders, and ShopifyUpdatedOrders.

" } + }, + "RoleArn": { + "target": "com.amazonaws.customerprofiles#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make\n Customer Profiles requests on your behalf.

" + } } }, "traits": { @@ -7988,6 +8006,12 @@ "traits": { "smithy.api#documentation": "

Boolean that shows if the Flow that's associated with the Integration is created in\n Amazon Appflow, or with ObjectTypeName equals _unstructured via API/CLI in\n flowDefinition.

" } + }, + "RoleArn": { + "target": "com.amazonaws.customerprofiles#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role. The Integration uses this role to make\n Customer Profiles requests on your behalf.

" + } } }, "traits": { diff --git a/models/database-migration-service.json b/models/database-migration-service.json index 99fe7b4985..2d7e6a4e84 100644 --- a/models/database-migration-service.json +++ b/models/database-migration-service.json @@ -90,6 +90,9 @@ "target": "com.amazonaws.databasemigrationservice#AddTagsToResourceResponse" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" + }, { "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" } @@ -161,6 +164,9 @@ { "target": "com.amazonaws.databasemigrationservice#CancelReplicationTaskAssessmentRun" }, + { + "target": "com.amazonaws.databasemigrationservice#CreateDataMigration" + }, { "target": "com.amazonaws.databasemigrationservice#CreateDataProvider" }, @@ -197,6 +203,9 @@ { "target": "com.amazonaws.databasemigrationservice#DeleteConnection" }, + { + "target": "com.amazonaws.databasemigrationservice#DeleteDataMigration" + }, { "target": "com.amazonaws.databasemigrationservice#DeleteDataProvider" }, @@ -248,6 +257,9 @@ { "target": "com.amazonaws.databasemigrationservice#DescribeConversionConfiguration" }, + { + "target": "com.amazonaws.databasemigrationservice#DescribeDataMigrations" + }, { "target": "com.amazonaws.databasemigrationservice#DescribeDataProviders" }, @@ -374,6 +386,9 @@ { "target": "com.amazonaws.databasemigrationservice#ModifyConversionConfiguration" }, + { + "target": "com.amazonaws.databasemigrationservice#ModifyDataMigration" + }, { "target": "com.amazonaws.databasemigrationservice#ModifyDataProvider" }, @@ -422,6 +437,9 @@ { "target": "com.amazonaws.databasemigrationservice#RunFleetAdvisorLsaAnalysis" }, + { + "target": "com.amazonaws.databasemigrationservice#StartDataMigration" + }, { "target": "com.amazonaws.databasemigrationservice#StartExtensionPackAssociation" }, @@ -455,6 +473,9 @@ { "target": "com.amazonaws.databasemigrationservice#StartReplicationTaskAssessmentRun" }, + { + "target": "com.amazonaws.databasemigrationservice#StopDataMigration" + }, { "target": "com.amazonaws.databasemigrationservice#StopReplication" }, @@ -2253,7 +2274,7 @@ "MinCapacityUnits": { "target": "com.amazonaws.databasemigrationservice#IntegerOptional", "traits": { - "smithy.api#documentation": "

Specifies the minimum value of the DMS capacity units (DCUs) for which a given DMS\n Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value\n allowed. The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU\n value that you can specify for DMS Serverless is 1. You don't have to specify a value for the\n MinCapacityUnits parameter. If you don't set this value, DMS scans the current activity\n of available source tables to identify an optimum setting for this parameter. If there is no current\n source activity or DMS can't otherwise identify a more appropriate value, it sets this parameter to\n the minimum DCU value allowed, 1.

" + "smithy.api#documentation": "

Specifies the minimum value of the DMS capacity units (DCUs) for which a given DMS\n Serverless replication can be provisioned. A single DCU is 2GB of RAM, with 1 DCU as the minimum value\n allowed. The list of valid DCU values includes 1, 2, 4, 8, 16, 32, 64, 128, 192, 256, and 384. So, the minimum DCU\n value that you can specify for DMS Serverless is 1. If you don't set this value, DMS sets this parameter to the \n minimum DCU value allowed, 1. If there is no current source activity, DMS scales down your replication until it \n reaches the value specified in MinCapacityUnits.
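As a hedged sketch of the DCU bounds described above (member names follow Soto's generated DMS types and may differ slightly):

```swift
// Sketch: a DMS Serverless compute configuration with explicit DCU bounds.
import SotoDatabaseMigrationService

let compute = DatabaseMigrationService.ComputeConfig(
    maxCapacityUnits: 16,  // upper bound for scaling
    minCapacityUnits: 2    // DMS scales down to this value when the source is idle
)
```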

" } }, "MultiAZ": { @@ -2338,6 +2359,114 @@ } } }, + "com.amazonaws.databasemigrationservice#CreateDataMigration": { + "type": "operation", + "input": { + "target": "com.amazonaws.databasemigrationservice#CreateDataMigrationMessage" + }, + "output": { + "target": "com.amazonaws.databasemigrationservice#CreateDataMigrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#InvalidOperationFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceAlreadyExistsFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceQuotaExceededFault" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a data migration using the provided settings.

" + } + }, + "com.amazonaws.databasemigrationservice#CreateDataMigrationMessage": { + "type": "structure", + "members": { + "DataMigrationName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

A user-friendly name for the data migration. Data migration names\n have the following constraints:

\n
    \n
  • \n

    Must begin with a letter, and can only contain ASCII letters,\n digits, and hyphens.

    \n
  • \n
  • \n

    Can't end with a hyphen or contain two consecutive hyphens.

    \n
  • \n
  • \n

    Length must be from 1 to 255 characters.

    \n
  • \n
" + } + }, + "MigrationProjectIdentifier": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

An identifier for the migration project.

", + "smithy.api#required": {} + } + }, + "DataMigrationType": { + "target": "com.amazonaws.databasemigrationservice#MigrationTypeValue", + "traits": { + "smithy.api#documentation": "

Specifies whether the data migration is full-load only, change data capture (CDC) only, or full-load and CDC.

", + "smithy.api#required": {} + } + }, + "ServiceAccessRoleArn": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the service access role that you want to use to\n create the data migration.

", + "smithy.api#required": {} + } + }, + "EnableCloudwatchLogs": { + "target": "com.amazonaws.databasemigrationservice#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to enable CloudWatch logs for the data migration.

" + } + }, + "SourceDataSettings": { + "target": "com.amazonaws.databasemigrationservice#SourceDataSettings", + "traits": { + "smithy.api#documentation": "

Specifies information about the source data provider.

" + } + }, + "NumberOfJobs": { + "target": "com.amazonaws.databasemigrationservice#IntegerOptional", + "traits": { + "smithy.api#documentation": "

The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target.

" + } + }, + "Tags": { + "target": "com.amazonaws.databasemigrationservice#TagList", + "traits": { + "smithy.api#documentation": "

One or more tags to be assigned to the data migration.

" + } + }, + "SelectionRules": { + "target": "com.amazonaws.databasemigrationservice#SecretString", + "traits": { + "smithy.api#documentation": "

An optional JSON string specifying what tables, views, and schemas\n to include or exclude from the migration.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.databasemigrationservice#CreateDataMigrationResponse": { + "type": "structure", + "members": { + "DataMigration": { + "target": "com.amazonaws.databasemigrationservice#DataMigration", + "traits": { + "smithy.api#documentation": "

Information about the created data migration.
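A minimal, hedged sketch of the new CreateDataMigration call through Soto once the model is regenerated; the project identifier and role ARN are placeholders, and the generated member and enum names may differ slightly.

```swift
// Sketch: create a full-load data migration with CloudWatch logging enabled.
import SotoDatabaseMigrationService

func createFullLoadMigration(dms: DatabaseMigrationService) async throws {
    let request = DatabaseMigrationService.CreateDataMigrationMessage(
        dataMigrationName: "orders-full-load",
        dataMigrationType: .fullLoad,
        enableCloudwatchLogs: true,
        migrationProjectIdentifier: "example-migration-project",
        numberOfJobs: 4,  // parallel unload/load threads
        serviceAccessRoleArn: "arn:aws:iam::111122223333:role/dms-data-migration-role"
    )
    let response = try await dms.createDataMigration(request)
    print(response.dataMigration?.dataMigrationStatus ?? "unknown")
}
```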

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.databasemigrationservice#CreateDataProvider": { "type": "operation", "input": { @@ -2350,6 +2479,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#ResourceAlreadyExistsFault" }, @@ -2550,7 +2682,7 @@ "EngineName": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

The type of engine for the endpoint. Valid values, depending on the\n EndpointType value, include \"mysql\", \"oracle\",\n \"postgres\", \"mariadb\", \"aurora\", \n \"aurora-postgresql\", \"opensearch\", \"redshift\", \"s3\",\n \"db2\", \"db2-zos\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\",\n \"kinesis\", \"kafka\", \"elasticsearch\", \"docdb\",\n \"sqlserver\", \"neptune\", and \"babelfish\".

", + "smithy.api#documentation": "

The type of engine for the endpoint. Valid values, depending on the\n EndpointType value, include \"mysql\", \"oracle\",\n \"postgres\", \"mariadb\", \"aurora\", \n \"aurora-postgresql\", \"opensearch\", \"redshift\", \"s3\",\n \"db2\", \"db2-zos\", \"azuredb\", \"sybase\", \"dynamodb\", \"mongodb\",\n \"kinesis\", \"kafka\", \"elasticsearch\", \"docdb\",\n \"sqlserver\", \"neptune\", \"babelfish\",\n \"redshift-serverless\", \"aurora-serverless\", \"aurora-postgresql-serverless\",\n \"gcp-mysql\", \"azure-sql-managed-instance\", \"redis\", and \"dms-transfer\".

", "smithy.api#required": {} } }, @@ -2987,6 +3119,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -3135,6 +3270,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#ResourceAlreadyExistsFault" }, @@ -3725,7 +3863,7 @@ "ReplicationSubnetGroupIdentifier": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

The name for the replication subnet group. This value is stored as a lowercase\n string.

\n

Constraints: Must contain no more than 255 alphanumeric characters, periods, spaces,\n underscores, or hyphens. Must not be \"default\".

\n

Example: mySubnetgroup\n

", + "smithy.api#documentation": "

The name for the replication subnet group. This value is stored as a lowercase\n string.

\n

Constraints: Must contain no more than 255 alphanumeric characters, periods,\n underscores, or hyphens. Must not be \"default\".

\n

Example: mySubnetgroup\n

", "smithy.api#required": {} } }, @@ -3963,6 +4101,205 @@ } } }, + "com.amazonaws.databasemigrationservice#DataMigration": { + "type": "structure", + "members": { + "DataMigrationName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The user-friendly name for the data migration.

" + } + }, + "DataMigrationArn": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies this replication.

" + } + }, + "DataMigrationCreateTime": { + "target": "com.amazonaws.databasemigrationservice#Iso8601DateTime", + "traits": { + "smithy.api#documentation": "

The UTC time when DMS created the data migration.

" + } + }, + "DataMigrationStartTime": { + "target": "com.amazonaws.databasemigrationservice#Iso8601DateTime", + "traits": { + "smithy.api#documentation": "

The UTC time when DMS started the data migration.

" + } + }, + "DataMigrationEndTime": { + "target": "com.amazonaws.databasemigrationservice#Iso8601DateTime", + "traits": { + "smithy.api#documentation": "

The UTC time when data migration ended.

" + } + }, + "ServiceAccessRoleArn": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The IAM role that the data migration uses to access Amazon Web Services resources.

" + } + }, + "MigrationProjectArn": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the data migration's associated migration project.

" + } + }, + "DataMigrationType": { + "target": "com.amazonaws.databasemigrationservice#MigrationTypeValue", + "traits": { + "smithy.api#documentation": "

Specifies whether the data migration is full-load only, change data capture (CDC) only, or full-load and CDC.

" + } + }, + "DataMigrationSettings": { + "target": "com.amazonaws.databasemigrationservice#DataMigrationSettings", + "traits": { + "smithy.api#documentation": "

Specifies CloudWatch settings and selection rules for the data migration.

" + } + }, + "SourceDataSettings": { + "target": "com.amazonaws.databasemigrationservice#SourceDataSettings", + "traits": { + "smithy.api#documentation": "

Specifies information about the data migration's source data provider.

" + } + }, + "DataMigrationStatistics": { + "target": "com.amazonaws.databasemigrationservice#DataMigrationStatistics", + "traits": { + "smithy.api#documentation": "

Provides information about the data migration's run, including start and stop time, latency, and data migration progress.

" + } + }, + "DataMigrationStatus": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The current status of the data migration.

" + } + }, + "PublicIpAddresses": { + "target": "com.amazonaws.databasemigrationservice#PublicIpAddressList", + "traits": { + "smithy.api#documentation": "

The IP addresses of the endpoints for the data migration.

" + } + }, + "LastFailureMessage": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

Information about the data migration's most recent error or failure.

" + } + }, + "StopReason": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The reason the data migration last stopped.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

This object provides information about a DMS data migration.

" + } + }, + "com.amazonaws.databasemigrationservice#DataMigrationSettings": { + "type": "structure", + "members": { + "NumberOfJobs": { + "target": "com.amazonaws.databasemigrationservice#IntegerOptional", + "traits": { + "smithy.api#documentation": "

The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target.

" + } + }, + "CloudwatchLogsEnabled": { + "target": "com.amazonaws.databasemigrationservice#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Whether to enable CloudWatch logging for the data migration.

" + } + }, + "SelectionRules": { + "target": "com.amazonaws.databasemigrationservice#SecretString", + "traits": { + "smithy.api#documentation": "

A JSON-formatted string that defines what objects to include and exclude from the migration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Options for configuring a data migration, including whether to enable CloudWatch logs,\n and the selection rules to use to include or exclude database objects from the migration.
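The model types SelectionRules as an opaque SecretString and does not define its schema. As an assumption, the string likely follows the same rule layout DMS uses elsewhere for table mappings; a hypothetical example, embedded as a Swift string literal:

```swift
// Illustrative only: the model treats SelectionRules as an opaque SecretString.
// The JSON below is an assumption based on the rule layout DMS uses elsewhere
// for table mappings, not a schema taken from this model.
let selectionRules = """
{
  "rules": [
    {
      "rule-type": "selection",
      "rule-id": "1",
      "rule-name": "include-sales-schema",
      "object-locator": { "schema-name": "sales", "table-name": "%" },
      "rule-action": "include"
    }
  ]
}
"""
```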

" + } + }, + "com.amazonaws.databasemigrationservice#DataMigrationStatistics": { + "type": "structure", + "members": { + "TablesLoaded": { + "target": "com.amazonaws.databasemigrationservice#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of tables loaded in the current data migration run.

" + } + }, + "ElapsedTimeMillis": { + "target": "com.amazonaws.databasemigrationservice#Long", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The elapsed duration of the data migration run, in milliseconds.

" + } + }, + "TablesLoading": { + "target": "com.amazonaws.databasemigrationservice#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The data migration's table loading progress.

" + } + }, + "FullLoadPercentage": { + "target": "com.amazonaws.databasemigrationservice#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The data migration's progress in the full-load migration phase.

" + } + }, + "CDCLatency": { + "target": "com.amazonaws.databasemigrationservice#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The current latency of the change data capture (CDC) operation.

" + } + }, + "TablesQueued": { + "target": "com.amazonaws.databasemigrationservice#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of tables that are waiting for processing.

" + } + }, + "TablesErrored": { + "target": "com.amazonaws.databasemigrationservice#Integer", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The number of tables that DMS failed to process.

" + } + }, + "StartTime": { + "target": "com.amazonaws.databasemigrationservice#Iso8601DateTime", + "traits": { + "smithy.api#documentation": "

The time when the migration started.

" + } + }, + "StopTime": { + "target": "com.amazonaws.databasemigrationservice#Iso8601DateTime", + "traits": { + "smithy.api#documentation": "

The time when the migration stopped or failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the data migration run, including start and stop time, latency, and migration progress.

" + } + }, + "com.amazonaws.databasemigrationservice#DataMigrations": { + "type": "list", + "member": { + "target": "com.amazonaws.databasemigrationservice#DataMigration" + } + }, "com.amazonaws.databasemigrationservice#DataProvider": { "type": "structure", "members": { @@ -4508,6 +4845,58 @@ "smithy.api#output": {} } }, + "com.amazonaws.databasemigrationservice#DeleteDataMigration": { + "type": "operation", + "input": { + "target": "com.amazonaws.databasemigrationservice#DeleteDataMigrationMessage" + }, + "output": { + "target": "com.amazonaws.databasemigrationservice#DeleteDataMigrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified data migration.

" + } + }, + "com.amazonaws.databasemigrationservice#DeleteDataMigrationMessage": { + "type": "structure", + "members": { + "DataMigrationIdentifier": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The identifier (name or ARN) of the data migration to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.databasemigrationservice#DeleteDataMigrationResponse": { + "type": "structure", + "members": { + "DataMigration": { + "target": "com.amazonaws.databasemigrationservice#DataMigration", + "traits": { + "smithy.api#documentation": "

The deleted data migration.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.databasemigrationservice#DeleteDataProvider": { "type": "operation", "input": { @@ -4520,6 +4909,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -4718,6 +5110,9 @@ "target": "smithy.api#Unit" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" + }, { "target": "com.amazonaws.databasemigrationservice#CollectorNotFoundFault" }, @@ -4738,6 +5133,9 @@ "target": "com.amazonaws.databasemigrationservice#DeleteFleetAdvisorDatabasesResponse" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidOperationFault" }, @@ -4790,6 +5188,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -4865,6 +5266,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -5727,13 +6131,100 @@ "MigrationProjectIdentifier": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) for the schema conversion project.

" + "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) for the schema conversion project.

" + } + }, + "ConversionConfiguration": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The configuration parameters for the schema conversion project.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.databasemigrationservice#DescribeDataMigrations": { + "type": "operation", + "input": { + "target": "com.amazonaws.databasemigrationservice#DescribeDataMigrationsMessage" + }, + "output": { + "target": "com.amazonaws.databasemigrationservice#DescribeDataMigrationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about data migrations.

", + "smithy.api#paginated": { + "inputToken": "Marker", + "outputToken": "Marker", + "items": "DataMigrations", + "pageSize": "MaxRecords" + } + } + }, + "com.amazonaws.databasemigrationservice#DescribeDataMigrationsMessage": { + "type": "structure", + "members": { + "Filters": { + "target": "com.amazonaws.databasemigrationservice#FilterList", + "traits": { + "smithy.api#documentation": "

Filters applied to the data migrations.

" + } + }, + "MaxRecords": { + "target": "com.amazonaws.databasemigrationservice#IntegerOptional", + "traits": { + "smithy.api#documentation": "

The maximum number of records to include in the response. If more records exist than the specified \n MaxRecords value, a pagination token called a marker is included in the response so that \n the remaining results can be retrieved.

" + } + }, + "Marker": { + "target": "com.amazonaws.databasemigrationservice#Marker", + "traits": { + "smithy.api#documentation": "

An optional pagination token provided by a previous request. If this parameter is specified, \n the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + } + }, + "WithoutSettings": { + "target": "com.amazonaws.databasemigrationservice#BooleanOptional", + "traits": { + "smithy.api#documentation": "

An option to avoid returning information about settings. Use this to reduce\n overhead when the settings information is too large. To use this option, choose\n true; otherwise, choose false (the default).

" + } + }, + "WithoutStatistics": { + "target": "com.amazonaws.databasemigrationservice#BooleanOptional", + "traits": { + "smithy.api#documentation": "

An option to avoid returning information about statistics. Use this to reduce\n overhead when the statistics information is too large. To use this option, choose\n true; otherwise, choose false (the default).

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.databasemigrationservice#DescribeDataMigrationsResponse": { + "type": "structure", + "members": { + "DataMigrations": { + "target": "com.amazonaws.databasemigrationservice#DataMigrations", + "traits": { + "smithy.api#documentation": "

Returns information about the data migrations used in the project.

" } }, - "ConversionConfiguration": { - "target": "com.amazonaws.databasemigrationservice#String", + "Marker": { + "target": "com.amazonaws.databasemigrationservice#Marker", "traits": { - "smithy.api#documentation": "

The configuration parameters for the schema conversion project.

" + "smithy.api#documentation": "

An optional pagination token provided by a previous request. If this parameter is specified, \n the response includes only records beyond the marker, up to the value specified by MaxRecords.
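The MaxRecords/Marker pair described above drives pagination for DescribeDataMigrations. A minimal Swift sketch, assuming the regenerated SotoDatabaseMigrationService client exposes describeDataMigrations with member names following the existing Describe* conventions:

```swift
import SotoDatabaseMigrationService

// Sketch only: `describeDataMigrations` and its shapes are what Soto's code
// generator would emit from this model; names have not been verified against
// a generated build.
func collectDataMigrations(_ dms: DatabaseMigrationService) async throws -> [DatabaseMigrationService.DataMigration] {
    var migrations: [DatabaseMigrationService.DataMigration] = []
    var marker: String?
    repeat {
        let page = try await dms.describeDataMigrations(
            .init(marker: marker, maxRecords: 50, withoutSettings: true)
        )
        migrations += page.dataMigrations ?? []
        marker = page.marker   // nil once the final page has been returned
    } while marker != nil
    return migrations
}
```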

" } } }, @@ -5753,6 +6244,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" } @@ -6868,6 +7362,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" } @@ -7491,6 +7988,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" } @@ -10356,6 +10856,18 @@ "smithy.api#documentation": "

Provides information about a metadata model assessment exported to SQL.

" } }, + "com.amazonaws.databasemigrationservice#FailedDependencyFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.databasemigrationservice#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

A dependency threw an exception.

", + "smithy.api#error": "client" + } + }, "com.amazonaws.databasemigrationservice#Filter": { "type": "structure", "members": { @@ -11349,6 +11861,9 @@ "target": "com.amazonaws.databasemigrationservice#ListTagsForResourceResponse" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" + }, { "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" } @@ -11469,6 +11984,15 @@ "smithy.api#documentation": "

Provides information that defines a MariaDB data provider.

" } }, + "com.amazonaws.databasemigrationservice#Marker": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, "com.amazonaws.databasemigrationservice#MessageFormatValue": { "type": "enum", "members": { @@ -11803,6 +12327,100 @@ "smithy.api#output": {} } }, + "com.amazonaws.databasemigrationservice#ModifyDataMigration": { + "type": "operation", + "input": { + "target": "com.amazonaws.databasemigrationservice#ModifyDataMigrationMessage" + }, + "output": { + "target": "com.amazonaws.databasemigrationservice#ModifyDataMigrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies an existing DMS data migration.

" + } + }, + "com.amazonaws.databasemigrationservice#ModifyDataMigrationMessage": { + "type": "structure", + "members": { + "DataMigrationIdentifier": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The identifier (name or ARN) of the data migration to modify.

", + "smithy.api#required": {} + } + }, + "DataMigrationName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The new name for the data migration.

" + } + }, + "EnableCloudwatchLogs": { + "target": "com.amazonaws.databasemigrationservice#BooleanOptional", + "traits": { + "smithy.api#documentation": "

Whether to enable CloudWatch logs for the data migration.

" + } + }, + "ServiceAccessRoleArn": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The new service access role ARN for the data migration.

" + } + }, + "DataMigrationType": { + "target": "com.amazonaws.databasemigrationservice#MigrationTypeValue", + "traits": { + "smithy.api#documentation": "

The new migration type for the data migration.

" + } + }, + "SourceDataSettings": { + "target": "com.amazonaws.databasemigrationservice#SourceDataSettings", + "traits": { + "smithy.api#documentation": "

The new information about the source data provider for the data migration.

" + } + }, + "NumberOfJobs": { + "target": "com.amazonaws.databasemigrationservice#IntegerOptional", + "traits": { + "smithy.api#documentation": "

The number of parallel jobs that trigger parallel threads to unload the tables from the source, and then load them to the target.

" + } + }, + "SelectionRules": { + "target": "com.amazonaws.databasemigrationservice#SecretString", + "traits": { + "smithy.api#documentation": "

A JSON-formatted string that defines what objects to include and exclude from the migration.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.databasemigrationservice#ModifyDataMigrationResponse": { + "type": "structure", + "members": { + "DataMigration": { + "target": "com.amazonaws.databasemigrationservice#DataMigration", + "traits": { + "smithy.api#documentation": "

Information about the modified data migration.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.databasemigrationservice#ModifyDataProvider": { "type": "operation", "input": { @@ -11815,6 +12433,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -12307,6 +12928,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -12447,6 +13071,9 @@ { "target": "com.amazonaws.databasemigrationservice#AccessDeniedFault" }, + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, { "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" }, @@ -14293,6 +14920,15 @@ "smithy.api#documentation": "

Information about provisioning resources for a DMS serverless replication.

" } }, + "com.amazonaws.databasemigrationservice#PublicIpAddressList": { + "type": "list", + "member": { + "target": "com.amazonaws.databasemigrationservice#String" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.databasemigrationservice#RdsConfiguration": { "type": "structure", "members": { @@ -15181,6 +15817,9 @@ "target": "com.amazonaws.databasemigrationservice#RemoveTagsFromResourceResponse" }, "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" + }, { "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" } @@ -16938,6 +17577,44 @@ "smithy.api#documentation": "

Describes a server in a Fleet Advisor collector inventory.

" } }, + "com.amazonaws.databasemigrationservice#SourceDataSetting": { + "type": "structure", + "members": { + "CDCStartPosition": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The change data capture (CDC) start position for the source data provider.

" + } + }, + "CDCStartTime": { + "target": "com.amazonaws.databasemigrationservice#Iso8601DateTime", + "traits": { + "smithy.api#documentation": "

The change data capture (CDC) start time for the source data provider.

" + } + }, + "CDCStopTime": { + "target": "com.amazonaws.databasemigrationservice#Iso8601DateTime", + "traits": { + "smithy.api#documentation": "

The change data capture (CDC) stop time for the source data provider.

" + } + }, + "SlotName": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The name of the replication slot on the source data provider. This attribute is only \n valid for a PostgreSQL or Aurora PostgreSQL source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Defines settings for a source data provider for a data migration.

" + } + }, + "com.amazonaws.databasemigrationservice#SourceDataSettings": { + "type": "list", + "member": { + "target": "com.amazonaws.databasemigrationservice#SourceDataSetting" + } + }, "com.amazonaws.databasemigrationservice#SourceIdsList": { "type": "list", "member": { @@ -16975,6 +17652,71 @@ } } }, + "com.amazonaws.databasemigrationservice#StartDataMigration": { + "type": "operation", + "input": { + "target": "com.amazonaws.databasemigrationservice#StartDataMigrationMessage" + }, + "output": { + "target": "com.amazonaws.databasemigrationservice#StartDataMigrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#InvalidOperationFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceQuotaExceededFault" + } + ], + "traits": { + "smithy.api#documentation": "

Starts the specified data migration.

" + } + }, + "com.amazonaws.databasemigrationservice#StartDataMigrationMessage": { + "type": "structure", + "members": { + "DataMigrationIdentifier": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The identifier (name or ARN) of the data migration to start.

", + "smithy.api#required": {} + } + }, + "StartType": { + "target": "com.amazonaws.databasemigrationservice#StartReplicationMigrationTypeValue", + "traits": { + "smithy.api#documentation": "

Specifies the start type for the data migration. Valid values include \n start-replication, reload-target, and resume-processing.
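A minimal sketch of starting a data migration with one of the start types listed above. It reuses the `dms` client from the pagination sketch earlier and assumes the regenerated client exposes startDataMigration, mapping the enum values to .startReplication, .reloadTarget, and .resumeProcessing; the ARN is a placeholder:

```swift
// Sketch only: generated operation and member names are assumptions.
let started = try await dms.startDataMigration(
    .init(
        dataMigrationIdentifier: "arn:aws:dms:us-east-1:111122223333:data-migration/example",  // placeholder ARN
        startType: .startReplication
    )
)
print(started.dataMigration?.dataMigrationStatus ?? "status unknown")
```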

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.databasemigrationservice#StartDataMigrationResponse": { + "type": "structure", + "members": { + "DataMigration": { + "target": "com.amazonaws.databasemigrationservice#DataMigration", + "traits": { + "smithy.api#documentation": "

The data migration that DMS started.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.databasemigrationservice#StartExtensionPackAssociation": { "type": "operation", "input": { @@ -17664,6 +18406,29 @@ "smithy.api#input": {} } }, + "com.amazonaws.databasemigrationservice#StartReplicationMigrationTypeValue": { + "type": "enum", + "members": { + "RELOAD_TARGET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "reload-target" + } + }, + "RESUME_PROCESSING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "resume-processing" + } + }, + "START_REPLICATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "start-replication" + } + } + } + }, "com.amazonaws.databasemigrationservice#StartReplicationResponse": { "type": "structure", "members": { @@ -17989,6 +18754,58 @@ } } }, + "com.amazonaws.databasemigrationservice#StopDataMigration": { + "type": "operation", + "input": { + "target": "com.amazonaws.databasemigrationservice#StopDataMigrationMessage" + }, + "output": { + "target": "com.amazonaws.databasemigrationservice#StopDataMigrationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.databasemigrationservice#FailedDependencyFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#InvalidResourceStateFault" + }, + { + "target": "com.amazonaws.databasemigrationservice#ResourceNotFoundFault" + } + ], + "traits": { + "smithy.api#documentation": "

Stops the specified data migration.

" + } + }, + "com.amazonaws.databasemigrationservice#StopDataMigrationMessage": { + "type": "structure", + "members": { + "DataMigrationIdentifier": { + "target": "com.amazonaws.databasemigrationservice#String", + "traits": { + "smithy.api#documentation": "

The identifier (name or ARN) of the data migration to stop.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.databasemigrationservice#StopDataMigrationResponse": { + "type": "structure", + "members": { + "DataMigration": { + "target": "com.amazonaws.databasemigrationservice#DataMigration", + "traits": { + "smithy.api#documentation": "

The data migration that DMS stopped.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.databasemigrationservice#StopReplication": { "type": "operation", "input": { diff --git a/models/deadline.json b/models/deadline.json index 542e830283..903bfb185f 100644 --- a/models/deadline.json +++ b/models/deadline.json @@ -2519,15 +2519,13 @@ "template": { "target": "com.amazonaws.deadline#JobTemplate", "traits": { - "smithy.api#documentation": "

The job template to use for this job.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The job template to use for this job.

" } }, "templateType": { "target": "com.amazonaws.deadline#JobTemplateType", "traits": { - "smithy.api#documentation": "

The file type for the job template.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The file type for the job template.

" } }, "priority": { @@ -2575,6 +2573,12 @@ "smithy.api#default": 5, "smithy.api#documentation": "

The maximum number of retries for each task.

" } + }, + "sourceJobId": { + "target": "com.amazonaws.deadline#JobId", + "traits": { + "smithy.api#documentation": "

The job ID for the source job.

" + } } }, "traits": { @@ -7570,6 +7574,12 @@ "traits": { "smithy.api#documentation": "

The description of the job.

\n \n

This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.

\n
" } + }, + "sourceJobId": { + "target": "com.amazonaws.deadline#JobId", + "traits": { + "smithy.api#documentation": "

The job ID for the source job.

" + } } }, "traits": { @@ -10328,6 +10338,15 @@ "smithy.api#documentation": "

The details of job parameters.

" } }, + "com.amazonaws.deadline#JobParameterDefinition": { + "type": "document" + }, + "com.amazonaws.deadline#JobParameterDefinitions": { + "type": "list", + "member": { + "target": "com.amazonaws.deadline#JobParameterDefinition" + } + }, "com.amazonaws.deadline#JobParameters": { "type": "map", "key": { @@ -10399,6 +10418,9 @@ { "target": "com.amazonaws.deadline#ListJobMembers" }, + { + "target": "com.amazonaws.deadline#ListJobParameterDefinitions" + }, { "target": "com.amazonaws.deadline#ListSessionActions" }, @@ -10573,6 +10595,12 @@ "traits": { "smithy.api#documentation": "

The job parameters.

" } + }, + "sourceJobId": { + "target": "com.amazonaws.deadline#JobId", + "traits": { + "smithy.api#documentation": "

The job ID for the source job.

" + } } }, "traits": { @@ -10690,6 +10718,12 @@ "traits": { "smithy.api#documentation": "

The maximum number of retries for a job.

" } + }, + "sourceJobId": { + "target": "com.amazonaws.deadline#JobId", + "traits": { + "smithy.api#documentation": "

The job ID for the source job.

" + } } }, "traits": { @@ -11651,6 +11685,125 @@ "smithy.api#output": {} } }, + "com.amazonaws.deadline#ListJobParameterDefinitions": { + "type": "operation", + "input": { + "target": "com.amazonaws.deadline#ListJobParameterDefinitionsRequest" + }, + "output": { + "target": "com.amazonaws.deadline#ListJobParameterDefinitionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.deadline#AccessDeniedException" + }, + { + "target": "com.amazonaws.deadline#InternalServerErrorException" + }, + { + "target": "com.amazonaws.deadline#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.deadline#ThrottlingException" + }, + { + "target": "com.amazonaws.deadline#ValidationException" + } + ], + "traits": { + "aws.iam#iamAction": { + "name": "ListJobParameterDefinitions", + "documentation": "Grants permission to get a job's parameter definitions in the job template", + "requiredActions": [ + "identitystore:ListGroupMembershipsForMember" + ] + }, + "smithy.api#documentation": "

Lists parameter definitions of a job.

", + "smithy.api#endpoint": { + "hostPrefix": "management." + }, + "smithy.api#http": { + "method": "GET", + "uri": "/2023-10-12/farms/{farmId}/queues/{queueId}/jobs/{jobId}/parameter-definitions", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "jobParameterDefinitions" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.deadline#ListJobParameterDefinitionsRequest": { + "type": "structure", + "members": { + "farmId": { + "target": "com.amazonaws.deadline#FarmId", + "traits": { + "smithy.api#documentation": "

The farm ID of the job to list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "jobId": { + "target": "com.amazonaws.deadline#JobId", + "traits": { + "smithy.api#documentation": "

The job ID to include on the list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "queueId": { + "target": "com.amazonaws.deadline#QueueId", + "traits": { + "smithy.api#documentation": "

The queue ID to include on the list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.deadline#String", + "traits": { + "smithy.api#documentation": "

The token for the next set of results, or null to start from the beginning.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.deadline#MaxResults", + "traits": { + "smithy.api#default": 100, + "smithy.api#documentation": "

The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.deadline#ListJobParameterDefinitionsResponse": { + "type": "structure", + "members": { + "jobParameterDefinitions": { + "target": "com.amazonaws.deadline#JobParameterDefinitions", + "traits": { + "smithy.api#documentation": "

Lists parameter definitions of a job.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.deadline#String", + "traits": { + "smithy.api#documentation": "

If Deadline Cloud returns nextToken, then there are more results available. The value of nextToken is a unique pagination token for each page. To retrieve the next page, call the operation again using the returned token. Keep all other arguments unchanged. If no results remain, then nextToken is set to null. Each pagination token expires after 24 hours. If you provide a token that isn't valid, then you receive an HTTP 400 ValidationException error.
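A minimal sketch of the nextToken loop described above, assuming the regenerated SotoDeadline client exposes listJobParameterDefinitions with these member names; `deadline`, `farmId`, `queueId`, and `jobId` are assumed to already be defined:

```swift
import SotoDeadline

// Sketch only: `listJobParameterDefinitions` is the operation added above;
// the surrounding client and identifiers are placeholders.
var nextToken: String?
repeat {
    let page = try await deadline.listJobParameterDefinitions(
        .init(farmId: farmId, jobId: jobId, nextToken: nextToken, queueId: queueId)
    )
    print("fetched \(page.jobParameterDefinitions.count) parameter definitions")
    nextToken = page.nextToken   // nil once no results remain
} while nextToken != nil
```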

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.deadline#ListJobs": { "type": "operation", "input": { diff --git a/models/directory-service-data.json b/models/directory-service-data.json new file mode 100644 index 0000000000..04f9162bda --- /dev/null +++ b/models/directory-service-data.json @@ -0,0 +1,3464 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.directoryservicedata#AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.directoryservicedata#ExceptionMessage" + }, + "Reason": { + "target": "com.amazonaws.directoryservicedata#AccessDeniedReason", + "traits": { + "smithy.api#documentation": "

The reason the request was unauthorized.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

You don't have permission to perform the request or access the directory. It can also\n occur when the DirectoryId doesn't exist or when the user, member, or group is\n outside of your organizational unit (OU).

\n

Make sure that you are authenticated and authorized to perform the action.\n Review the directory information in the request, and make sure that the object isn't outside\n of your OU.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.directoryservicedata#AccessDeniedReason": { + "type": "enum", + "members": { + "IAM_AUTH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IAM_AUTH" + } + }, + "DIRECTORY_AUTH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DIRECTORY_AUTH" + } + }, + "DATA_DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATA_DISABLED" + } + } + } + }, + "com.amazonaws.directoryservicedata#AddGroupMember": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#AddGroupMemberRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#AddGroupMemberResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Adds an existing user, group, or computer as a group member.

", + "smithy.api#http": { + "uri": "/GroupMemberships/AddGroupMember", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#AddGroupMemberRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "GroupName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "MemberName": { + "target": "com.amazonaws.directoryservicedata#MemberName", + "traits": { + "smithy.api#documentation": "

The SAMAccountName of the user, group, or computer to add as a group member.\n

", + "smithy.api#required": {} + } + }, + "MemberRealm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group member. This parameter is required only\n when adding a member outside of your Managed Microsoft AD domain to a group inside of your\n Managed Microsoft AD domain. This parameter defaults to the Managed Microsoft AD domain.

\n \n

This parameter is case insensitive.

\n
" + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of\n the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters\n within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.
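A minimal sketch of the client-token pattern described above, using the new SotoDirectoryServiceData module; the generated member names, the `awsClient`, and the identifiers are assumptions:

```swift
import Foundation
import SotoDirectoryServiceData

// Sketch only: member names follow Soto's usual lowerCamelCase mapping of the
// shapes above; directory, group, and member names are placeholders.
let dsData = DirectoryServiceData(client: awsClient, region: .uswest2)
let token = UUID().uuidString   // reuse the same token when retrying this request

_ = try await dsData.addGroupMember(
    .init(
        clientToken: token,
        directoryId: "d-1234567890",
        groupName: "build-agents",
        memberName: "build-host-01"
    )
)
```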

\n
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#AddGroupMemberResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#AttributeValue": { + "type": "union", + "members": { + "S": { + "target": "com.amazonaws.directoryservicedata#StringAttributeValue", + "traits": { + "smithy.api#documentation": "

Indicates that the attribute type value is a string. For example:

\n

\n \"S\": \"S Group\"\n

" + } + }, + "N": { + "target": "com.amazonaws.directoryservicedata#NumberAttributeValue", + "traits": { + "smithy.api#documentation": "

Indicates that the attribute type value is a number. For example:

\n

\n \"N\": \"16\"\n

" + } + }, + "BOOL": { + "target": "com.amazonaws.directoryservicedata#BooleanAttributeValue", + "traits": { + "smithy.api#documentation": "

Indicates that the attribute type value is a boolean. For example:

\n

\n \"BOOL\": true\n

" + } + }, + "SS": { + "target": "com.amazonaws.directoryservicedata#StringSetAttributeValue", + "traits": { + "smithy.api#documentation": "

Indicates that the attribute type value is a string set. For example:

\n

\n \"SS\": [\"sample_service_class/host.sample.com:1234/sample_service_name_1\",\n \"sample_service_class/host.sample.com:1234/sample_service_name_2\"]\n

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The data type for an attribute. Each attribute value is described as a name-value pair.\n The name is the AD schema name, and the value is the data itself. For a list of supported\n attributes, see Directory Service Data Attributes.\n
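A minimal sketch of building an OtherAttributes map from this union, assuming Soto's usual enum-with-associated-values mapping for Smithy unions (cases .s, .n, .bool, .ss); the attribute names and values are illustrative:

```swift
// Sketch only: the union-to-enum mapping and attribute names are assumptions.
let otherAttributes: [String: DirectoryServiceData.AttributeValue] = [
    "description": .s("Hosts allowed to run CI jobs"),
    "groupType": .n("16"),
    "mail": .s("build-agents@example.com")
]
```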

" + } + }, + "com.amazonaws.directoryservicedata#Attributes": { + "type": "map", + "key": { + "target": "com.amazonaws.directoryservicedata#LdapDisplayName" + }, + "value": { + "target": "com.amazonaws.directoryservicedata#AttributeValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "com.amazonaws.directoryservicedata#BooleanAttributeValue": { + "type": "boolean", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#ClientToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\x00-\\x7F]+$" + } + }, + "com.amazonaws.directoryservicedata#ConflictException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.directoryservicedata#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

This error will occur when you try to create a resource that conflicts with an existing\n object. It can also occur when adding a member to a group that the member is already\n in.

\n

This error can be caused by a request sent within the 8-hour idempotency window with the\n same client token but different input parameters. Client tokens should not be re-used across\n different requests. After 8 hours, any request with the same client token is treated as a new\n request.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.directoryservicedata#CreateGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#CreateGroupRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#CreateGroupResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new group.

", + "smithy.api#http": { + "uri": "/Groups/CreateGroup", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#CreateGroupRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "GroupType": { + "target": "com.amazonaws.directoryservicedata#GroupType", + "traits": { + "smithy.api#documentation": "

The AD group type. For details, see Active Directory security group type.

" + } + }, + "GroupScope": { + "target": "com.amazonaws.directoryservicedata#GroupScope", + "traits": { + "smithy.api#documentation": "

The scope of the AD group. For details, see Active Directory security group scope.

" + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#Attributes", + "traits": { + "smithy.api#documentation": "

An expression that defines one or more attributes with the data type and value of each\n attribute.

" + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of\n the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters\n within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.

\n
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#CreateGroupResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

" + } + }, + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the group.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#CreateUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#CreateUserRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#CreateUserResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new user.

", + "smithy.api#http": { + "uri": "/Users/CreateUser", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#CreateUserRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

", + "smithy.api#required": {} + } + }, + "EmailAddress": { + "target": "com.amazonaws.directoryservicedata#EmailAddress", + "traits": { + "smithy.api#documentation": "

The email address of the user.

" + } + }, + "GivenName": { + "target": "com.amazonaws.directoryservicedata#GivenName", + "traits": { + "smithy.api#documentation": "

The first name of the user.

" + } + }, + "Surname": { + "target": "com.amazonaws.directoryservicedata#Surname", + "traits": { + "smithy.api#documentation": "

The last name of the user.

" + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#Attributes", + "traits": { + "smithy.api#documentation": "

An expression that defines one or more attribute names with the data type and value of\n each attribute. A key is an attribute name, and the value is a list of maps. For a list of\n supported attributes, see Directory Service Data Attributes.

\n \n

Attribute names are case insensitive.

\n
" + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of\n the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters\n within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.

\n
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#CreateUserResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

" + } + }, + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the user.

" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#DeleteGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#DeleteGroupRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#DeleteGroupResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a group.

", + "smithy.api#http": { + "uri": "/Groups/DeleteGroup", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#DeleteGroupRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of\n the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters\n within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.

\n
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#DeleteGroupResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#DeleteUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#DeleteUserRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#DeleteUserResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a user.

", + "smithy.api#http": { + "uri": "/Users/DeleteUser", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#DeleteUserRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

", + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of\n the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters\n within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.

\n
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#DeleteUserResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#DescribeGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#DescribeGroupRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#DescribeGroupResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about a specific group.

", + "smithy.api#http": { + "uri": "/Groups/DescribeGroup", + "method": "POST" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "DescribeGroupFailure", + "params": { + "DirectoryId": "d-1111111111", + "SAMAccountName": "test-group" + }, + "expect": { + "failure": { + "errorId": "com.amazonaws.directoryservicedata#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-west-2" + } + } + ] + } + }, + "com.amazonaws.directoryservicedata#DescribeGroupRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group.

\n \n

This parameter is optional, so you can return groups outside of your Managed Microsoft AD\n domain. When no value is defined, only your Managed Microsoft AD groups are returned.

\n

This value is case insensitive.

\n
" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#LdapDisplayNameList", + "traits": { + "smithy.api#documentation": "

One or more attributes to be returned for the group. For a list of supported attributes,\n see Directory Service Data Attributes.\n
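A minimal sketch of DescribeGroup with additional attributes requested, reusing the `dsData` client from the earlier sketch; the generated member names (samAccountName, otherAttributes) and identifiers are assumptions:

```swift
// Sketch only: generated shapes and identifiers are placeholders.
let group = try await dsData.describeGroup(
    .init(
        directoryId: "d-1234567890",
        otherAttributes: ["description", "mail"],   // LDAP display names to return
        samAccountName: "build-agents"
    )
)
print(group.distinguishedName ?? "distinguished name not returned")
```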

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#DescribeGroupResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group.

" + } + }, + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the group.

" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

" + } + }, + "DistinguishedName": { + "target": "com.amazonaws.directoryservicedata#DistinguishedName", + "traits": { + "smithy.api#documentation": "

The distinguished name of the object.

" + } + }, + "GroupType": { + "target": "com.amazonaws.directoryservicedata#GroupType", + "traits": { + "smithy.api#documentation": "

The AD group type. For details, see Active Directory security group type.

" + } + }, + "GroupScope": { + "target": "com.amazonaws.directoryservicedata#GroupScope", + "traits": { + "smithy.api#documentation": "

The scope of the AD group. For details, see Active Directory security groups.

" + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#Attributes", + "traits": { + "smithy.api#documentation": "

The attribute values that are returned for the attribute names that are included in the\n request.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#DescribeUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#DescribeUserRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#DescribeUserResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about a specific user.

", + "smithy.api#http": { + "uri": "/Users/DescribeUser", + "method": "POST" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "DescribeUserFailure", + "params": { + "DirectoryId": "d-1111111111", + "SAMAccountName": "test-user" + }, + "expect": { + "failure": { + "errorId": "com.amazonaws.directoryservicedata#AccessDeniedException" + } + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-west-2" + } + } + ] + } + }, + "com.amazonaws.directoryservicedata#DescribeUserRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

", + "smithy.api#required": {} + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#LdapDisplayNameList", + "traits": { + "smithy.api#documentation": "

One or more attribute names to be returned for the user. A key is an attribute name, and\n the value is a list of maps. For a list of supported attributes, see Directory Service Data Attributes.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the user.

\n \n

This parameter is optional, so you can return users outside your Managed Microsoft AD domain.\n When no value is defined, only your Managed Microsoft AD users are returned.

\n

This value is case insensitive.

\n
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#DescribeUserResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the user.

" + } + }, + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the user.

" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

" + } + }, + "DistinguishedName": { + "target": "com.amazonaws.directoryservicedata#DistinguishedName", + "traits": { + "smithy.api#documentation": "

The distinguished name of the object.

" + } + }, + "UserPrincipalName": { + "target": "com.amazonaws.directoryservicedata#UserPrincipalName", + "traits": { + "smithy.api#documentation": "

The user principal name (UPN), an Internet-style login name for a user that is based on the Internet\n standard RFC 822. The UPN is shorter\n than the distinguished name and easier to remember.

" + } + }, + "EmailAddress": { + "target": "com.amazonaws.directoryservicedata#EmailAddress", + "traits": { + "smithy.api#documentation": "

The email address of the user.

" + } + }, + "GivenName": { + "target": "com.amazonaws.directoryservicedata#GivenName", + "traits": { + "smithy.api#documentation": "

The first name of the user.

" + } + }, + "Surname": { + "target": "com.amazonaws.directoryservicedata#Surname", + "traits": { + "smithy.api#documentation": "

The last name of the user.

" + } + }, + "Enabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the user account is active.

" + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#Attributes", + "traits": { + "smithy.api#documentation": "

The attribute values that are returned for the attribute names that are included in the\n request.

\n \n

Attribute names are case insensitive.

\n
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#DirectoryId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^d-[0-9a-f]{10}$" + } + }, + "com.amazonaws.directoryservicedata#DirectoryServiceData": { + "type": "service", + "version": "2023-05-31", + "operations": [ + { + "target": "com.amazonaws.directoryservicedata#AddGroupMember" + }, + { + "target": "com.amazonaws.directoryservicedata#CreateGroup" + }, + { + "target": "com.amazonaws.directoryservicedata#CreateUser" + }, + { + "target": "com.amazonaws.directoryservicedata#DeleteGroup" + }, + { + "target": "com.amazonaws.directoryservicedata#DeleteUser" + }, + { + "target": "com.amazonaws.directoryservicedata#DescribeGroup" + }, + { + "target": "com.amazonaws.directoryservicedata#DescribeUser" + }, + { + "target": "com.amazonaws.directoryservicedata#DisableUser" + }, + { + "target": "com.amazonaws.directoryservicedata#ListGroupMembers" + }, + { + "target": "com.amazonaws.directoryservicedata#ListGroups" + }, + { + "target": "com.amazonaws.directoryservicedata#ListGroupsForMember" + }, + { + "target": "com.amazonaws.directoryservicedata#ListUsers" + }, + { + "target": "com.amazonaws.directoryservicedata#RemoveGroupMember" + }, + { + "target": "com.amazonaws.directoryservicedata#SearchGroups" + }, + { + "target": "com.amazonaws.directoryservicedata#SearchUsers" + }, + { + "target": "com.amazonaws.directoryservicedata#UpdateGroup" + }, + { + "target": "com.amazonaws.directoryservicedata#UpdateUser" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "Directory Service Data", + "arnNamespace": "ds", + "cloudFormationName": "DirectoryServiceData", + "cloudTrailEventSource": "ds.amazonaws.com", + "endpointPrefix": "ds-data" + }, + "aws.auth#sigv4": { + "name": "ds-data" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

Amazon Web Services Directory Service Data is an extension of Directory Service. This API reference provides detailed information\n about Directory Service Data operations and object types.

\n

With Directory Service Data, you can create, read, update, and delete users, groups, and memberships from\n your Managed Microsoft AD without additional costs and without deploying dedicated management\n instances. You can also perform built-in object management tasks across directories without\n direct network connectivity, which simplifies provisioning and access management to achieve\n fully automated deployments. Directory Service Data supports user and group write operations, such as\n CreateUser and CreateGroup, within the organizational unit (OU) of\n your Managed Microsoft AD. Directory Service Data supports read operations, such as ListUsers and\n ListGroups, on all users, groups, and group memberships within your\n Managed Microsoft AD and across trusted realms. Directory Service Data supports adding and removing group members in\n your OU and the Amazon Web Services Delegated Groups OU, so you can grant and deny access to specific roles\n and permissions. For more information, see Manage users and\n groups in the Directory Service Administration Guide.

\n \n

Directory management operations and configuration changes made against the Directory Service\n API are also reflected in the Directory Service Data API with eventual consistency. You can expect a short delay\n between making a management change, such as adding a new directory trust, and being able to call the Directory Service Data API\n for the newly created trusted realm.

\n
\n

Directory Service Data connects to your Managed Microsoft AD domain controllers and performs operations on\n underlying directory objects. When you create your Managed Microsoft AD, you choose subnets for domain\n controllers that Directory Service creates on your behalf. If a domain controller is unavailable, Directory Service Data\n uses an available domain controller. As a result, you might notice eventual consistency while\n objects replicate from one domain controller to another domain controller. For more\n information, see What\n gets created in the Directory Service Administration Guide.\n Directory limits vary by Managed Microsoft AD edition:

  • Standard edition – Supports 8 transactions per second (TPS) for read operations and 4 TPS for write operations per directory. There's a concurrency limit of 10 concurrent requests.
  • Enterprise edition – Supports 16 transactions per second (TPS) for read operations and 8 TPS for write operations per directory. There's a concurrency limit of 10 concurrent requests.
  • Amazon Web Services Account – Supports a total of 100 TPS for Directory Service Data operations across all directories.

Directory Service Data only supports the Managed Microsoft AD directory type and is only available in the primary\n Amazon Web Services Region. For more information, see Managed Microsoft AD\n and Primary vs additional Regions in the Directory Service Administration\n Guide.

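Because the service is only reachable in the directory's primary Region (SigV4 service name "ds-data", endpoint prefix "ds-data"), a small end-to-end Soto program might look like the sketch below. It assumes Soto 7's default AWSClient initializer and the generated SotoDirectoryServiceData module added by this change; the directory ID is a placeholder.

    import SotoCore
    import SotoDirectoryServiceData

    @main
    struct DSDataExample {
        static func main() async throws {
            // Default AWSClient configuration (Soto 7-style initializer).
            let awsClient = AWSClient()
            // Use the primary Region of your Managed Microsoft AD; Directory Service Data
            // is not available in additional Regions.
            let dsData = DirectoryServiceData(client: awsClient, region: .uswest2)

            // Placeholder directory ID; lists the first page of users.
            let result = try await dsData.listUsers(.init(directoryId: "d-1234567890", maxResults: 10))
            print("\(result.users?.count ?? 0) users returned")

            // Shut down the AWSClient when finished (see the Soto documentation).
        }
    }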
", + "smithy.api#title": "AWS Directory Service Data", + "smithy.api#xmlNamespace": { + "uri": "http://directoryservicedata.amazonaws.com/doc/2023-05-31/" + }, + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ds-data-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": 
[ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ds-data-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ds-data.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://ds-data.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ds-data-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ds-data.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ds-data-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ds-data.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": 
true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ds-data-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://ds-data.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://ds-data-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + 
"endpoint": { + "url": "https://ds-data.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.directoryservicedata#DirectoryUnavailableException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.directoryservicedata#ExceptionMessage" + }, + "Reason": { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableReason", + "traits": { + "smithy.api#documentation": "

Reason the request failed for the specified directory.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request could not be completed due to a problem in the configuration or current state\n of the specified directory.

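Since this exception carries the smithy.api#retryable trait, a caller may catch it and retry. The sketch below assumes Soto's usual <Service>ErrorType naming for the generated error type of this new service, with `dsData` as in the setup sketch and placeholder values.

    do {
        _ = try await dsData.describeUser(.init(directoryId: "d-1234567890", samAccountName: "jdoe"))
    } catch let error as DirectoryServiceDataErrorType where error == .directoryUnavailableException {
        // The exception is marked retryable: back off briefly, then try once more.
        try await Task.sleep(nanoseconds: 2_000_000_000)
        _ = try await dsData.describeUser(.init(directoryId: "d-1234567890", samAccountName: "jdoe"))
    }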
", + "smithy.api#error": "client", + "smithy.api#httpError": 400, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.directoryservicedata#DirectoryUnavailableReason": { + "type": "enum", + "members": { + "INVALID_DIRECTORY_STATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_DIRECTORY_STATE" + } + }, + "DIRECTORY_TIMEOUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DIRECTORY_TIMEOUT" + } + }, + "DIRECTORY_RESOURCES_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DIRECTORY_RESOURCES_EXCEEDED" + } + }, + "NO_DISK_SPACE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NO_DISK_SPACE" + } + }, + "TRUST_AUTH_FAILURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TRUST_AUTH_FAILURE" + } + } + } + }, + "com.amazonaws.directoryservicedata#DisableUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#DisableUserRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#DisableUserResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deactivates an active user account. For information about how to enable an inactive user\n account, see ResetUserPassword\n in the Directory Service API Reference.

", + "smithy.api#http": { + "uri": "/Users/DisableUser", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#DisableUserRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

", + "smithy.api#required": {} + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of\n the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters\n within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.

\n
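A sketch of passing an explicit client token so a retried DisableUser call stays idempotent within the 8-hour window. Soto typically fills idempotency-token members automatically when they are omitted, so the explicit token here is only for illustration; values are placeholders and `dsData` is the client from the setup sketch.

    import Foundation  // for UUID

    let token = UUID().uuidString
    _ = try await dsData.disableUser(.init(
        clientToken: token,
        directoryId: "d-1234567890",
        samAccountName: "jdoe"
    ))
    // Re-sending the identical request with the same token has no further effect;
    // changing any other parameter while reusing this token yields a ConflictException.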
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#DisableUserResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#DistinguishedName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#EmailAddress": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#ExceptionMessage": { + "type": "string" + }, + "com.amazonaws.directoryservicedata#GivenName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#Group": { + "type": "structure", + "members": { + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the group.

" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "DistinguishedName": { + "target": "com.amazonaws.directoryservicedata#DistinguishedName", + "traits": { + "smithy.api#documentation": "

The distinguished name of the object.

" + } + }, + "GroupType": { + "target": "com.amazonaws.directoryservicedata#GroupType", + "traits": { + "smithy.api#documentation": "

The AD group type. For details, see Active Directory security group type.

" + } + }, + "GroupScope": { + "target": "com.amazonaws.directoryservicedata#GroupScope", + "traits": { + "smithy.api#documentation": "

The scope of the AD group. For details, see Active Directory security groups.\n

" + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#Attributes", + "traits": { + "smithy.api#documentation": "

An expression of one or more attributes, data types, and the values of a group.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A group object that contains identifying information and attributes for a specified\n group.

" + } + }, + "com.amazonaws.directoryservicedata#GroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.directoryservicedata#Group" + } + }, + "com.amazonaws.directoryservicedata#GroupName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$" + } + }, + "com.amazonaws.directoryservicedata#GroupScope": { + "type": "enum", + "members": { + "DOMAIN_LOCAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DomainLocal" + } + }, + "GLOBAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Global" + } + }, + "UNIVERSAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Universal" + } + }, + "BUILTIN_LOCAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BuiltinLocal" + } + } + } + }, + "com.amazonaws.directoryservicedata#GroupSummary": { + "type": "structure", + "members": { + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the group.

", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "GroupType": { + "target": "com.amazonaws.directoryservicedata#GroupType", + "traits": { + "smithy.api#documentation": "

The AD group type. For details, see Active Directory security group type.

", + "smithy.api#required": {} + } + }, + "GroupScope": { + "target": "com.amazonaws.directoryservicedata#GroupScope", + "traits": { + "smithy.api#documentation": "

The scope of the AD group. For details, see Active Directory security groups.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing a subset of fields of a group object from a directory.

" + } + }, + "com.amazonaws.directoryservicedata#GroupSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.directoryservicedata#GroupSummary" + } + }, + "com.amazonaws.directoryservicedata#GroupType": { + "type": "enum", + "members": { + "DISTRIBUTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Distribution" + } + }, + "SECURITY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Security" + } + } + } + }, + "com.amazonaws.directoryservicedata#InternalServerException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.directoryservicedata#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The operation didn't succeed because an internal error occurred. Try again later.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.directoryservicedata#LdapDisplayName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 63 + }, + "smithy.api#pattern": "^[A-Za-z*][A-Za-z-*]*$" + } + }, + "com.amazonaws.directoryservicedata#LdapDisplayNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.directoryservicedata#LdapDisplayName" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "com.amazonaws.directoryservicedata#ListGroupMembers": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#ListGroupMembersRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#ListGroupMembersResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns member information for the specified group.

\n

This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the\n ListGroupMembers.NextToken member contains a token that you pass in the next\n call to ListGroupMembers. This retrieves the next set of items.

\n

You can also specify the maximum number of results to return with the MaxResults\n parameter.

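A sketch of paging through all members by hand with NextToken, assuming the generated Soto request/response shapes and camel-cased member names; the directory ID and group name are placeholders.

    var nextToken: String? = nil
    repeat {
        let page = try await dsData.listGroupMembers(.init(
            directoryId: "d-1234567890",
            maxResults: 50,
            nextToken: nextToken,
            samAccountName: "example-group"
        ))
        for member in page.members ?? [] {
            print(member.samAccountName, member.memberType)
        }
        nextToken = page.nextToken
    } while nextToken != nil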
", + "smithy.api#http": { + "uri": "/GroupMemberships/ListGroupMembers", + "method": "POST" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Members" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.directoryservicedata#ListGroupMembersRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group.

\n \n

This parameter is optional, so you can return members from a group outside of your\n Managed Microsoft AD domain. When no value is defined, only members of your Managed Microsoft AD groups are\n returned.

\n

This value is case insensitive.

\n
" + } + }, + "MemberRealm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group member. This parameter defaults to the\n Managed Microsoft AD domain.

\n \n

This parameter is optional and case insensitive.

\n
" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.directoryservicedata#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned per request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#ListGroupMembersResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group.

" + } + }, + "MemberRealm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the member.

" + } + }, + "Members": { + "target": "com.amazonaws.directoryservicedata#MemberList", + "traits": { + "smithy.api#documentation": "

The member information that the request returns.

" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#ListGroups": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#ListGroupsRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#ListGroupsResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns group information for the specified directory.

\n

This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the ListGroups.NextToken\n member contains a token that you pass in the next call to ListGroups. This\n retrieves the next set of items.

\n

You can also specify the maximum number of results to return with the MaxResults\n parameter.

", + "smithy.api#http": { + "uri": "/Groups/ListGroups", + "method": "POST" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Groups" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.directoryservicedata#ListGroupsForMember": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#ListGroupsForMemberRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#ListGroupsForMemberResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns group information for the specified member.

\n

This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the\n ListGroupsForMember.NextToken member contains a token that you pass in the next\n call to ListGroupsForMember. This retrieves the next set of items.

\n

You can also specify the maximum number of results to return with the MaxResults\n parameter.

", + "smithy.api#http": { + "uri": "/GroupMemberships/ListGroupsForMember", + "method": "POST" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Groups" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.directoryservicedata#ListGroupsForMemberRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the member.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group.

\n \n

This parameter is optional, so you can return groups outside of your Managed Microsoft AD\n domain. When no value is defined, only your Managed Microsoft AD groups are returned.

\n

This value is case insensitive and defaults to your Managed Microsoft AD domain.

\n
" + } + }, + "MemberRealm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group member.

\n \n

This parameter is optional, so you can limit your results to the group members in a\n specific domain.

\n

This parameter is case insensitive and defaults to Realm.\n

\n
" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#MemberName", + "traits": { + "smithy.api#documentation": "

The SAMAccountName of the user, group, or computer that's a member of the\n group.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.directoryservicedata#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned per request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#ListGroupsForMemberResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the member.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain that's associated with the group.

" + } + }, + "MemberRealm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain that's associated with the member.

" + } + }, + "Groups": { + "target": "com.amazonaws.directoryservicedata#GroupSummaryList", + "traits": { + "smithy.api#documentation": "

The group information that the request returns.

" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#ListGroupsRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name associated with the directory.

\n \n

This parameter is optional, so you can return groups outside of your Managed Microsoft AD\n domain. When no value is defined, only your Managed Microsoft AD groups are returned.

\n

This value is case insensitive.

\n
" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.directoryservicedata#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned per request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#ListGroupsResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name associated with the group.

" + } + }, + "Groups": { + "target": "com.amazonaws.directoryservicedata#GroupSummaryList", + "traits": { + "smithy.api#documentation": "

The group information that the request returns.

" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#ListUsers": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#ListUsersRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#ListUsersResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns user information for the specified directory.

\n

This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the ListUsers.NextToken\n member contains a token that you pass in the next call to ListUsers. This\n retrieves the next set of items.

\n

You can also specify the maximum number of results to return with the MaxResults\n parameter.

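As an alternative to the manual NextToken loop shown earlier for ListGroupMembers, Soto generates paginator helpers for operations marked smithy.api#paginated. The method name below (listUsersPaginator) is assumed from that convention; the directory ID is a placeholder.

    for try await page in dsData.listUsersPaginator(.init(directoryId: "d-1234567890")) {
        for user in page.users ?? [] {
            print(user)
        }
    }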
", + "smithy.api#http": { + "uri": "/Users/ListUsers", + "method": "POST" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Users" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.directoryservicedata#ListUsersRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the user.

\n \n

This parameter is optional, so you can return users outside of your Managed Microsoft AD\n domain. When no value is defined, only your Managed Microsoft AD users are returned.

\n

This value is case insensitive.

\n
" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.directoryservicedata#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned per request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#ListUsersResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain that's associated with the user.

" + } + }, + "Users": { + "target": "com.amazonaws.directoryservicedata#UserSummaryList", + "traits": { + "smithy.api#documentation": "

The user information that the request returns.

" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 250 + } + } + }, + "com.amazonaws.directoryservicedata#Member": { + "type": "structure", + "members": { + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the group member.

", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#MemberName", + "traits": { + "smithy.api#documentation": "

The name of the group member.

", + "smithy.api#required": {} + } + }, + "MemberType": { + "target": "com.amazonaws.directoryservicedata#MemberType", + "traits": { + "smithy.api#documentation": "

The AD type of the member object.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A member object that contains identifying information for a specified member.

" + } + }, + "com.amazonaws.directoryservicedata#MemberList": { + "type": "list", + "member": { + "target": "com.amazonaws.directoryservicedata#Member" + } + }, + "com.amazonaws.directoryservicedata#MemberName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 63 + }, + "smithy.api#pattern": "^[^:;|=+\"*?<>/\\\\,\\[\\]@]+$" + } + }, + "com.amazonaws.directoryservicedata#MemberType": { + "type": "enum", + "members": { + "USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "USER" + } + }, + "GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GROUP" + } + }, + "COMPUTER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPUTER" + } + } + } + }, + "com.amazonaws.directoryservicedata#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 6144 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#NumberAttributeValue": { + "type": "long", + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#Realm": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^([a-zA-Z0-9]+[\\\\.-])+([a-zA-Z0-9])+[.]?$" + } + }, + "com.amazonaws.directoryservicedata#RemoveGroupMember": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#RemoveGroupMemberRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#RemoveGroupMemberResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes a member from a group.

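A short sketch of the call; note the request uses GroupName and MemberName (not SAMAccountName), and MemberRealm is only needed for members outside the Managed Microsoft AD domain. Values are placeholders and `dsData` is the client from the setup sketch.

    _ = try await dsData.removeGroupMember(.init(
        directoryId: "d-1234567890",
        groupName: "example-group",
        memberName: "jdoe"
    ))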
", + "smithy.api#http": { + "uri": "/GroupMemberships/RemoveGroupMember", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#RemoveGroupMemberRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the member.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "GroupName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "MemberName": { + "target": "com.amazonaws.directoryservicedata#MemberName", + "traits": { + "smithy.api#documentation": "

The SAMAccountName of the user, group, or computer to remove from the group.\n

", + "smithy.api#required": {} + } + }, + "MemberRealm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group member. This parameter defaults to the\n Managed Microsoft AD domain.

\n \n

This parameter is optional and case insensitive.

\n
" + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of\n the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters\n within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.

\n
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#RemoveGroupMemberResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.directoryservicedata#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The resource couldn't be found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.directoryservicedata#SID": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.directoryservicedata#SearchGroups": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#SearchGroupsRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#SearchGroupsResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches the specified directory for a group. You can find groups whose attribute values\n match the value that you provide in the\n SearchString parameter.

\n

This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the SearchGroups.NextToken\n member contains a token that you pass in the next call to SearchGroups. This\n retrieves the next set of items.

\n

You can also specify the maximum number of results to return with the MaxResults\n parameter.

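A sketch of a group search against two attributes; the attribute names and search value are placeholders (wildcards are not supported), and the shapes assume Soto's generated API.

    let found = try await dsData.searchGroups(.init(
        directoryId: "d-1234567890",
        searchAttributes: ["cn", "description"],
        searchString: "finance"
    ))
    for group in found.groups ?? [] {
        print(group.samAccountName)
    }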
", + "smithy.api#http": { + "uri": "/Groups/SearchGroups", + "method": "POST" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Groups" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.directoryservicedata#SearchGroupsRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SearchString": { + "target": "com.amazonaws.directoryservicedata#SearchString", + "traits": { + "smithy.api#documentation": "

The attribute value that you want to search for.

\n \n

Wildcard (*) searches aren't supported. For a list of supported\n attributes, see Directory Service Data\n Attributes.

\n
", + "smithy.api#required": {} + } + }, + "SearchAttributes": { + "target": "com.amazonaws.directoryservicedata#LdapDisplayNameList", + "traits": { + "smithy.api#documentation": "

One or more data attributes that are used to search for a group. For a list of supported\n attributes, see Directory Service Data Attributes.\n

", + "smithy.api#required": {} + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the group.

\n \n

This parameter is optional, so you can return groups outside of your Managed Microsoft AD\n domain. When no value is defined, only your Managed Microsoft AD groups are returned.

\n

This value is case insensitive.

\n
" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.directoryservicedata#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned per request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#SearchGroupsResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain that's associated with the group.

" + } + }, + "Groups": { + "target": "com.amazonaws.directoryservicedata#GroupList", + "traits": { + "smithy.api#documentation": "

The group information that the request returns.

" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#SearchString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#SearchUsers": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#SearchUsersRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#SearchUsersResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Searches the specified directory for a user. You can find users that match the\n SearchString parameter with the value of their attributes included in the\n SearchString parameter.

\n

This operation supports pagination with the use of the NextToken request and\n response parameters. If more results are available, the SearchUsers.NextToken\n member contains a token that you pass in the next call to SearchUsers. This\n retrieves the next set of items.

\n

You can also specify a maximum number of return results with the MaxResults\n parameter.

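SearchUsers has the same request/response and paging shape. A short hedged sketch of a single page, continuing in the same async context as the SearchGroups example and adding the optional Realm parameter (all values are placeholders):

// Assumes the directoryData client from the SearchGroups sketch above.
let request = DirectoryServiceData.SearchUsersRequest(
    directoryId: "d-1234567890",
    maxResults: 25,
    realm: "corp.example.com",                 // optional; omit it to search only your Managed Microsoft AD users
    searchAttributes: ["GivenName", "Surname"],
    searchString: "smith"
)
let response = try await directoryData.searchUsers(request)
for user in response.users ?? [] {
    // SAMAccountName is required in the model; EmailAddress and Enabled are optional.
    print(user.samAccountName, user.emailAddress ?? "-", user.enabled ?? false)
}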
", + "smithy.api#http": { + "uri": "/Users/SearchUsers", + "method": "POST" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Users" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.directoryservicedata#SearchUsersRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain name that's associated with the user.

\n \n

This parameter is optional, so you can return users outside of your Managed Microsoft AD\n domain. When no value is defined, only your Managed Microsoft AD users are returned.

\n

This value is case insensitive.

\n
" + } + }, + "SearchString": { + "target": "com.amazonaws.directoryservicedata#SearchString", + "traits": { + "smithy.api#documentation": "

The attribute value that you want to search for.

\n \n

Wildcard (*) searches aren't supported. For a list of supported\n attributes, see Directory Service Data\n Attributes.

\n
", + "smithy.api#required": {} + } + }, + "SearchAttributes": { + "target": "com.amazonaws.directoryservicedata#LdapDisplayNameList", + "traits": { + "smithy.api#documentation": "

One or more data attributes that are used to search for a user. For a list of supported\n attributes, see Directory Service Data Attributes.\n

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.directoryservicedata#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned per request.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#SearchUsersResult": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

" + } + }, + "Realm": { + "target": "com.amazonaws.directoryservicedata#Realm", + "traits": { + "smithy.api#documentation": "

The domain that's associated with the user.

" + } + }, + "Users": { + "target": "com.amazonaws.directoryservicedata#UserList", + "traits": { + "smithy.api#documentation": "

The user information that the request returns.

" + } + }, + "NextToken": { + "target": "com.amazonaws.directoryservicedata#NextToken", + "traits": { + "smithy.api#documentation": "

An encoded paging token for paginated calls that can be passed back to retrieve the next\n page.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#StringAttributeValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#StringSetAttributeValue": { + "type": "list", + "member": { + "target": "com.amazonaws.directoryservicedata#StringAttributeValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 25 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#Surname": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.directoryservicedata#ExceptionMessage", + "traits": { + "smithy.api#required": {} + } + }, + "RetryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The recommended number of seconds to wait before retrying after a throttling exception.

", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

The limit on the number of requests per second has been exceeded.

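The Retry-After header above surfaces as RetryAfterSeconds on the exception, and the smithy.api#retryable throttling trait means Soto's built-in retry policy normally handles throttling on its own. For callers who want an explicit fallback, here is a generic hedged sketch that is not tied to the generated error type; real code should confirm the error is in fact a throttling error and read the server-provided delay before sleeping.

// Generic retry helper; delaySeconds stands in for the RetryAfterSeconds hint.
func withThrottleRetry<T>(
    attempts: Int = 3,
    delaySeconds: UInt64 = 2,
    _ body: () async throws -> T
) async throws -> T {
    for attempt in 1...attempts {
        do {
            return try await body()
        } catch where attempt < attempts {
            // Assumption: treat the failure as throttling and back off before retrying.
            try await Task.sleep(nanoseconds: delaySeconds * 1_000_000_000)
        }
        // On the last attempt the catch clause no longer matches, so the error propagates.
    }
    fatalError("unreachable: the final attempt either returns or rethrows")
}

// Example use with the earlier SearchGroups sketch:
// let response = try await withThrottleRetry { try await directoryData.searchGroups(request) }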
", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": { + "throttling": true + } + } + }, + "com.amazonaws.directoryservicedata#UpdateGroup": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#UpdateGroupRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#UpdateGroupResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates group information.

", + "smithy.api#http": { + "uri": "/Groups/UpdateGroup", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#UpdateGroupRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the group.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#required": {} + } + }, + "GroupType": { + "target": "com.amazonaws.directoryservicedata#GroupType", + "traits": { + "smithy.api#documentation": "

The AD group type. For details, see Active Directory security group type.

" + } + }, + "GroupScope": { + "target": "com.amazonaws.directoryservicedata#GroupScope", + "traits": { + "smithy.api#documentation": "

The scope of the AD group. For details, see Active Directory security groups.

" + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#Attributes", + "traits": { + "smithy.api#documentation": "

An expression that defines one or more attributes with the data type and the value of\n each attribute.

" + } + }, + "UpdateType": { + "target": "com.amazonaws.directoryservicedata#UpdateType", + "traits": { + "smithy.api#documentation": "

The type of update to be performed. If no value exists for the attribute, use\n ADD. Otherwise, use REPLACE to change an attribute value or\n REMOVE to clear the attribute value.

" + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.

\n
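The UpdateType and ClientToken semantics described above work together: ADD when the attribute has no value yet, REPLACE or REMOVE otherwise, with the client token keeping retries idempotent for eight hours. A hedged sketch, continuing with the same client; the attribute name, the AttributeValue union case, and the enum case names are assumptions based on Soto's usual code generation:

// Same directoryData client as in the earlier sketches; all values are placeholders.
let update = DirectoryServiceData.UpdateGroupRequest(
    clientToken: "example-client-token-0001",  // reusing this token within 8 hours keeps retries idempotent
    directoryId: "d-1234567890",
    otherAttributes: ["description": .s("Build engineers")],
    samAccountName: "build-engineers",
    updateType: .replace                       // ADD when the attribute has no value yet; REMOVE clears it
)
_ = try await directoryData.updateGroup(update)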
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#UpdateGroupResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#UpdateType": { + "type": "enum", + "members": { + "ADD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ADD" + } + }, + "REPLACE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLACE" + } + }, + "REMOVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REMOVE" + } + } + } + }, + "com.amazonaws.directoryservicedata#UpdateUser": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservicedata#UpdateUserRequest" + }, + "output": { + "target": "com.amazonaws.directoryservicedata#UpdateUserResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservicedata#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservicedata#ConflictException" + }, + { + "target": "com.amazonaws.directoryservicedata#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservicedata#InternalServerException" + }, + { + "target": "com.amazonaws.directoryservicedata#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.directoryservicedata#ThrottlingException" + }, + { + "target": "com.amazonaws.directoryservicedata#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates user information.

", + "smithy.api#http": { + "uri": "/Users/UpdateUser", + "method": "POST" + } + } + }, + "com.amazonaws.directoryservicedata#UpdateUserRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservicedata#DirectoryId", + "traits": { + "smithy.api#documentation": "

The identifier (ID) of the directory that's associated with the user.

", + "smithy.api#httpQuery": "DirectoryId", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

", + "smithy.api#required": {} + } + }, + "EmailAddress": { + "target": "com.amazonaws.directoryservicedata#EmailAddress", + "traits": { + "smithy.api#documentation": "

The email address of the user.

" + } + }, + "GivenName": { + "target": "com.amazonaws.directoryservicedata#GivenName", + "traits": { + "smithy.api#documentation": "

The first name of the user.

" + } + }, + "Surname": { + "target": "com.amazonaws.directoryservicedata#Surname", + "traits": { + "smithy.api#documentation": "

The last name of the user.

" + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#Attributes", + "traits": { + "smithy.api#documentation": "

An expression that defines one or more attribute names with the data type and value of\n each attribute. A key is an attribute name, and the value is a list of maps. For a list of\n supported attributes, see Directory Service Data Attributes.

\n \n

Attribute names are case insensitive.

\n
" + } + }, + "UpdateType": { + "target": "com.amazonaws.directoryservicedata#UpdateType", + "traits": { + "smithy.api#documentation": "

The type of update to be performed. If no value exists for the attribute, use\n ADD. Otherwise, use REPLACE to change an attribute value or\n REMOVE to clear the attribute value.

" + } + }, + "ClientToken": { + "target": "com.amazonaws.directoryservicedata#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique and case-sensitive identifier that you provide to ensure the idempotency of the request, so that multiple identical calls have the same effect as a single call.

\n

A client token is valid for 8 hours after the first request that uses it completes. After\n 8 hours, any request with the same client token is treated as a new request. If the request\n succeeds, any future uses of that token will be idempotent for another 8 hours.

\n

If you submit a request with the same client token but change one of the other parameters within the 8-hour idempotency window, Directory Service Data returns a ConflictException.

\n \n

This parameter is optional when using the CLI or SDK.

\n
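UpdateUser follows the same pattern; the sketch below (again with assumed, illustrative names and placeholder values) replaces a user's name and email address in one call:

// Same directoryData client as above.
let userUpdate = DirectoryServiceData.UpdateUserRequest(
    directoryId: "d-1234567890",
    emailAddress: "jane.doe@corp.example.com",
    givenName: "Jane",
    samAccountName: "jdoe",
    surname: "Doe",
    updateType: .replace                       // REPLACE overwrites existing values; ADD sets ones that are empty
)
_ = try await directoryData.updateUser(userUpdate)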
", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservicedata#UpdateUserResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.directoryservicedata#User": { + "type": "structure", + "members": { + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the user.

" + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

", + "smithy.api#required": {} + } + }, + "DistinguishedName": { + "target": "com.amazonaws.directoryservicedata#DistinguishedName", + "traits": { + "smithy.api#documentation": "

The distinguished name of the object.

" + } + }, + "UserPrincipalName": { + "target": "com.amazonaws.directoryservicedata#UserPrincipalName", + "traits": { + "smithy.api#documentation": "

The UPN, an internet-style login name for a user that is based on the internet standard RFC 822. The UPN is shorter than the distinguished name and easier to remember.

" + } + }, + "EmailAddress": { + "target": "com.amazonaws.directoryservicedata#EmailAddress", + "traits": { + "smithy.api#documentation": "

The email address of the user.

" + } + }, + "GivenName": { + "target": "com.amazonaws.directoryservicedata#GivenName", + "traits": { + "smithy.api#documentation": "

The first name of the user.

" + } + }, + "Surname": { + "target": "com.amazonaws.directoryservicedata#Surname", + "traits": { + "smithy.api#documentation": "

The last name of the user.

" + } + }, + "Enabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the user account is active.

" + } + }, + "OtherAttributes": { + "target": "com.amazonaws.directoryservicedata#Attributes", + "traits": { + "smithy.api#documentation": "

An expression that includes one or more attributes, data types, and values of a\n user.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A user object that contains identifying information and attributes for a specified user.\n

" + } + }, + "com.amazonaws.directoryservicedata#UserList": { + "type": "list", + "member": { + "target": "com.amazonaws.directoryservicedata#User" + } + }, + "com.amazonaws.directoryservicedata#UserName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + }, + "smithy.api#pattern": "^[\\w\\-.]+$" + } + }, + "com.amazonaws.directoryservicedata#UserPrincipalName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.directoryservicedata#UserSummary": { + "type": "structure", + "members": { + "SID": { + "target": "com.amazonaws.directoryservicedata#SID", + "traits": { + "smithy.api#documentation": "

The unique security identifier (SID) of the user.

", + "smithy.api#required": {} + } + }, + "SAMAccountName": { + "target": "com.amazonaws.directoryservicedata#UserName", + "traits": { + "smithy.api#documentation": "

The name of the user.

", + "smithy.api#required": {} + } + }, + "GivenName": { + "target": "com.amazonaws.directoryservicedata#GivenName", + "traits": { + "smithy.api#documentation": "

The first name of the user.

" + } + }, + "Surname": { + "target": "com.amazonaws.directoryservicedata#Surname", + "traits": { + "smithy.api#documentation": "

The last name of the user.

" + } + }, + "Enabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the user account is active.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure containing a subset of the fields of a user object from a directory.

" + } + }, + "com.amazonaws.directoryservicedata#UserSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.directoryservicedata#UserSummary" + } + }, + "com.amazonaws.directoryservicedata#ValidationException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.directoryservicedata#ExceptionMessage" + }, + "Reason": { + "target": "com.amazonaws.directoryservicedata#ValidationExceptionReason", + "traits": { + "smithy.api#documentation": "

The reason that the request failed validation.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request isn't valid. Review the details in the error message to update the invalid\n parameters or values in your request.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.directoryservicedata#ValidationExceptionReason": { + "type": "enum", + "members": { + "INVALID_REALM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_REALM" + } + }, + "INVALID_DIRECTORY_TYPE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_DIRECTORY_TYPE" + } + }, + "INVALID_SECONDARY_REGION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_SECONDARY_REGION" + } + }, + "INVALID_NEXT_TOKEN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_NEXT_TOKEN" + } + }, + "INVALID_ATTRIBUTE_VALUE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ATTRIBUTE_VALUE" + } + }, + "INVALID_ATTRIBUTE_NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ATTRIBUTE_NAME" + } + }, + "INVALID_ATTRIBUTE_FOR_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ATTRIBUTE_FOR_USER" + } + }, + "INVALID_ATTRIBUTE_FOR_GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ATTRIBUTE_FOR_GROUP" + } + }, + "INVALID_ATTRIBUTE_FOR_SEARCH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ATTRIBUTE_FOR_SEARCH" + } + }, + "INVALID_ATTRIBUTE_FOR_MODIFY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_ATTRIBUTE_FOR_MODIFY" + } + }, + "DUPLICATE_ATTRIBUTE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DUPLICATE_ATTRIBUTE" + } + }, + "MISSING_ATTRIBUTE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MISSING_ATTRIBUTE" + } + }, + "ATTRIBUTE_EXISTS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ATTRIBUTE_EXISTS" + } + }, + "LDAP_SIZE_LIMIT_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LDAP_SIZE_LIMIT_EXCEEDED" + } + }, + "LDAP_UNSUPPORTED_OPERATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LDAP_UNSUPPORTED_OPERATION" + } + } + } + } + } +} \ No newline at end of file diff --git a/models/directory-service.json b/models/directory-service.json index f7c9f940be..abef873a0e 100644 --- a/models/directory-service.json +++ b/models/directory-service.json @@ -98,7 +98,7 @@ } }, "traits": { - "smithy.api#documentation": "

Client authentication is not available in this region at this time.

", + "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", "smithy.api#error": "client" } }, @@ -167,7 +167,7 @@ "target": "com.amazonaws.directoryservice#UpdateSecurityGroupForDirectoryControllers", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

If set to true, updates the inbound and outbound rules of the security group that has the description: "Amazon Web Services created security group for directory ID directory controllers." Following are the new rules:

Inbound:
  • Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0
  • Type: Custom UDP Rule, Protocol: UDP, Range: 123, Source: 0.0.0.0/0
  • Type: Custom UDP Rule, Protocol: UDP, Range: 138, Source: 0.0.0.0/0
  • Type: Custom UDP Rule, Protocol: UDP, Range: 389, Source: 0.0.0.0/0
  • Type: Custom UDP Rule, Protocol: UDP, Range: 464, Source: 0.0.0.0/0
  • Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: 0.0.0.0/0
  • Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: 0.0.0.0/0
  • Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: 0.0.0.0/0
  • Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: 0.0.0.0/0
  • Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: 0.0.0.0/0
  • Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: 0.0.0.0/0
  • Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: 0.0.0.0/0
  • Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: 0.0.0.0/0
  • Type: DNS (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0
  • Type: DNS (TCP), Protocol: TCP, Range: 53, Source: 0.0.0.0/0
  • Type: LDAP, Protocol: TCP, Range: 389, Source: 0.0.0.0/0
  • Type: All ICMP, Protocol: All, Range: N/A, Source: 0.0.0.0/0

Outbound:
  • Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0

These security rules impact an internal network interface that is not exposed publicly.

" + "smithy.api#documentation": "

If set to true, updates the inbound and outbound rules of the security group that has the description: "Amazon Web Services created security group for directory ID directory controllers." Following are the new rules:

Inbound:
  • Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom UDP Rule, Protocol: UDP, Range: 123, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom UDP Rule, Protocol: UDP, Range: 138, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom UDP Rule, Protocol: UDP, Range: 389, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom UDP Rule, Protocol: UDP, Range: 464, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: DNS (UDP), Protocol: UDP, Range: 53, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: DNS (TCP), Protocol: TCP, Range: 53, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: LDAP, Protocol: TCP, Range: 389, Source: Managed Microsoft AD VPC IPv4 CIDR
  • Type: All ICMP, Protocol: All, Range: N/A, Source: Managed Microsoft AD VPC IPv4 CIDR

Outbound:
  • Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0

These security rules impact an internal network interface that is not exposed publicly.

" } } }, @@ -1630,7 +1630,7 @@ "TrustPassword": { "target": "com.amazonaws.directoryservice#TrustPassword", "traits": { - "smithy.api#documentation": "

The trust password. The must be the same password that was used when creating the trust\n relationship on the external domain.

", + "smithy.api#documentation": "

The trust password. The trust password must be the same password that was used when creating the trust\n relationship on the external domain.

", "smithy.api#required": {} } }, @@ -1699,6 +1699,41 @@ "smithy.api#pattern": "^(?!.*\\\\|.*\"|.*\\/|.*\\[|.*\\]|.*:|.*;|.*\\||.*=|.*,|.*\\+|.*\\*|.*\\?|.*<|.*>|.*@).*$" } }, + "com.amazonaws.directoryservice#DataAccessStatus": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Disabled" + } + }, + "DISABLING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Disabling" + } + }, + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Enabled" + } + }, + "ENABLING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Enabling" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + } + } + }, "com.amazonaws.directoryservice#DeleteAssociatedConditionalForwarder": { "type": "boolean", "traits": { @@ -2442,6 +2477,64 @@ "smithy.api#output": {} } }, + "com.amazonaws.directoryservice#DescribeDirectoryDataAccess": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservice#DescribeDirectoryDataAccessRequest" + }, + "output": { + "target": "com.amazonaws.directoryservice#DescribeDirectoryDataAccessResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservice#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservice#ClientException" + }, + { + "target": "com.amazonaws.directoryservice#DirectoryDoesNotExistException" + }, + { + "target": "com.amazonaws.directoryservice#ServiceException" + }, + { + "target": "com.amazonaws.directoryservice#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

Obtains the status of directory data access enablement through the Directory Service Data API for the specified directory.

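On the management side, DescribeDirectoryDataAccess reports whether the Data API is enabled for a directory via the DataAccessStatus enum added above (Disabled, Disabling, Enabled, Enabling, Failed). A hedged sketch against the regenerated SotoDirectoryService module, reusing the AWSClient from the earlier sketches; member and enum case names are assumed to follow Soto's usual conventions:

import SotoDirectoryService

// Sketch only: reuses the AWSClient from the earlier sketches; the directory ID is a placeholder.
let directoryService = DirectoryService(client: client, region: .useast1)
let access = try await directoryService.describeDirectoryDataAccess(
    .init(directoryId: "d-1234567890")
)
if access.dataAccessStatus == .enabled {
    print("Directory Service Data API access is enabled")
} else {
    // Could be Disabled, Disabling, Enabling, or Failed while a change is in flight.
    print("Data access status:", access.dataAccessStatus ?? .disabled)
}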
" + } + }, + "com.amazonaws.directoryservice#DescribeDirectoryDataAccessRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservice#DirectoryId", + "traits": { + "smithy.api#documentation": "

The directory identifier.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservice#DescribeDirectoryDataAccessResult": { + "type": "structure", + "members": { + "DataAccessStatus": { + "target": "com.amazonaws.directoryservice#DataAccessStatus", + "traits": { + "smithy.api#documentation": "

The current status of data access through the Directory Service Data API.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.directoryservice#DescribeDomainControllers": { "type": "operation", "input": { @@ -2849,7 +2942,7 @@ "NextToken": { "target": "com.amazonaws.directoryservice#NextToken", "traits": { - "smithy.api#documentation": "

If not null, token that indicates that more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeSettings to retrieve the next set of items.

" + "smithy.api#documentation": "

If not null, token that indicates that more results are available. \n Pass this value for the NextToken parameter in a subsequent \n call to DescribeSettings to retrieve the next set of items.

" } } }, @@ -3526,7 +3619,7 @@ "Type": { "target": "com.amazonaws.directoryservice#DirectoryType", "traits": { - "smithy.api#documentation": "

The directory size.

" + "smithy.api#documentation": "

The directory type.

" } }, "VpcSettings": { @@ -3846,6 +3939,9 @@ { "target": "com.amazonaws.directoryservice#DescribeDirectories" }, + { + "target": "com.amazonaws.directoryservice#DescribeDirectoryDataAccess" + }, { "target": "com.amazonaws.directoryservice#DescribeDomainControllers" }, @@ -3876,6 +3972,9 @@ { "target": "com.amazonaws.directoryservice#DisableClientAuthentication" }, + { + "target": "com.amazonaws.directoryservice#DisableDirectoryDataAccess" + }, { "target": "com.amazonaws.directoryservice#DisableLDAPS" }, @@ -3888,6 +3987,9 @@ { "target": "com.amazonaws.directoryservice#EnableClientAuthentication" }, + { + "target": "com.amazonaws.directoryservice#EnableDirectoryDataAccess" + }, { "target": "com.amazonaws.directoryservice#EnableLDAPS" }, @@ -5063,6 +5165,12 @@ "traits": { "smithy.api#enumValue": "Failed" } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Updating" + } } } }, @@ -5106,7 +5214,7 @@ } }, "traits": { - "smithy.api#documentation": "

The specified directory is unavailable or could not be found.

", + "smithy.api#documentation": "

The specified directory is unavailable.

", "smithy.api#error": "client" } }, @@ -5209,7 +5317,7 @@ "Type": { "target": "com.amazonaws.directoryservice#ClientAuthenticationType", "traits": { - "smithy.api#documentation": "

The type of client authentication to disable. Currently, only the parameter, SmartCard is supported.

", + "smithy.api#documentation": "

The type of client authentication to disable. Currently, \"SmartCard\" is the only supported parameter.

", "smithy.api#required": {} } } @@ -5225,6 +5333,63 @@ "smithy.api#output": {} } }, + "com.amazonaws.directoryservice#DisableDirectoryDataAccess": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservice#DisableDirectoryDataAccessRequest" + }, + "output": { + "target": "com.amazonaws.directoryservice#DisableDirectoryDataAccessResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservice#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservice#ClientException" + }, + { + "target": "com.amazonaws.directoryservice#DirectoryDoesNotExistException" + }, + { + "target": "com.amazonaws.directoryservice#DirectoryInDesiredStateException" + }, + { + "target": "com.amazonaws.directoryservice#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservice#ServiceException" + }, + { + "target": "com.amazonaws.directoryservice#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deactivates access to directory data via the Directory Service Data API for the specified directory.

" + } + }, + "com.amazonaws.directoryservice#DisableDirectoryDataAccessRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservice#DirectoryId", + "traits": { + "smithy.api#documentation": "

The directory identifier.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservice#DisableDirectoryDataAccessResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.directoryservice#DisableLDAPS": { "type": "operation", "input": { @@ -5546,6 +5711,12 @@ "traits": { "smithy.api#enumValue": "Failed" } + }, + "UPDATING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Updating" + } } } }, @@ -5622,6 +5793,63 @@ "smithy.api#output": {} } }, + "com.amazonaws.directoryservice#EnableDirectoryDataAccess": { + "type": "operation", + "input": { + "target": "com.amazonaws.directoryservice#EnableDirectoryDataAccessRequest" + }, + "output": { + "target": "com.amazonaws.directoryservice#EnableDirectoryDataAccessResult" + }, + "errors": [ + { + "target": "com.amazonaws.directoryservice#AccessDeniedException" + }, + { + "target": "com.amazonaws.directoryservice#ClientException" + }, + { + "target": "com.amazonaws.directoryservice#DirectoryDoesNotExistException" + }, + { + "target": "com.amazonaws.directoryservice#DirectoryInDesiredStateException" + }, + { + "target": "com.amazonaws.directoryservice#DirectoryUnavailableException" + }, + { + "target": "com.amazonaws.directoryservice#ServiceException" + }, + { + "target": "com.amazonaws.directoryservice#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

Enables access to directory data via the Directory Service Data API for the specified directory.

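Enabling or disabling data access is a single call per directory; the status presumably passes through Enabling or Disabling before settling, which can be observed with the DescribeDirectoryDataAccess sketch above. A minimal hedged sketch:

// Same directoryService client as above; the directory ID is a placeholder.
_ = try await directoryService.enableDirectoryDataAccess(
    .init(directoryId: "d-1234567890")
)
// The matching call to turn Data API access back off:
_ = try await directoryService.disableDirectoryDataAccess(
    .init(directoryId: "d-1234567890")
)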
" + } + }, + "com.amazonaws.directoryservice#EnableDirectoryDataAccessRequest": { + "type": "structure", + "members": { + "DirectoryId": { + "target": "com.amazonaws.directoryservice#DirectoryId", + "traits": { + "smithy.api#documentation": "

The directory identifier.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.directoryservice#EnableDirectoryDataAccessResult": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.directoryservice#EnableLDAPS": { "type": "operation", "input": { @@ -7069,7 +7297,7 @@ "target": "com.amazonaws.directoryservice#RadiusRetries", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The maximum number of times that communication with the RADIUS server is\n attempted.

" + "smithy.api#documentation": "

The maximum number of times that communication with the RADIUS server is retried after the initial attempt.

" } }, "SharedSecret": { @@ -7141,7 +7369,7 @@ "traits": { "smithy.api#range": { "min": 1, - "max": 20 + "max": 50 } } }, @@ -7708,7 +7936,7 @@ } ], "traits": { - "smithy.api#documentation": "

Resets the password for any user in your Managed Microsoft AD or Simple AD directory.

You can reset the password for any user in your directory with the following exceptions:
  • For Simple AD, you cannot reset the password for any user that is a member of either the Domain Admins or Enterprise Admins group except for the administrator user.
  • For Managed Microsoft AD, you can only reset the password for a user that is in an OU based on the NetBIOS name that you typed when you created your directory. For example, you cannot reset the password for a user in the Amazon Web Services Reserved OU. For more information about the OU structure for a Managed Microsoft AD directory, see What Gets Created in the Directory Service Administration Guide.
" + "smithy.api#documentation": "

Resets the password for any user in your Managed Microsoft AD or Simple AD directory. Disabled users will become enabled and can be authenticated following the API call.

You can reset the password for any user in your directory with the following exceptions:
  • For Simple AD, you cannot reset the password for any user that is a member of either the Domain Admins or Enterprise Admins group except for the administrator user.
  • For Managed Microsoft AD, you can only reset the password for a user that is in an OU based on the NetBIOS name that you typed when you created your directory. For example, you cannot reset the password for a user in the Amazon Web Services Reserved OU. For more information about the OU structure for a Managed Microsoft AD directory, see What Gets Created in the Directory Service Administration Guide.
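A hedged Soto sketch of the ResetUserPassword call itself, assuming the directoryService client from the earlier sketches; all values are placeholders, and note the wording above that a disabled account becomes enabled by the reset:

// Resets (and implicitly enables) a user account; values are placeholders.
_ = try await directoryService.resetUserPassword(
    .init(
        directoryId: "d-1234567890",
        newPassword: "CorrectHorseBatteryStaple!1",
        userName: "jdoe"
    )
)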
" } }, "com.amazonaws.directoryservice#ResetUserPasswordRequest": { @@ -8654,14 +8882,14 @@ "Key": { "target": "com.amazonaws.directoryservice#TagKey", "traits": { - "smithy.api#documentation": "

Required name of the tag. The string value can be Unicode characters and cannot be\n prefixed with \"aws:\". The string can contain only the set of Unicode letters, digits,\n white-space, '_', '.', '/', '=', '+', '-' (Java regex:\n \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

", + "smithy.api#documentation": "

Required name of the tag. The string value can be Unicode characters and cannot be\n prefixed with \"aws:\". The string can contain only the set of Unicode letters, digits,\n white-space, '_', '.', '/', '=', '+', '-', ':', '@'(Java regex:\n \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

", "smithy.api#required": {} } }, "Value": { "target": "com.amazonaws.directoryservice#TagValue", "traits": { - "smithy.api#documentation": "

The optional value of the tag. The string value can be Unicode characters. The string\n can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-'\n (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

", + "smithy.api#documentation": "

The optional value of the tag. The string value can be Unicode characters. The string\n can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-', ':', '@'\n (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").

", "smithy.api#required": {} } } diff --git a/models/dynamodb.json b/models/dynamodb.json index ddddb6a7ea..f953105638 100644 --- a/models/dynamodb.json +++ b/models/dynamodb.json @@ -3884,6 +3884,18 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "AccountId": { + "builtIn": "AWS::Auth::AccountId", + "required": false, + "documentation": "The AWS AccountId used for the request.", + "type": "String" + }, + "AccountIdEndpointMode": { + "builtIn": "AWS::Auth::AccountIdEndpointMode", + "required": false, + "documentation": "The AccountId Endpoint Mode.", + "type": "String" } }, "rules": [ @@ -3932,9 +3944,7 @@ { "conditions": [], "endpoint": { - "url": { - "ref": "Endpoint" - }, + "url": "{Endpoint}", "properties": {}, "headers": {} }, @@ -3968,6 +3978,176 @@ } ], "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "ref": "Region" + }, + "local" + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and local endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and local endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": "http://localhost:8000", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "dynamodb", + "signingRegion": "us-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountIdEndpointMode" + } + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "AccountIdEndpointMode" + }, + "required" + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + } + ] + } + ], + "error": "AccountIdEndpointMode is required but no AccountID was provided or able to be loaded.", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws" + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "AccountId" + }, + false + ] + } + ] + } + ], + "error": "Credentials-sourced account ID parameter is invalid", + "type": "error" + }, { "conditions": [ { @@ -3995,7 +4175,6 @@ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -4004,13 +4183,13 @@ }, "supportsFIPS" ] - } + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -4019,11 +4198,39 @@ }, "supportsDualStack" ] - } + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountIdEndpointMode" + } + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "AccountIdEndpointMode" + }, + "disabled" + ] + } + ], + "endpoint": { + "url": "https://dynamodb-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" 
+ }, { "conditions": [], "endpoint": { @@ -4094,8 +4301,68 @@ ] } ], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountIdEndpointMode" + } + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "AccountIdEndpointMode" + }, + "disabled" + ] + } + ], + "endpoint": { + "url": "https://dynamodb.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://dynamodb.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountIdEndpointMode" + } + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "AccountIdEndpointMode" + }, + "disabled" + ] + } + ], "endpoint": { - "url": "https://dynamodb.{Region}.amazonaws.com", + "url": "https://dynamodb-fips.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, @@ -4139,7 +4406,6 @@ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -4148,11 +4414,39 @@ }, "supportsDualStack" ] - } + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountIdEndpointMode" + } + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "ref": "AccountIdEndpointMode" + }, + "disabled" + ] + } + ], + "endpoint": { + "url": "https://dynamodb.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [], "endpoint": { @@ -4175,52 +4469,113 @@ }, { "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountIdEndpointMode" + } + ] + }, { "fn": "stringEquals", "argv": [ { - "ref": "Region" + "ref": "AccountIdEndpointMode" }, - "local" + "disabled" ] } ], - "endpoint": { - "url": "http://localhost:8000", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "dynamodb", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [], "endpoint": { "url": "https://dynamodb.{Region}.{PartitionResult#dnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" - } - ], - "type": "tree" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "AccountId" + } + ] + }, + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws" + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ] + } + ], + "endpoint": { + "url": "https://{AccountId}.ddb.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://dynamodb.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] }, "smithy.rules#endpointTests": { "testCases": [ @@ -4931,6 +5286,843 @@ "expect": { "error": "Invalid Configuration: Missing Region" } + }, + { + "documentation": 
"For custom endpoint with account ID available, FIPS enabled, and DualStack enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with account ID available, FIPS enabled, and DualStack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with account ID available, FIPS disabled, and DualStack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "SDK::Endpoint": "https://example.com", + "AWS::Auth::AccountId": "012345678901" + }, + "operationName": "ListTables" + } + ], + "params": { + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with empty account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "SDK::Endpoint": "https://example.com", + "AWS::Auth::AccountId": "" + }, + "operationName": "ListTables" + } + ], + "params": { + "AccountId": "", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For region local with account ID available, FIPS enabled, and DualStack enabled", + "expect": { + "error": "Invalid Configuration: FIPS and local endpoint are not supported" + }, + "params": { + "Region": "local", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region local with account ID available, FIPS enabled, and DualStack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and local endpoint are not supported" + }, + "params": { + "Region": "local", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region local with account ID available, FIPS disabled, and DualStack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and local endpoint are not supported" + }, + "params": { + "Region": "local", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region local with account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "dynamodb", + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://localhost:8000" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "local", + "AWS::Auth::AccountId": "012345678901" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "local", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + 
"documentation": "For region local with empty account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "dynamodb", + "signingRegion": "us-east-1" + } + ] + }, + "url": "http://localhost:8000" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "local", + "AWS::Auth::AccountId": "" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "local", + "AccountId": "", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For AccountIdEndpointMode required and no AccountId set", + "expect": { + "error": "AccountIdEndpointMode is required but no AccountID was provided or able to be loaded." + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountIdEndpointMode": "required" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountIdEndpointMode": "required" + } + }, + { + "documentation": "For region us-east-1 with account ID available, FIPS enabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb-fips.us-east-1.api.aws" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::UseFIPS": true, + "AWS::UseDualStack": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with account ID available, FIPS enabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb-fips.us-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::UseFIPS": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with account ID available, AccountIdEndpointMode preferred, FIPS enabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb-fips.us-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::Auth::AccountIdEndpointMode": "preferred", + "AWS::UseFIPS": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "AccountIdEndpointMode": "preferred", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with account ID available, AccountIdEndpointMode required, FIPS enabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb-fips.us-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::Auth::AccountIdEndpointMode": "required", + "AWS::UseFIPS": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "AccountIdEndpointMode": "required", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with account ID available, FIPS disabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-east-1.api.aws" + 
} + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::UseDualStack": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with account ID available, FIPS disabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-east-1.api.aws" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::UseDualStack": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with account ID available, AccountIdEndpointMode preferred, FIPS disabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "AccountIdEndpointMode": "preferred", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with account ID available, AccountIdEndpointMode disabled, FIPS disabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "AccountIdEndpointMode": "disabled", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with account ID available, AccountIdEndpointMode required, FIPS disabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "AccountIdEndpointMode": "required", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with account ID available, AccountIdEndpointMode preferred, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://012345678901.ddb.us-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::Auth::AccountIdEndpointMode": "preferred" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": false, + "AccountIdEndpointMode": "preferred" + } + }, + { + "documentation": "For region us-east-1 with account ID available, AccountIdEndpointMode required, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://012345678901.ddb.us-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::Auth::AccountIdEndpointMode": "required" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": false, + "AccountIdEndpointMode": "required" + } + }, + { + "documentation": "For region us-east-1 with account ID available, AccountIdEndpointMode disabled, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": 
"us-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::Auth::AccountIdEndpointMode": "disabled" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": false, + "AccountIdEndpointMode": "disabled" + } + }, + { + "documentation": "For region us-east-1 with empty account ID, FIPS disabled, and DualStack disabled", + "expect": { + "error": "Credentials-sourced account ID parameter is invalid" + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-1", + "AWS::Auth::AccountId": " " + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-east-1", + "AccountId": "", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with account ID available, FIPS enabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::UseFIPS": true, + "AWS::UseDualStack": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "cn-north-1", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with account ID available, FIPS enabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb-fips.cn-north-1.amazonaws.com.cn" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::UseFIPS": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "cn-north-1", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with account ID available, FIPS disabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::UseDualStack": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "cn-north-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.cn-north-1.amazonaws.com.cn" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::Auth::AccountId": "012345678901" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "cn-north-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with account ID available, AccountIdEndpointMode preferred, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "AccountId": "012345678901", + "AccountIdEndpointMode": "preferred", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with account ID available, AccountIdEndpointMode disabled, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + 
"Region": "cn-north-1", + "AccountId": "012345678901", + "AccountIdEndpointMode": "disabled", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with account ID available, AccountIdEndpointMode required, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "AccountId": "012345678901", + "AccountIdEndpointMode": "required", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with empty account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.cn-north-1.amazonaws.com.cn" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "cn-north-1", + "AWS::Auth::AccountId": "" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "cn-north-1", + "AccountId": "", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with account ID available, FIPS enabled, and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with account ID available, FIPS enabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-iso-east-1", + "AWS::Auth::AccountId": "012345678901", + "AWS::UseFIPS": true + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-iso-east-1", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with account ID available, FIPS disabled, and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-iso-east-1.c2s.ic.gov" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-iso-east-1", + "AWS::Auth::AccountId": "012345678901" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-iso-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with empty account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-iso-east-1.c2s.ic.gov" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-iso-east-1", + "AWS::Auth::AccountId": "012345678901" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-iso-east-1", + "AccountId": "", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with account ID available, FIPS enabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "AccountId": "012345678901", + "UseFIPS": true, 
+ "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with account ID available, FIPS enabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "AccountId": "012345678901", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with account ID available, FIPS disabled, and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-gov-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-gov-east-1", + "AWS::Auth::AccountId": "012345678901" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-gov-east-1", + "AccountId": "012345678901", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with empty account ID available, FIPS disabled, and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://dynamodb.us-gov-east-1.amazonaws.com" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-gov-east-1", + "AWS::Auth::AccountId": "" + }, + "operationName": "ListTables" + } + ], + "params": { + "Region": "us-gov-east-1", + "AccountId": "", + "UseFIPS": false, + "UseDualStack": false + } } ], "version": "1.0" diff --git a/models/ec2.json b/models/ec2.json index 19c0dbb217..06e20ca53d 100644 --- a/models/ec2.json +++ b/models/ec2.json @@ -336,6 +336,56 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#AcceptCapacityReservationBillingOwnership": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#AcceptCapacityReservationBillingOwnershipRequest" + }, + "output": { + "target": "com.amazonaws.ec2#AcceptCapacityReservationBillingOwnershipResult" + }, + "traits": { + "smithy.api#documentation": "
Accepts a request to assign billing of the available capacity of a shared Capacity Reservation to your \n\t\t\taccount. For more information, see \n\t\t\t\tBilling assignment for shared Amazon EC2 Capacity Reservations.
" + } + }, + "com.amazonaws.ec2#AcceptCapacityReservationBillingOwnershipRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
" + } + }, + "CapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "
The ID of the Capacity Reservation for which to accept the request.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#AcceptCapacityReservationBillingOwnershipResult": { + "type": "structure", + "members": { + "Return": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Return", + "smithy.api#documentation": "
Returns true if the request succeeds; otherwise, it returns an error.
", + "smithy.api#xmlName": "return" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#AcceptReservedInstancesExchangeQuote": { "type": "operation", "input": { @@ -879,6 +929,16 @@ } } }, + "com.amazonaws.ec2#AccountID": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 12 + }, + "smithy.api#pattern": "^[0-9]{12}$" + } + }, "com.amazonaws.ec2#ActiveInstance": { "type": "structure", "members": { @@ -1150,22 +1210,6 @@ "com.amazonaws.ec2#Address": { "type": "structure", "members": { - "InstanceId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "InstanceId", - "smithy.api#documentation": "
The ID of the instance that the address is associated with (if any).
", - "smithy.api#xmlName": "instanceId" - } - }, - "PublicIp": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "PublicIp", - "smithy.api#documentation": "
The Elastic IP address.
", - "smithy.api#xmlName": "publicIp" - } - }, "AllocationId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -1261,6 +1305,22 @@ "smithy.api#documentation": "
The carrier IP address associated. This option is only available for network interfaces\n which reside in a subnet in a Wavelength Zone (for example an EC2 instance).
", "smithy.api#xmlName": "carrierIp" } + }, + "InstanceId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#documentation": "
The ID of the instance that the address is associated with (if any).
", + "smithy.api#xmlName": "instanceId" + } + }, + "PublicIp": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PublicIp", + "smithy.api#documentation": "
The Elastic IP address.
", + "smithy.api#xmlName": "publicIp" + } } }, "traits": { @@ -1587,14 +1647,6 @@ "smithy.api#documentation": "
The ID of a customer-owned address pool. Use this parameter to let Amazon EC2 \n select an address from the address pool. Alternatively, specify a specific \n address from the address pool.
" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "
Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
", - "smithy.api#xmlName": "dryRun" - } - }, "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { @@ -1605,7 +1657,15 @@ "IpamPoolId": { "target": "com.amazonaws.ec2#IpamPoolId", "traits": { - "smithy.api#documentation": "
The ID of an IPAM pool.
" + "smithy.api#documentation": "
The ID of an IPAM pool which has an Amazon-provided or BYOIP public IPv4 CIDR provisioned to it. For more information, see Allocate sequential Elastic IP addresses from an IPAM pool in the Amazon VPC IPAM User Guide.
" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "
Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
", + "smithy.api#xmlName": "dryRun" } } }, @@ -1616,14 +1676,6 @@ "com.amazonaws.ec2#AllocateAddressResult": { "type": "structure", "members": { - "PublicIp": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "PublicIp", - "smithy.api#documentation": "
The Elastic IP address.
", - "smithy.api#xmlName": "publicIp" - } - }, "AllocationId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -1679,6 +1731,14 @@ "smithy.api#documentation": "
The carrier IP address. This option is only available for network interfaces that reside\n in a subnet in a Wavelength Zone.
", "smithy.api#xmlName": "carrierIp" } + }, + "PublicIp": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PublicIp", + "smithy.api#documentation": "
The Elastic IP address.
", + "smithy.api#xmlName": "publicIp" + } } }, "traits": { @@ -1700,54 +1760,12 @@ "com.amazonaws.ec2#AllocateHostsRequest": { "type": "structure", "members": { - "AutoPlacement": { - "target": "com.amazonaws.ec2#AutoPlacement", - "traits": { - "aws.protocols#ec2QueryName": "AutoPlacement", - "smithy.api#documentation": "

Indicates whether the host accepts any untargeted instance launches that match its\n instance type configuration, or if it only accepts Host tenancy instance launches that\n specify its unique host ID. For more information, see Understanding auto-placement and affinity in the\n Amazon EC2 User Guide.

\n

Default: off\n

", - "smithy.api#xmlName": "autoPlacement" - } - }, - "AvailabilityZone": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZone", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Availability Zone in which to allocate the Dedicated Host.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "availabilityZone" - } - }, - "ClientToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", - "smithy.api#xmlName": "clientToken" - } - }, - "InstanceType": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "InstanceType", - "smithy.api#documentation": "

Specifies the instance type to be supported by the Dedicated Hosts. If you specify an\n instance type, the Dedicated Hosts support instances of the specified instance type\n only.

\n

If you want the Dedicated Hosts to support multiple instance types in a specific\n instance family, omit this parameter and specify InstanceFamily instead. You cannot specify InstanceType and InstanceFamily in the\n same request.

", - "smithy.api#xmlName": "instanceType" - } - }, "InstanceFamily": { "target": "com.amazonaws.ec2#String", "traits": { "smithy.api#documentation": "

Specifies the instance family to be supported by the Dedicated Hosts. If you specify\n an instance family, the Dedicated Hosts support multiple instance types within that\n instance family.

\n

If you want the Dedicated Hosts to support a specific instance type only, omit this\n parameter and specify InstanceType instead. You cannot\n specify InstanceFamily and InstanceType in the same request.

" } }, - "Quantity": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "Quantity", - "smithy.api#documentation": "

The number of Dedicated Hosts to allocate to your account with these parameters. If you are \n allocating the Dedicated Hosts on an Outpost, and you specify AssetIds, \n you can omit this parameter. In this case, Amazon EC2 allocates a Dedicated Host on each \n specified hardware asset. If you specify both AssetIds and \n Quantity, then the value that you specify for \n Quantity must be equal to the number of asset IDs specified.

", - "smithy.api#xmlName": "quantity" - } - }, "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { @@ -1779,6 +1797,48 @@ "smithy.api#documentation": "
The IDs of the Outpost hardware assets on which to allocate the Dedicated Hosts. Targeting \n specific hardware assets on an Outpost can help to minimize latency between your workloads. \n This parameter is supported only if you specify OutpostArn. \n If you are allocating the Dedicated Hosts in a Region, omit this parameter. \n • If you specify this parameter, you can omit Quantity. \n In this case, Amazon EC2 allocates a Dedicated Host on each specified hardware \n asset. \n • If you specify both AssetIds and \n Quantity, then the value for \n Quantity must be equal to the number of asset IDs \n specified.
", "smithy.api#xmlName": "AssetId" } + }, + "AutoPlacement": { + "target": "com.amazonaws.ec2#AutoPlacement", + "traits": { + "aws.protocols#ec2QueryName": "AutoPlacement", + "smithy.api#documentation": "

Indicates whether the host accepts any untargeted instance launches that match its\n instance type configuration, or if it only accepts Host tenancy instance launches that\n specify its unique host ID. For more information, see Understanding auto-placement and affinity in the\n Amazon EC2 User Guide.

\n

Default: off\n

", + "smithy.api#xmlName": "autoPlacement" + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "ClientToken", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

", + "smithy.api#xmlName": "clientToken" + } + }, + "InstanceType": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "InstanceType", + "smithy.api#documentation": "

Specifies the instance type to be supported by the Dedicated Hosts. If you specify an\n instance type, the Dedicated Hosts support instances of the specified instance type\n only.

\n

If you want the Dedicated Hosts to support multiple instance types in a specific\n instance family, omit this parameter and specify InstanceFamily instead. You cannot specify InstanceType and InstanceFamily in the\n same request.

", + "smithy.api#xmlName": "instanceType" + } + }, + "Quantity": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "Quantity", + "smithy.api#documentation": "

The number of Dedicated Hosts to allocate to your account with these parameters. If you are \n allocating the Dedicated Hosts on an Outpost, and you specify AssetIds, \n you can omit this parameter. In this case, Amazon EC2 allocates a Dedicated Host on each \n specified hardware asset. If you specify both AssetIds and \n Quantity, then the value that you specify for \n Quantity must be equal to the number of asset IDs specified.

", + "smithy.api#xmlName": "quantity" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Availability Zone in which to allocate the Dedicated Host.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "availabilityZone" + } } }, "traits": { @@ -2144,6 +2204,9 @@ { "target": "com.amazonaws.ec2#AcceptAddressTransfer" }, + { + "target": "com.amazonaws.ec2#AcceptCapacityReservationBillingOwnership" + }, { "target": "com.amazonaws.ec2#AcceptReservedInstancesExchangeQuote" }, @@ -2189,6 +2252,9 @@ { "target": "com.amazonaws.ec2#AssociateAddress" }, + { + "target": "com.amazonaws.ec2#AssociateCapacityReservationBillingOwner" + }, { "target": "com.amazonaws.ec2#AssociateClientVpnTargetNetwork" }, @@ -2837,6 +2903,9 @@ { "target": "com.amazonaws.ec2#DescribeCapacityBlockOfferings" }, + { + "target": "com.amazonaws.ec2#DescribeCapacityReservationBillingRequests" + }, { "target": "com.amazonaws.ec2#DescribeCapacityReservationFleets" }, @@ -3332,6 +3401,9 @@ { "target": "com.amazonaws.ec2#DisassociateAddress" }, + { + "target": "com.amazonaws.ec2#DisassociateCapacityReservationBillingOwner" + }, { "target": "com.amazonaws.ec2#DisassociateClientVpnTargetNetwork" }, @@ -3659,6 +3731,9 @@ { "target": "com.amazonaws.ec2#ModifyInstanceCapacityReservationAttributes" }, + { + "target": "com.amazonaws.ec2#ModifyInstanceCpuOptions" + }, { "target": "com.amazonaws.ec2#ModifyInstanceCreditSpecification" }, @@ -3860,6 +3935,9 @@ { "target": "com.amazonaws.ec2#RegisterTransitGatewayMulticastGroupSources" }, + { + "target": "com.amazonaws.ec2#RejectCapacityReservationBillingOwnership" + }, { "target": "com.amazonaws.ec2#RejectTransitGatewayMulticastDomainAssociations" }, @@ -5915,22 +5993,6 @@ "com.amazonaws.ec2#AssignIpv6AddressesRequest": { "type": "structure", "members": { - "Ipv6AddressCount": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "Ipv6AddressCount", - "smithy.api#documentation": "

The number of additional IPv6 addresses to assign to the network interface. \n \t\tThe specified number of IPv6 addresses are assigned in addition to the \n \t\texisting IPv6 addresses that are already assigned to the network interface. \n \t\tAmazon EC2 automatically selects the IPv6 addresses from the subnet range. You \n \t\tcan't use this option if specifying specific IPv6 addresses.

", - "smithy.api#xmlName": "ipv6AddressCount" - } - }, - "Ipv6Addresses": { - "target": "com.amazonaws.ec2#Ipv6AddressList", - "traits": { - "aws.protocols#ec2QueryName": "Ipv6Addresses", - "smithy.api#documentation": "

The IPv6 addresses to be assigned to the network interface. You can't use this option if you're specifying a number of IPv6 addresses.

", - "smithy.api#xmlName": "ipv6Addresses" - } - }, "Ipv6PrefixCount": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -5953,6 +6015,22 @@ "smithy.api#required": {}, "smithy.api#xmlName": "networkInterfaceId" } + }, + "Ipv6Addresses": { + "target": "com.amazonaws.ec2#Ipv6AddressList", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6Addresses", + "smithy.api#documentation": "

The IPv6 addresses to be assigned to the network interface. You can't use this option if you're specifying a number of IPv6 addresses.

", + "smithy.api#xmlName": "ipv6Addresses" + } + }, + "Ipv6AddressCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6AddressCount", + "smithy.api#documentation": "

The number of additional IPv6 addresses to assign to the network interface. \n \t\tThe specified number of IPv6 addresses are assigned in addition to the \n \t\texisting IPv6 addresses that are already assigned to the network interface. \n \t\tAmazon EC2 automatically selects the IPv6 addresses from the subnet range. You \n \t\tcan't use this option if specifying specific IPv6 addresses.

", + "smithy.api#xmlName": "ipv6AddressCount" + } } }, "traits": { @@ -6026,12 +6104,17 @@ "com.amazonaws.ec2#AssignPrivateIpAddressesRequest": { "type": "structure", "members": { - "AllowReassignment": { - "target": "com.amazonaws.ec2#Boolean", + "Ipv4Prefixes": { + "target": "com.amazonaws.ec2#IpPrefixList", "traits": { - "aws.protocols#ec2QueryName": "AllowReassignment", - "smithy.api#documentation": "

Indicates whether to allow an IP address that is already assigned to another network interface or instance to be reassigned to the specified network interface.

", - "smithy.api#xmlName": "allowReassignment" + "smithy.api#documentation": "

One or more IPv4 prefixes assigned to the network interface. You cannot use this option if you use the Ipv4PrefixCount option.

", + "smithy.api#xmlName": "Ipv4Prefix" + } + }, + "Ipv4PrefixCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#documentation": "

The number of IPv4 prefixes that Amazon Web Services automatically assigns to the network interface. You cannot use this option if you use the Ipv4 Prefixes option.

" } }, "NetworkInterfaceId": { @@ -6060,17 +6143,12 @@ "smithy.api#xmlName": "secondaryPrivateIpAddressCount" } }, - "Ipv4Prefixes": { - "target": "com.amazonaws.ec2#IpPrefixList", - "traits": { - "smithy.api#documentation": "

One or more IPv4 prefixes assigned to the network interface. You cannot use this option if you use the Ipv4PrefixCount option.

", - "smithy.api#xmlName": "Ipv4Prefix" - } - }, - "Ipv4PrefixCount": { - "target": "com.amazonaws.ec2#Integer", + "AllowReassignment": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

The number of IPv4 prefixes that Amazon Web Services automatically assigns to the network interface. You cannot use this option if you use the Ipv4 Prefixes option.

" + "aws.protocols#ec2QueryName": "AllowReassignment", + "smithy.api#documentation": "

Indicates whether to allow an IP address that is already assigned to another network interface or instance to be reassigned to the specified network interface.

", + "smithy.api#xmlName": "allowReassignment" } } }, @@ -6264,14 +6342,6 @@ "smithy.api#documentation": "

Deprecated.

" } }, - "AllowReassociation": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "AllowReassociation", - "smithy.api#documentation": "

Reassociation is automatic, but you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

", - "smithy.api#xmlName": "allowReassociation" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -6295,6 +6365,14 @@ "smithy.api#documentation": "

The primary or secondary private IP address to associate with the Elastic IP address. If no private IP address is specified, the Elastic IP address is associated with the primary private IP address.

", "smithy.api#xmlName": "privateIpAddress" } + }, + "AllowReassociation": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "AllowReassociation", + "smithy.api#documentation": "

Reassociation is automatic, but you can specify false to ensure the operation fails if the Elastic IP address is already associated with another resource.

", + "smithy.api#xmlName": "allowReassociation" + } } }, "traits": { @@ -6317,6 +6395,64 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#AssociateCapacityReservationBillingOwner": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#AssociateCapacityReservationBillingOwnerRequest" + }, + "output": { + "target": "com.amazonaws.ec2#AssociateCapacityReservationBillingOwnerResult" + }, + "traits": { + "smithy.api#documentation": "
Initiates a request to assign billing of the unused capacity of a shared Capacity Reservation to a consumer \n\t\t\taccount that is consolidated under the same Amazon Web Services organizations payer account. For more information, see \n\t\t\tBilling assignment for shared \n\t\t\t\tAmazon EC2 Capacity Reservations.
" + } + }, + "com.amazonaws.ec2#AssociateCapacityReservationBillingOwnerRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "
Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
" + } + }, + "CapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "
The ID of the Capacity Reservation.
", + "smithy.api#required": {} + } + }, + "UnusedReservationBillingOwnerId": { + "target": "com.amazonaws.ec2#AccountID", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "
The ID of the consumer account to which assign billing.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#AssociateCapacityReservationBillingOwnerResult": { + "type": "structure", + "members": { + "Return": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Return", + "smithy.api#documentation": "
Returns true if the request succeeds; otherwise, it returns an error.
", + "smithy.api#xmlName": "return" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#AssociateClientVpnTargetNetwork": { "type": "operation", "input": { @@ -6891,6 +7027,12 @@ "com.amazonaws.ec2#AssociateRouteTableRequest": { "type": "structure", "members": { + "GatewayId": { + "target": "com.amazonaws.ec2#RouteGatewayId", + "traits": { + "smithy.api#documentation": "
The ID of the internet gateway or virtual private gateway.
" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -6899,16 +7041,6 @@ "smithy.api#xmlName": "dryRun" } }, - "RouteTableId": { - "target": "com.amazonaws.ec2#RouteTableId", - "traits": { - "aws.protocols#ec2QueryName": "RouteTableId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the route table.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "routeTableId" - } - }, "SubnetId": { "target": "com.amazonaws.ec2#SubnetId", "traits": { @@ -6917,10 +7049,14 @@ "smithy.api#xmlName": "subnetId" } }, - "GatewayId": { - "target": "com.amazonaws.ec2#RouteGatewayId", + "RouteTableId": { + "target": "com.amazonaws.ec2#RouteTableId", "traits": { - "smithy.api#documentation": "

The ID of the internet gateway or virtual private gateway.

" + "aws.protocols#ec2QueryName": "RouteTableId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the route table.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "routeTableId" } } }, @@ -6967,12 +7103,16 @@ "com.amazonaws.ec2#AssociateSubnetCidrBlockRequest": { "type": "structure", "members": { - "Ipv6CidrBlock": { - "target": "com.amazonaws.ec2#String", + "Ipv6IpamPoolId": { + "target": "com.amazonaws.ec2#IpamPoolId", "traits": { - "aws.protocols#ec2QueryName": "Ipv6CidrBlock", - "smithy.api#documentation": "

The IPv6 CIDR block for your subnet.

", - "smithy.api#xmlName": "ipv6CidrBlock" + "smithy.api#documentation": "

An IPv6 IPAM pool ID.

" + } + }, + "Ipv6NetmaskLength": { + "target": "com.amazonaws.ec2#NetmaskLength", + "traits": { + "smithy.api#documentation": "

An IPv6 netmask length.

" } }, "SubnetId": { @@ -6985,16 +7125,12 @@ "smithy.api#xmlName": "subnetId" } }, - "Ipv6IpamPoolId": { - "target": "com.amazonaws.ec2#IpamPoolId", - "traits": { - "smithy.api#documentation": "

An IPv6 IPAM pool ID.

" - } - }, - "Ipv6NetmaskLength": { - "target": "com.amazonaws.ec2#NetmaskLength", + "Ipv6CidrBlock": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

An IPv6 netmask length.

" + "aws.protocols#ec2QueryName": "Ipv6CidrBlock", + "smithy.api#documentation": "

The IPv6 CIDR block for your subnet.

", + "smithy.api#xmlName": "ipv6CidrBlock" } } }, @@ -7308,30 +7444,12 @@ "com.amazonaws.ec2#AssociateVpcCidrBlockRequest": { "type": "structure", "members": { - "AmazonProvidedIpv6CidrBlock": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "AmazonProvidedIpv6CidrBlock", - "smithy.api#documentation": "

Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You\n cannot specify the range of IPv6 addresses or the size of the CIDR block.

", - "smithy.api#xmlName": "amazonProvidedIpv6CidrBlock" - } - }, "CidrBlock": { "target": "com.amazonaws.ec2#String", "traits": { "smithy.api#documentation": "

An IPv4 CIDR block to associate with the VPC.

" } }, - "VpcId": { - "target": "com.amazonaws.ec2#VpcId", - "traits": { - "aws.protocols#ec2QueryName": "VpcId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the VPC.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "vpcId" - } - }, "Ipv6CidrBlockNetworkBorderGroup": { "target": "com.amazonaws.ec2#String", "traits": { @@ -7373,6 +7491,24 @@ "traits": { "smithy.api#documentation": "

The netmask length of the IPv6 CIDR you would like to associate from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.

" } + }, + "VpcId": { + "target": "com.amazonaws.ec2#VpcId", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the VPC.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "vpcId" + } + }, + "AmazonProvidedIpv6CidrBlock": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "AmazonProvidedIpv6CidrBlock", + "smithy.api#documentation": "

Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. You\n cannot specify the range of IPv6 addresses or the size of the CIDR block.

", + "smithy.api#xmlName": "amazonProvidedIpv6CidrBlock" + } } }, "traits": { @@ -7646,15 +7782,6 @@ "smithy.api#xmlName": "dryRun" } }, - "Groups": { - "target": "com.amazonaws.ec2#GroupIdStringList", - "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The IDs of the security groups. You cannot specify security groups from a different VPC.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "SecurityGroupId" - } - }, "InstanceId": { "target": "com.amazonaws.ec2#InstanceId", "traits": { @@ -7674,6 +7801,15 @@ "smithy.api#required": {}, "smithy.api#xmlName": "vpcId" } + }, + "Groups": { + "target": "com.amazonaws.ec2#GroupIdStringList", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The IDs of the security groups. You cannot specify security groups from a different VPC.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "SecurityGroupId" + } } }, "traits": { @@ -7783,14 +7919,16 @@ "com.amazonaws.ec2#AttachNetworkInterfaceRequest": { "type": "structure", "members": { - "DeviceIndex": { + "NetworkCardIndex": { "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "DeviceIndex", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The index of the device for the network interface attachment.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "deviceIndex" + "smithy.api#documentation": "

The index of the network card. Some instance types support multiple network cards. \n The primary network interface must be assigned to network card index 0. \n The default is network card index 0.

" + } + }, + "EnaSrdSpecification": { + "target": "com.amazonaws.ec2#EnaSrdSpecification", + "traits": { + "smithy.api#documentation": "

Configures ENA Express for the network interface that this action attaches to the instance.

" } }, "DryRun": { @@ -7801,16 +7939,6 @@ "smithy.api#xmlName": "dryRun" } }, - "InstanceId": { - "target": "com.amazonaws.ec2#InstanceId", - "traits": { - "aws.protocols#ec2QueryName": "InstanceId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the instance.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "instanceId" - } - }, "NetworkInterfaceId": { "target": "com.amazonaws.ec2#NetworkInterfaceId", "traits": { @@ -7821,16 +7949,24 @@ "smithy.api#xmlName": "networkInterfaceId" } }, - "NetworkCardIndex": { - "target": "com.amazonaws.ec2#Integer", + "InstanceId": { + "target": "com.amazonaws.ec2#InstanceId", "traits": { - "smithy.api#documentation": "

The index of the network card. Some instance types support multiple network cards. \n The primary network interface must be assigned to network card index 0. \n The default is network card index 0.

" + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the instance.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "instanceId" } }, - "EnaSrdSpecification": { - "target": "com.amazonaws.ec2#EnaSrdSpecification", + "DeviceIndex": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

Configures ENA Express for the network interface that this action attaches to the instance.

" + "aws.protocols#ec2QueryName": "DeviceIndex", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The index of the device for the network interface attachment.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "deviceIndex" } } }, @@ -8016,7 +8152,7 @@ "target": "com.amazonaws.ec2#AttachVpnGatewayResult" }, "traits": { - "smithy.api#documentation": "
Attaches a virtual private gateway to a VPC. You can attach one virtual private\n gateway to one VPC at a time. \n For more information, see Amazon Web Services Site-to-Site VPN in the Amazon Web Services Site-to-Site VPN\n User Guide.
" + "smithy.api#documentation": "
Attaches an available virtual private gateway to a VPC. You can attach one virtual private\n gateway to one VPC at a time. \n For more information, see Amazon Web Services Site-to-Site VPN in the Amazon Web Services Site-to-Site VPN\n User Guide.
" } }, "com.amazonaws.ec2#AttachVpnGatewayRequest": { @@ -8375,6 +8511,13 @@ "com.amazonaws.ec2#AuthorizeSecurityGroupEgressRequest": { "type": "structure", "members": { + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "
The tags applied to the security group rule.
", + "smithy.api#xmlName": "TagSpecification" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -8393,27 +8536,28 @@ "smithy.api#xmlName": "groupId" } }, - "IpPermissions": { - "target": "com.amazonaws.ec2#IpPermissionList", + "SourceSecurityGroupName": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "IpPermissions", - "smithy.api#documentation": "

The permissions for the security group rules.

", - "smithy.api#xmlName": "ipPermissions" + "aws.protocols#ec2QueryName": "SourceSecurityGroupName", + "smithy.api#documentation": "

Not supported. Use IP permissions instead.

", + "smithy.api#xmlName": "sourceSecurityGroupName" } }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", + "SourceSecurityGroupOwnerId": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The tags applied to the security group rule.

", - "smithy.api#xmlName": "TagSpecification" + "aws.protocols#ec2QueryName": "SourceSecurityGroupOwnerId", + "smithy.api#documentation": "

Not supported. Use IP permissions instead.

", + "smithy.api#xmlName": "sourceSecurityGroupOwnerId" } }, - "CidrIp": { + "IpProtocol": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "CidrIp", + "aws.protocols#ec2QueryName": "IpProtocol", "smithy.api#documentation": "

Not supported. Use IP permissions instead.

", - "smithy.api#xmlName": "cidrIp" + "smithy.api#xmlName": "ipProtocol" } }, "FromPort": { @@ -8424,14 +8568,6 @@ "smithy.api#xmlName": "fromPort" } }, - "IpProtocol": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "IpProtocol", - "smithy.api#documentation": "

Not supported. Use IP permissions instead.

", - "smithy.api#xmlName": "ipProtocol" - } - }, "ToPort": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -8440,20 +8576,20 @@ "smithy.api#xmlName": "toPort" } }, - "SourceSecurityGroupName": { + "CidrIp": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "SourceSecurityGroupName", + "aws.protocols#ec2QueryName": "CidrIp", "smithy.api#documentation": "

Not supported. Use IP permissions instead.

", - "smithy.api#xmlName": "sourceSecurityGroupName" + "smithy.api#xmlName": "cidrIp" } }, - "SourceSecurityGroupOwnerId": { - "target": "com.amazonaws.ec2#String", + "IpPermissions": { + "target": "com.amazonaws.ec2#IpPermissionList", "traits": { - "aws.protocols#ec2QueryName": "SourceSecurityGroupOwnerId", - "smithy.api#documentation": "

Not supported. Use IP permissions instead.

", - "smithy.api#xmlName": "sourceSecurityGroupOwnerId" + "aws.protocols#ec2QueryName": "IpPermissions", + "smithy.api#documentation": "

The permissions for the security group rules.

", + "smithy.api#xmlName": "ipPermissions" } } }, @@ -8619,6 +8755,13 @@ "smithy.api#documentation": "

If the protocol is TCP or UDP, this is the end of the port range.\n If the protocol is ICMP, this is the ICMP code or -1 (all ICMP codes). \n If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes).

\n

To specify multiple rules and descriptions for the rules, use IP permissions instead.

" } }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "
The tags applied to the security group rule.
", + "smithy.api#xmlName": "TagSpecification" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -8626,13 +8769,6 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } - }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", - "traits": { - "smithy.api#documentation": "

The tags applied to the security group rule.

", - "smithy.api#xmlName": "TagSpecification" - } } }, "traits": { @@ -8720,14 +8856,6 @@ "com.amazonaws.ec2#AvailabilityZone": { "type": "structure", "members": { - "State": { - "target": "com.amazonaws.ec2#AvailabilityZoneState", - "traits": { - "aws.protocols#ec2QueryName": "ZoneState", - "smithy.api#documentation": "

The state of the Availability Zone, Local Zone, or Wavelength Zone. This value is always\n available.

", - "smithy.api#xmlName": "zoneState" - } - }, "OptInStatus": { "target": "com.amazonaws.ec2#AvailabilityZoneOptInStatus", "traits": { @@ -8807,6 +8935,14 @@ "smithy.api#documentation": "

The ID of the zone that handles some of the Local Zone or Wavelength Zone control plane\n operations, such as API calls.

", "smithy.api#xmlName": "parentZoneId" } + }, + "State": { + "target": "com.amazonaws.ec2#AvailabilityZoneState", + "traits": { + "aws.protocols#ec2QueryName": "ZoneState", + "smithy.api#documentation": "

The state of the Availability Zone, Local Zone, or Wavelength Zone. This value is always\n available.

", + "smithy.api#xmlName": "zoneState" + } } }, "traits": { @@ -9126,22 +9262,6 @@ "com.amazonaws.ec2#BlockDeviceMapping": { "type": "structure", "members": { - "DeviceName": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "DeviceName", - "smithy.api#documentation": "

The device name (for example, /dev/sdh or xvdh).

", - "smithy.api#xmlName": "deviceName" - } - }, - "VirtualName": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VirtualName", - "smithy.api#documentation": "

The virtual device name (ephemeralN). Instance store volumes are numbered\n starting from 0. An instance type with 2 available instance store volumes can specify\n mappings for ephemeral0 and ephemeral1. The number of\n available instance store volumes depends on the instance type. After you connect to the\n instance, you must mount the volume.

\n

NVMe instance store volumes are automatically enumerated and assigned a device name.\n Including them in your block device mapping has no effect.

\n

Constraints: For M3 instances, you must specify instance store volumes in the block\n device mapping for the instance. When you launch an M3 instance, we ignore any instance\n store volumes specified in the block device mapping for the AMI.

", - "smithy.api#xmlName": "virtualName" - } - }, "Ebs": { "target": "com.amazonaws.ec2#EbsBlockDevice", "traits": { @@ -9157,6 +9277,22 @@ "smithy.api#documentation": "

To omit the device from the block device mapping, specify an empty string. When this\n property is specified, the device is removed from the block device mapping regardless of\n the assigned value.

", "smithy.api#xmlName": "noDevice" } + }, + "DeviceName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DeviceName", + "smithy.api#documentation": "

The device name (for example, /dev/sdh or xvdh).

", + "smithy.api#xmlName": "deviceName" + } + }, + "VirtualName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VirtualName", + "smithy.api#documentation": "

The virtual device name (ephemeralN). Instance store volumes are numbered\n starting from 0. An instance type with 2 available instance store volumes can specify\n mappings for ephemeral0 and ephemeral1. The number of\n available instance store volumes depends on the instance type. After you connect to the\n instance, you must mount the volume.

\n

NVMe instance store volumes are automatically enumerated and assigned a device name.\n Including them in your block device mapping has no effect.

\n

Constraints: For M3 instances, you must specify instance store volumes in the block\n device mapping for the instance. When you launch an M3 instance, we ignore any instance\n store volumes specified in the block device mapping for the AMI.

", + "smithy.api#xmlName": "virtualName" + } } }, "traits": { @@ -9316,22 +9452,6 @@ "com.amazonaws.ec2#BundleTask": { "type": "structure", "members": { - "BundleId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "BundleId", - "smithy.api#documentation": "

The ID of the bundle task.

", - "smithy.api#xmlName": "bundleId" - } - }, - "BundleTaskError": { - "target": "com.amazonaws.ec2#BundleTaskError", - "traits": { - "aws.protocols#ec2QueryName": "Error", - "smithy.api#documentation": "

If the task fails, a description of the error.

", - "smithy.api#xmlName": "error" - } - }, "InstanceId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -9340,12 +9460,20 @@ "smithy.api#xmlName": "instanceId" } }, - "Progress": { + "BundleId": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "Progress", - "smithy.api#documentation": "

The level of task completion, as a percent (for example, 20%).

", - "smithy.api#xmlName": "progress" + "aws.protocols#ec2QueryName": "BundleId", + "smithy.api#documentation": "

The ID of the bundle task.

", + "smithy.api#xmlName": "bundleId" + } + }, + "State": { + "target": "com.amazonaws.ec2#BundleTaskState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "

The state of the task.

", + "smithy.api#xmlName": "state" } }, "StartTime": { @@ -9356,12 +9484,12 @@ "smithy.api#xmlName": "startTime" } }, - "State": { - "target": "com.amazonaws.ec2#BundleTaskState", + "UpdateTime": { + "target": "com.amazonaws.ec2#DateTime", "traits": { - "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "

The state of the task.

", - "smithy.api#xmlName": "state" + "aws.protocols#ec2QueryName": "UpdateTime", + "smithy.api#documentation": "

The time of the most recent update for the task.

", + "smithy.api#xmlName": "updateTime" } }, "Storage": { @@ -9372,12 +9500,20 @@ "smithy.api#xmlName": "storage" } }, - "UpdateTime": { - "target": "com.amazonaws.ec2#DateTime", + "Progress": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "UpdateTime", - "smithy.api#documentation": "

The time of the most recent update for the task.

", - "smithy.api#xmlName": "updateTime" + "aws.protocols#ec2QueryName": "Progress", + "smithy.api#documentation": "

The level of task completion, as a percent (for example, 20%).

", + "smithy.api#xmlName": "progress" + } + }, + "BundleTaskError": { + "target": "com.amazonaws.ec2#BundleTaskError", + "traits": { + "aws.protocols#ec2QueryName": "Error", + "smithy.api#documentation": "

If the task fails, a description of the error.

", + "smithy.api#xmlName": "error" } } }, @@ -9658,6 +9794,23 @@ } } }, + "com.amazonaws.ec2#CallerRole": { + "type": "enum", + "members": { + "odcr_owner": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "odcr-owner" + } + }, + "unused_reservation_billing_owner": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "unused-reservation-billing-owner" + } + } + } + }, "com.amazonaws.ec2#CancelBatchErrorCode": { "type": "enum", "members": { @@ -9883,6 +10036,14 @@ "com.amazonaws.ec2#CancelConversionRequest": { "type": "structure", "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, "ConversionTaskId": { "target": "com.amazonaws.ec2#ConversionTaskId", "traits": { @@ -9893,14 +10054,6 @@ "smithy.api#xmlName": "conversionTaskId" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "ReasonMessage": { "target": "com.amazonaws.ec2#String", "traits": { @@ -9923,7 +10076,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "
Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all\n artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is\n in the process of transferring the final disk image, the command fails and returns an exception. \n For more information, see Importing a Virtual Machine Using the Amazon\n EC2 CLI.
" + "smithy.api#documentation": "
Cancels an active conversion task. The task can be the import of an instance or volume. The action removes all\n artifacts of the conversion, including a partially uploaded volume or instance. If the conversion is complete or is\n in the process of transferring the final disk image, the command fails and returns an exception.
" } }, "com.amazonaws.ec2#CancelExportTask": { @@ -10802,12 +10955,134 @@ "smithy.api#documentation": "
The type of Capacity Reservation.
", "smithy.api#xmlName": "reservationType" } + }, + "UnusedReservationBillingOwnerId": { + "target": "com.amazonaws.ec2#AccountID", + "traits": { + "aws.protocols#ec2QueryName": "UnusedReservationBillingOwnerId", + "smithy.api#documentation": "
The ID of the Amazon Web Services account to which billing of the unused capacity \n\t\t\tof the Capacity Reservation is assigned.
", + "smithy.api#xmlName": "unusedReservationBillingOwnerId" + } } }, "traits": { "smithy.api#documentation": "
Describes a Capacity Reservation.
" } }, + "com.amazonaws.ec2#CapacityReservationBillingRequest": { + "type": "structure", + "members": { + "CapacityReservationId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CapacityReservationId", + "smithy.api#documentation": "
The ID of the Capacity Reservation.
", + "smithy.api#xmlName": "capacityReservationId" + } + }, + "RequestedBy": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "RequestedBy", + "smithy.api#documentation": "
The ID of the Amazon Web Services account that initiated the request.
", + "smithy.api#xmlName": "requestedBy" + } + }, + "UnusedReservationBillingOwnerId": { + "target": "com.amazonaws.ec2#AccountID", + "traits": { + "aws.protocols#ec2QueryName": "UnusedReservationBillingOwnerId", + "smithy.api#documentation": "
The ID of the Amazon Web Services account to which the request was sent.
", + "smithy.api#xmlName": "unusedReservationBillingOwnerId" + } + }, + "LastUpdateTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "LastUpdateTime", + "smithy.api#documentation": "
The date and time, in UTC time format, at which the request was initiated.
", + "smithy.api#xmlName": "lastUpdateTime" + } + }, + "Status": { + "target": "com.amazonaws.ec2#CapacityReservationBillingRequestStatus", + "traits": { + "aws.protocols#ec2QueryName": "Status", + "smithy.api#documentation": "
The status of the request. For more information, see \n\t\t\tView billing assignment requests for a shared Amazon EC2 Capacity Reservation.
", + "smithy.api#xmlName": "status" + } + }, + "StatusMessage": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "StatusMessage", + "smithy.api#documentation": "
Information about the status.
", + "smithy.api#xmlName": "statusMessage" + } + }, + "CapacityReservationInfo": { + "target": "com.amazonaws.ec2#CapacityReservationInfo", + "traits": { + "aws.protocols#ec2QueryName": "CapacityReservationInfo", + "smithy.api#documentation": "
Information about the Capacity Reservation.
", + "smithy.api#xmlName": "capacityReservationInfo" + } + } + }, + "traits": { + "smithy.api#documentation": "
Information about a request to assign billing of the unused capacity of a Capacity Reservation.
" + } + }, + "com.amazonaws.ec2#CapacityReservationBillingRequestSet": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#CapacityReservationBillingRequest", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, + "com.amazonaws.ec2#CapacityReservationBillingRequestStatus": { + "type": "enum", + "members": { + "pending": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "pending" + } + }, + "accepted": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "accepted" + } + }, + "rejected": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "rejected" + } + }, + "cancelled": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "cancelled" + } + }, + "revoked": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "revoked" + } + }, + "expired": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "expired" + } + } + } + }, "com.amazonaws.ec2#CapacityReservationFleet": { "type": "structure", "members": { @@ -11078,6 +11353,38 @@ } } }, + "com.amazonaws.ec2#CapacityReservationInfo": { + "type": "structure", + "members": { + "InstanceType": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "InstanceType", + "smithy.api#documentation": "
The instance type for the Capacity Reservation.
", + "smithy.api#xmlName": "instanceType" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#AvailabilityZoneName", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "
The Availability Zone for the Capacity Reservation.
", + "smithy.api#xmlName": "availabilityZone" + } + }, + "Tenancy": { + "target": "com.amazonaws.ec2#CapacityReservationTenancy", + "traits": { + "aws.protocols#ec2QueryName": "Tenancy", + "smithy.api#documentation": "
The tenancy of the Capacity Reservation.
", + "smithy.api#xmlName": "tenancy" + } + } + }, + "traits": { + "smithy.api#documentation": "
Information about a Capacity Reservation.
" + } + }, "com.amazonaws.ec2#CapacityReservationInstancePlatform": { "type": "enum", "members": { @@ -12976,7 +13283,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "
Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.
", + "smithy.api#documentation": "
Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.
", "smithy.api#xmlName": "dryRun" } } @@ -12988,14 +13295,6 @@ "com.amazonaws.ec2#ConfirmProductInstanceResult": { "type": "structure", "members": { - "OwnerId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "OwnerId", - "smithy.api#documentation": "

The Amazon Web Services account ID of the instance owner. This is only present if the\n product code is attached to the instance.

", - "smithy.api#xmlName": "ownerId" - } - }, "Return": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -13003,6 +13302,14 @@ "smithy.api#documentation": "

The return value of the request. Returns true if the specified product\n code is owned by the requester and associated with the specified instance.

", "smithy.api#xmlName": "return" } + }, + "OwnerId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "OwnerId", + "smithy.api#documentation": "

The Amazon Web Services account ID of the instance owner. This is only present if the\n product code is attached to the instance.

", + "smithy.api#xmlName": "ownerId" + } } }, "traits": { @@ -13605,14 +13912,6 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only \n \t\tspecify this parameter when copying an AMI from an Amazon Web Services Region to an Outpost. \n \t\tThe AMI must be in the Region of the destination Outpost. You cannot copy an \n \t\tAMI from an Outpost to a Region, from one Outpost to another, or within the same \n \t\tOutpost.

\n

For more information, see Copy AMIs from an Amazon Web Services\n Region to an Outpost in the Amazon EBS User Guide.

" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "CopyImageTags": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -13625,6 +13924,14 @@ "smithy.api#documentation": "

The tags to apply to the new AMI and new snapshots. You can tag the AMI, the snapshots, or\n both.

  • To tag the new AMI, the value for ResourceType must be image.
  • To tag the new snapshots, the value for ResourceType must be snapshot. The same tag is applied to all the new snapshots.

If you specify other values for ResourceType, the request fails.

\n

To tag an AMI or snapshot after it has been created, see CreateTags.

", "smithy.api#xmlName": "TagSpecification" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } } }, "traits": { @@ -13768,14 +14075,6 @@ "com.amazonaws.ec2#CopySnapshotResult": { "type": "structure", "members": { - "SnapshotId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "SnapshotId", - "smithy.api#documentation": "

The ID of the new snapshot.

", - "smithy.api#xmlName": "snapshotId" - } - }, "Tags": { "target": "com.amazonaws.ec2#TagList", "traits": { @@ -13783,6 +14082,14 @@ "smithy.api#documentation": "

Any tags applied to the new snapshot.

", "smithy.api#xmlName": "tagSet" } + }, + "SnapshotId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotId", + "smithy.api#documentation": "

The ID of the new snapshot.

", + "smithy.api#xmlName": "snapshotId" + } } }, "traits": { @@ -14821,6 +15128,12 @@ "smithy.api#documentation": "

IPv4 address for the customer gateway device's outside interface. The address must be\n static. If OutsideIpAddressType in your VPN connection options is set to\n PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If\n OutsideIpAddressType is set to PublicIpv4, you can use a\n public IPv4 address.

" } }, + "BgpAsnExtended": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "smithy.api#documentation": "

For customer gateway devices that support BGP, specify the device's ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended.

\n

Valid values: 2,147,483,648 to 4,294,967,295\n
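A hedged Soto sketch of the new BgpAsnExtended member for a four-byte ASN, reusing the ec2 client from the first sketch above; the member names mirror this model, but the initializer ordering and the .ipsec1 case name are assumptions about Soto's generated code.

    // ASNs above 2,147,483,647 go in bgpAsnExtended rather than bgpAsn.
    let result = try await ec2.createCustomerGateway(
        .init(bgpAsnExtended: 4_200_000_000, ipAddress: "203.0.113.12", type: .ipsec1)  // "ipsec.1"
    )
    print(result.customerGateway?.customerGatewayId ?? "no gateway returned")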

" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -14828,12 +15141,6 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually\n making the request, and provides an error response. If you have the required\n permissions, the error response is DryRunOperation. Otherwise, it is\n UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } - }, - "BgpAsnExtended": { - "target": "com.amazonaws.ec2#Long", - "traits": { - "smithy.api#documentation": "

For customer gateway devices that support BGP, specify the device's ASN. You must specify either BgpAsn or BgpAsnExtended when creating the customer gateway. If the ASN is larger than 2,147,483,647, you must use BgpAsnExtended.

\n

Valid values: 2,147,483,648 to 4,294,967,295\n

" - } } }, "traits": { @@ -15223,7 +15530,7 @@ "target": "com.amazonaws.ec2#PlatformValues", "traits": { "aws.protocols#ec2QueryName": "Platform", - "smithy.api#documentation": "

The value is Windows for Windows instances. Otherwise, the value is\n blank.

", + "smithy.api#documentation": "

The value is windows for Windows instances in an EC2 Fleet. Otherwise, the value is\n blank.

", "smithy.api#xmlName": "platform" } } @@ -15616,20 +15923,11 @@ "com.amazonaws.ec2#CreateImageRequest": { "type": "structure", "members": { - "BlockDeviceMappings": { - "target": "com.amazonaws.ec2#BlockDeviceMappingRequestList", - "traits": { - "aws.protocols#ec2QueryName": "BlockDeviceMapping", - "smithy.api#documentation": "

The block device mappings.

\n

When using the CreateImage action:

  • You can't change the volume size using the VolumeSize parameter. If you want a different volume size, you must first change the volume size of the source instance.
  • You can't modify the encryption status of existing volumes or snapshots. To create an AMI with volumes or snapshots that have a different encryption status (for example, where the source volume and snapshots are unencrypted, and you want to create an AMI with encrypted volumes or snapshots), use the CopyImage action.
  • The only option that can be changed for existing mappings or snapshots is DeleteOnTermination.
", - "smithy.api#xmlName": "blockDeviceMapping" - } - }, - "Description": { - "target": "com.amazonaws.ec2#String", + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { - "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

A description for the new image.

", - "smithy.api#xmlName": "description" + "smithy.api#documentation": "

The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the\n snapshots, or both.

  • To tag the AMI, the value for ResourceType must be image.
  • To tag the snapshots that are created of the root volume and of other Amazon EBS volumes that are attached to the instance, the value for ResourceType must be snapshot. The same tag is applied to all of the snapshots that are created.

If you specify other values for ResourceType, the request fails.

\n

To tag an AMI or snapshot after it has been created, see CreateTags.
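A short Soto sketch of tagging both the AMI and its snapshots at creation, as described above; the EC2.CreateImageRequest and EC2.TagSpecification initializers are assumed from Soto's code generation.

    let nameTag = [EC2.Tag(key: "Name", value: "web-backup")]
    _ = try await ec2.createImage(
        .init(
            instanceId: "i-1234567890abcdef0",
            name: "web-backup-ami",
            tagSpecifications: [
                EC2.TagSpecification(resourceType: .image, tags: nameTag),    // tags the AMI
                EC2.TagSpecification(resourceType: .snapshot, tags: nameTag)  // tags every created snapshot
            ]
        )
    )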

", + "smithy.api#xmlName": "TagSpecification" } }, "DryRun": { @@ -15660,6 +15958,14 @@ "smithy.api#xmlName": "name" } }, + "Description": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

A description for the new image.

", + "smithy.api#xmlName": "description" + } + }, "NoReboot": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -15668,11 +15974,12 @@ "smithy.api#xmlName": "noReboot" } }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", + "BlockDeviceMappings": { + "target": "com.amazonaws.ec2#BlockDeviceMappingRequestList", "traits": { - "smithy.api#documentation": "

The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the\n snapshots, or both.

  • To tag the AMI, the value for ResourceType must be image.
  • To tag the snapshots that are created of the root volume and of other Amazon EBS volumes that are attached to the instance, the value for ResourceType must be snapshot. The same tag is applied to all of the snapshots that are created.

If you specify other values for ResourceType, the request fails.

\n

To tag an AMI or snapshot after it has been created, see CreateTags.

", - "smithy.api#xmlName": "TagSpecification" + "aws.protocols#ec2QueryName": "BlockDeviceMapping", + "smithy.api#documentation": "

The block device mappings.

\n

When using the CreateImage action:

  • You can't change the volume size using the VolumeSize parameter. If you want a different volume size, you must first change the volume size of the source instance.
  • You can't modify the encryption status of existing volumes or snapshots. To create an AMI with volumes or snapshots that have a different encryption status (for example, where the source volume and snapshots are unencrypted, and you want to create an AMI with encrypted volumes or snapshots), use the CopyImage action.
  • The only option that can be changed for existing mappings or snapshots is DeleteOnTermination.
", + "smithy.api#xmlName": "blockDeviceMapping" } } }, @@ -15864,6 +16171,13 @@ "com.amazonaws.ec2#CreateInstanceExportTaskRequest": { "type": "structure", "members": { + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

The tags to apply to the export instance task during creation.

", + "smithy.api#xmlName": "TagSpecification" + } + }, "Description": { "target": "com.amazonaws.ec2#String", "traits": { @@ -15872,16 +16186,6 @@ "smithy.api#xmlName": "description" } }, - "ExportToS3Task": { - "target": "com.amazonaws.ec2#ExportToS3TaskSpecification", - "traits": { - "aws.protocols#ec2QueryName": "ExportToS3", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The format and location for an export instance task.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "exportToS3" - } - }, "InstanceId": { "target": "com.amazonaws.ec2#InstanceId", "traits": { @@ -15902,11 +16206,14 @@ "smithy.api#xmlName": "targetEnvironment" } }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", + "ExportToS3Task": { + "target": "com.amazonaws.ec2#ExportToS3TaskSpecification", "traits": { - "smithy.api#documentation": "

The tags to apply to the export instance task during creation.

", - "smithy.api#xmlName": "TagSpecification" + "aws.protocols#ec2QueryName": "ExportToS3", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The format and location for an export instance task.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "exportToS3" } } }, @@ -16102,7 +16409,7 @@ "Locale": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The locale for the pool should be one of the following:

  • An Amazon Web Services Region where you want this IPAM pool to be available for allocations.
  • The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope.

If you do not choose a locale, resources in Regions other than the IPAM's home region cannot use CIDRs from this pool.

\n

Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone.

" + "smithy.api#documentation": "

The locale for the pool should be one of the following:

  • An Amazon Web Services Region where you want this IPAM pool to be available for allocations.
  • The network border group for an Amazon Web Services Local Zone where you want this IPAM pool to be available for allocations (supported Local Zones). This option is only available for IPAM IPv4 pools in the public scope.

If you do not choose a locale, resources in Regions other than the IPAM's home region cannot use CIDRs from this pool.

\n

Possible values: Any Amazon Web Services Region or supported Amazon Web Services Local Zone. Default is none and means any locale.
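A sketch of how the locale might be supplied through Soto when creating a pool; EC2.CreateIpamPoolRequest and its member names are assumed from this model and Soto's conventions, reusing the ec2 client from the first sketch.

    // Pin the pool to one Region so its CIDRs are allocatable there.
    _ = try await ec2.createIpamPool(
        .init(addressFamily: .ipv4, ipamScopeId: "ipam-scope-0abc1234def567890", locale: "eu-west-1")
    )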

" } }, "SourceIpamPoolId": { @@ -16455,14 +16762,6 @@ "smithy.api#required": {} } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "KeyType": { "target": "com.amazonaws.ec2#KeyType", "traits": { @@ -16481,6 +16780,14 @@ "traits": { "smithy.api#documentation": "

The format of the key pair.

\n

Default: pem\n
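A minimal Soto sketch of requesting an ed25519 key pair in the default pem format; the generated EC2.CreateKeyPairRequest initializer is an assumption, not part of this diff.

    let keyPair = try await ec2.createKeyPair(
        .init(keyFormat: .pem, keyName: "deploy-key", keyType: .ed25519)
    )
    _ = keyPair.keyMaterial   // unencrypted private key; persist it securely, it cannot be retrieved later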

" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } } }, "traits": { @@ -17317,14 +17624,6 @@ "com.amazonaws.ec2#CreateNetworkAclEntryRequest": { "type": "structure", "members": { - "CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "CidrBlock", - "smithy.api#documentation": "

The IPv4 network range to allow or deny, in CIDR notation (for example\n\t\t 172.16.0.0/24). We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

", - "smithy.api#xmlName": "cidrBlock" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -17333,31 +17632,6 @@ "smithy.api#xmlName": "dryRun" } }, - "Egress": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "Egress", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet).

", - "smithy.api#required": {}, - "smithy.api#xmlName": "egress" - } - }, - "IcmpTypeCode": { - "target": "com.amazonaws.ec2#IcmpTypeCode", - "traits": { - "smithy.api#documentation": "

ICMP protocol: The ICMP or ICMPv6 type and code. Required if specifying protocol \n\t\t 1 (ICMP) or protocol 58 (ICMPv6) with an IPv6 CIDR block.

", - "smithy.api#xmlName": "Icmp" - } - }, - "Ipv6CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Ipv6CidrBlock", - "smithy.api#documentation": "

The IPv6 network range to allow or deny, in CIDR notation (for example\n 2001:db8:1234:1a00::/64).

", - "smithy.api#xmlName": "ipv6CidrBlock" - } - }, "NetworkAclId": { "target": "com.amazonaws.ec2#NetworkAclId", "traits": { @@ -17368,12 +17642,14 @@ "smithy.api#xmlName": "networkAclId" } }, - "PortRange": { - "target": "com.amazonaws.ec2#PortRange", + "RuleNumber": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "PortRange", - "smithy.api#documentation": "

TCP or UDP protocols: The range of ports the rule applies to.\n\t\t Required if specifying protocol 6 (TCP) or 17 (UDP).

", - "smithy.api#xmlName": "portRange" + "aws.protocols#ec2QueryName": "RuleNumber", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.

\n

Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 is reserved for internal use.
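A hedged Soto sketch of adding an inbound HTTPS allow rule with a low rule number; the EC2.CreateNetworkAclEntryRequest initializer ordering follows Soto's alphabetical code generation and is an assumption.

    // Rules are evaluated in ascending order; leaving gaps (100, 200, ...) makes later inserts easier.
    _ = try await ec2.createNetworkAclEntry(
        .init(
            cidrBlock: "0.0.0.0/0",
            egress: false,
            networkAclId: "acl-0abc1234def567890",
            portRange: EC2.PortRange(from: 443, to: 443),
            protocol: "6",       // TCP
            ruleAction: .allow,
            ruleNumber: 100
        )
    )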

", + "smithy.api#required": {}, + "smithy.api#xmlName": "ruleNumber" } }, "Protocol": { @@ -17396,14 +17672,45 @@ "smithy.api#xmlName": "ruleAction" } }, - "RuleNumber": { - "target": "com.amazonaws.ec2#Integer", + "Egress": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "aws.protocols#ec2QueryName": "RuleNumber", + "aws.protocols#ec2QueryName": "Egress", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.

\n

Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 is reserved for internal use.

", + "smithy.api#documentation": "

Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet).

", "smithy.api#required": {}, - "smithy.api#xmlName": "ruleNumber" + "smithy.api#xmlName": "egress" + } + }, + "CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CidrBlock", + "smithy.api#documentation": "

The IPv4 network range to allow or deny, in CIDR notation (for example\n\t\t 172.16.0.0/24). We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

", + "smithy.api#xmlName": "cidrBlock" + } + }, + "Ipv6CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6CidrBlock", + "smithy.api#documentation": "

The IPv6 network range to allow or deny, in CIDR notation (for example\n 2001:db8:1234:1a00::/64).

", + "smithy.api#xmlName": "ipv6CidrBlock" + } + }, + "IcmpTypeCode": { + "target": "com.amazonaws.ec2#IcmpTypeCode", + "traits": { + "smithy.api#documentation": "

ICMP protocol: The ICMP or ICMPv6 type and code. Required if specifying protocol \n\t\t 1 (ICMP) or protocol 58 (ICMPv6) with an IPv6 CIDR block.

", + "smithy.api#xmlName": "Icmp" + } + }, + "PortRange": { + "target": "com.amazonaws.ec2#PortRange", + "traits": { + "aws.protocols#ec2QueryName": "PortRange", + "smithy.api#documentation": "

TCP or UDP protocols: The range of ports the rule applies to.\n\t\t Required if specifying protocol 6 (TCP) or 17 (UDP).

", + "smithy.api#xmlName": "portRange" } } }, @@ -17414,6 +17721,20 @@ "com.amazonaws.ec2#CreateNetworkAclRequest": { "type": "structure", "members": { + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

The tags to assign to the network ACL.

", + "smithy.api#xmlName": "TagSpecification" + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -17431,20 +17752,6 @@ "smithy.api#required": {}, "smithy.api#xmlName": "vpcId" } - }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", - "traits": { - "smithy.api#documentation": "

The tags to assign to the network ACL.

", - "smithy.api#xmlName": "TagSpecification" - } - }, - "ClientToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency.

", - "smithy.api#idempotencyToken": {} - } } }, "traits": { @@ -17752,69 +18059,6 @@ "com.amazonaws.ec2#CreateNetworkInterfaceRequest": { "type": "structure", "members": { - "Description": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

A description for the network interface.

", - "smithy.api#xmlName": "description" - } - }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, - "Groups": { - "target": "com.amazonaws.ec2#SecurityGroupIdStringList", - "traits": { - "smithy.api#documentation": "

The IDs of one or more security groups.

", - "smithy.api#xmlName": "SecurityGroupId" - } - }, - "Ipv6AddressCount": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "Ipv6AddressCount", - "smithy.api#documentation": "

The number of IPv6 addresses to assign to a network interface. Amazon EC2\n automatically selects the IPv6 addresses from the subnet range.

\n

You can't specify a count of IPv6 addresses using this parameter if you've specified \n one of the following: specific IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.

\n

If your subnet has the AssignIpv6AddressOnCreation attribute set, you can\n override that setting by specifying 0 as the IPv6 address count.

", - "smithy.api#xmlName": "ipv6AddressCount" - } - }, - "Ipv6Addresses": { - "target": "com.amazonaws.ec2#InstanceIpv6AddressList", - "traits": { - "aws.protocols#ec2QueryName": "Ipv6Addresses", - "smithy.api#documentation": "

The IPv6 addresses from the IPv6 CIDR block range of your subnet.

\n

You can't specify IPv6 addresses using this parameter if you've specified one of the \n following: a count of IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.

", - "smithy.api#xmlName": "ipv6Addresses" - } - }, - "PrivateIpAddress": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "PrivateIpAddress", - "smithy.api#documentation": "

The primary private IPv4 address of the network interface. If you don't specify an\n IPv4 address, Amazon EC2 selects one for you from the subnet's IPv4 CIDR range. If you\n specify an IP address, you cannot indicate any IP addresses specified in\n privateIpAddresses as primary (only one IP address can be designated as\n primary).

", - "smithy.api#xmlName": "privateIpAddress" - } - }, - "PrivateIpAddresses": { - "target": "com.amazonaws.ec2#PrivateIpAddressSpecificationList", - "traits": { - "aws.protocols#ec2QueryName": "PrivateIpAddresses", - "smithy.api#documentation": "

The private IPv4 addresses.

\n

You can't specify private IPv4 addresses if you've specified one of the following:\n a count of private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.

", - "smithy.api#xmlName": "privateIpAddresses" - } - }, - "SecondaryPrivateIpAddressCount": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "SecondaryPrivateIpAddressCount", - "smithy.api#documentation": "

The number of secondary private IPv4 addresses to assign to a network interface. When\n you specify a number of secondary IPv4 addresses, Amazon EC2 selects these IP addresses\n within the subnet's IPv4 CIDR range. You can't specify this option and specify more than\n one private IP address using privateIpAddresses.

\n

You can't specify a count of private IPv4 addresses if you've specified one of the following:\n specific private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.

", - "smithy.api#xmlName": "secondaryPrivateIpAddressCount" - } - }, "Ipv4Prefixes": { "target": "com.amazonaws.ec2#Ipv4PrefixList", "traits": { @@ -17847,16 +18091,6 @@ "smithy.api#documentation": "

The type of network interface. The default is interface.

\n

The only supported values are interface, efa, and trunk.

" } }, - "SubnetId": { - "target": "com.amazonaws.ec2#SubnetId", - "traits": { - "aws.protocols#ec2QueryName": "SubnetId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the subnet to associate with the network interface.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "subnetId" - } - }, "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { @@ -17882,6 +18116,79 @@ "traits": { "smithy.api#documentation": "

A connection tracking specification for the network interface.

" } + }, + "SubnetId": { + "target": "com.amazonaws.ec2#SubnetId", + "traits": { + "aws.protocols#ec2QueryName": "SubnetId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the subnet to associate with the network interface.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "subnetId" + } + }, + "Description": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

A description for the network interface.

", + "smithy.api#xmlName": "description" + } + }, + "PrivateIpAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PrivateIpAddress", + "smithy.api#documentation": "

The primary private IPv4 address of the network interface. If you don't specify an\n IPv4 address, Amazon EC2 selects one for you from the subnet's IPv4 CIDR range. If you\n specify an IP address, you cannot indicate any IP addresses specified in\n privateIpAddresses as primary (only one IP address can be designated as\n primary).

", + "smithy.api#xmlName": "privateIpAddress" + } + }, + "Groups": { + "target": "com.amazonaws.ec2#SecurityGroupIdStringList", + "traits": { + "smithy.api#documentation": "

The IDs of one or more security groups.

", + "smithy.api#xmlName": "SecurityGroupId" + } + }, + "PrivateIpAddresses": { + "target": "com.amazonaws.ec2#PrivateIpAddressSpecificationList", + "traits": { + "aws.protocols#ec2QueryName": "PrivateIpAddresses", + "smithy.api#documentation": "

The private IPv4 addresses.

\n

You can't specify private IPv4 addresses if you've specified one of the following:\n a count of private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.

", + "smithy.api#xmlName": "privateIpAddresses" + } + }, + "SecondaryPrivateIpAddressCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "SecondaryPrivateIpAddressCount", + "smithy.api#documentation": "

The number of secondary private IPv4 addresses to assign to a network interface. When\n you specify a number of secondary IPv4 addresses, Amazon EC2 selects these IP addresses\n within the subnet's IPv4 CIDR range. You can't specify this option and specify more than\n one private IP address using privateIpAddresses.

\n

You can't specify a count of private IPv4 addresses if you've specified one of the following:\n specific private IPv4 addresses, specific IPv4 prefixes, or a count of IPv4 prefixes.

", + "smithy.api#xmlName": "secondaryPrivateIpAddressCount" + } + }, + "Ipv6Addresses": { + "target": "com.amazonaws.ec2#InstanceIpv6AddressList", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6Addresses", + "smithy.api#documentation": "

The IPv6 addresses from the IPv6 CIDR block range of your subnet.

\n

You can't specify IPv6 addresses using this parameter if you've specified one of the \n following: a count of IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.

", + "smithy.api#xmlName": "ipv6Addresses" + } + }, + "Ipv6AddressCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6AddressCount", + "smithy.api#documentation": "

The number of IPv6 addresses to assign to a network interface. Amazon EC2\n automatically selects the IPv6 addresses from the subnet range.

\n

You can't specify a count of IPv6 addresses using this parameter if you've specified \n one of the following: specific IPv6 addresses, specific IPv6 prefixes, or a count of IPv6 prefixes.

\n

If your subnet has the AssignIpv6AddressOnCreation attribute set, you can\n override that setting by specifying 0 as the IPv6 address count.

", + "smithy.api#xmlName": "ipv6AddressCount" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } } }, "traits": { @@ -17938,11 +18245,30 @@ "com.amazonaws.ec2#CreatePlacementGroupRequest": { "type": "structure", "members": { + "PartitionCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#documentation": "

The number of partitions. Valid only when Strategy is\n set to partition.

" + } + }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

The tags to apply to the new placement group.

", + "smithy.api#xmlName": "TagSpecification" + } + }, + "SpreadLevel": { + "target": "com.amazonaws.ec2#SpreadLevel", + "traits": { + "smithy.api#documentation": "

Determines how placement groups spread instances.

  • Host – You can use host only with Outpost placement groups.
  • Rack – No usage restrictions.
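A brief Soto sketch of the strategy and partitionCount members documented here; the EC2.CreatePlacementGroupRequest initializer is assumed from Soto's generated API, with the ec2 client from the first sketch.

    // A partition placement group; spreadLevel applies only to the spread strategy.
    _ = try await ec2.createPlacementGroup(
        .init(groupName: "kafka-brokers", partitionCount: 3, strategy: .partition)
    )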
" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, @@ -17961,25 +18287,6 @@ "smithy.api#documentation": "

The placement strategy.

", "smithy.api#xmlName": "strategy" } - }, - "PartitionCount": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "smithy.api#documentation": "

The number of partitions. Valid only when Strategy is\n set to partition.

" - } - }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", - "traits": { - "smithy.api#documentation": "

The tags to apply to the new placement group.

", - "smithy.api#xmlName": "TagSpecification" - } - }, - "SpreadLevel": { - "target": "com.amazonaws.ec2#SpreadLevel", - "traits": { - "smithy.api#documentation": "

Determines how placement groups spread instances.

  • Host – You can use host only with Outpost placement groups.
  • Rack – No usage restrictions.
" - } } }, "traits": { @@ -18154,14 +18461,14 @@ "com.amazonaws.ec2#CreateReservedInstancesListingRequest": { "type": "structure", "members": { - "ClientToken": { - "target": "com.amazonaws.ec2#String", + "ReservedInstancesId": { + "target": "com.amazonaws.ec2#ReservationId", "traits": { - "aws.protocols#ec2QueryName": "ClientToken", + "aws.protocols#ec2QueryName": "ReservedInstancesId", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure idempotency of your\n\t\t\t\tlistings. This helps avoid duplicate listings. For more information, see \n\t\t\t\tEnsuring Idempotency.

", + "smithy.api#documentation": "

The ID of the active Standard Reserved Instance.

", "smithy.api#required": {}, - "smithy.api#xmlName": "clientToken" + "smithy.api#xmlName": "reservedInstancesId" } }, "InstanceCount": { @@ -18184,14 +18491,14 @@ "smithy.api#xmlName": "priceSchedules" } }, - "ReservedInstancesId": { - "target": "com.amazonaws.ec2#ReservationId", + "ClientToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "ReservedInstancesId", + "aws.protocols#ec2QueryName": "ClientToken", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the active Standard Reserved Instance.

", + "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure idempotency of your\n\t\t\t\tlistings. This helps avoid duplicate listings. For more information, see \n\t\t\t\tEnsuring Idempotency.

", "smithy.api#required": {}, - "smithy.api#xmlName": "reservedInstancesId" + "smithy.api#xmlName": "clientToken" } } }, @@ -18314,26 +18621,40 @@ "com.amazonaws.ec2#CreateRouteRequest": { "type": "structure", "members": { - "DestinationCidrBlock": { - "target": "com.amazonaws.ec2#String", + "DestinationPrefixListId": { + "target": "com.amazonaws.ec2#PrefixListResourceId", "traits": { - "aws.protocols#ec2QueryName": "DestinationCidrBlock", - "smithy.api#documentation": "

The IPv4 CIDR address block used for the destination match. Routing decisions are based on the most specific match. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

", - "smithy.api#xmlName": "destinationCidrBlock" + "smithy.api#documentation": "

The ID of a prefix list used for the destination match.

" } }, - "DestinationIpv6CidrBlock": { - "target": "com.amazonaws.ec2#String", + "VpcEndpointId": { + "target": "com.amazonaws.ec2#VpcEndpointId", "traits": { - "aws.protocols#ec2QueryName": "DestinationIpv6CidrBlock", - "smithy.api#documentation": "

The IPv6 CIDR block used for the destination match. Routing decisions are based on the most specific match.

", - "smithy.api#xmlName": "destinationIpv6CidrBlock" + "smithy.api#documentation": "

The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.

" } }, - "DestinationPrefixListId": { - "target": "com.amazonaws.ec2#PrefixListResourceId", + "TransitGatewayId": { + "target": "com.amazonaws.ec2#TransitGatewayId", "traits": { - "smithy.api#documentation": "

The ID of a prefix list used for the destination match.

" + "smithy.api#documentation": "

The ID of a transit gateway.

" + } + }, + "LocalGatewayId": { + "target": "com.amazonaws.ec2#LocalGatewayId", + "traits": { + "smithy.api#documentation": "

The ID of the local gateway.

" + } + }, + "CarrierGatewayId": { + "target": "com.amazonaws.ec2#CarrierGatewayId", + "traits": { + "smithy.api#documentation": "

The ID of the carrier gateway.

\n

You can only use this option when the VPC contains a subnet which is associated with a Wavelength Zone.

" + } + }, + "CoreNetworkArn": { + "target": "com.amazonaws.ec2#CoreNetworkArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the core network.

" } }, "DryRun": { @@ -18344,18 +18665,22 @@ "smithy.api#xmlName": "dryRun" } }, - "VpcEndpointId": { - "target": "com.amazonaws.ec2#VpcEndpointId", + "RouteTableId": { + "target": "com.amazonaws.ec2#RouteTableId", "traits": { - "smithy.api#documentation": "

The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.

" + "aws.protocols#ec2QueryName": "RouteTableId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the route table for the route.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "routeTableId" } }, - "EgressOnlyInternetGatewayId": { - "target": "com.amazonaws.ec2#EgressOnlyInternetGatewayId", + "DestinationCidrBlock": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "EgressOnlyInternetGatewayId", - "smithy.api#documentation": "

[IPv6 traffic only] The ID of an egress-only internet gateway.

", - "smithy.api#xmlName": "egressOnlyInternetGatewayId" + "aws.protocols#ec2QueryName": "DestinationCidrBlock", + "smithy.api#documentation": "

The IPv4 CIDR address block used for the destination match. Routing decisions are based on the most specific match. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

", + "smithy.api#xmlName": "destinationCidrBlock" } }, "GatewayId": { @@ -18366,38 +18691,28 @@ "smithy.api#xmlName": "gatewayId" } }, - "InstanceId": { - "target": "com.amazonaws.ec2#InstanceId", - "traits": { - "aws.protocols#ec2QueryName": "InstanceId", - "smithy.api#documentation": "

The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached.

", - "smithy.api#xmlName": "instanceId" - } - }, - "NatGatewayId": { - "target": "com.amazonaws.ec2#NatGatewayId", - "traits": { - "aws.protocols#ec2QueryName": "NatGatewayId", - "smithy.api#documentation": "

[IPv4 traffic only] The ID of a NAT gateway.

", - "smithy.api#xmlName": "natGatewayId" - } - }, - "TransitGatewayId": { - "target": "com.amazonaws.ec2#TransitGatewayId", + "DestinationIpv6CidrBlock": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The ID of a transit gateway.

" + "aws.protocols#ec2QueryName": "DestinationIpv6CidrBlock", + "smithy.api#documentation": "

The IPv6 CIDR block used for the destination match. Routing decisions are based on the most specific match.

", + "smithy.api#xmlName": "destinationIpv6CidrBlock" } }, - "LocalGatewayId": { - "target": "com.amazonaws.ec2#LocalGatewayId", + "EgressOnlyInternetGatewayId": { + "target": "com.amazonaws.ec2#EgressOnlyInternetGatewayId", "traits": { - "smithy.api#documentation": "

The ID of the local gateway.

" + "aws.protocols#ec2QueryName": "EgressOnlyInternetGatewayId", + "smithy.api#documentation": "

[IPv6 traffic only] The ID of an egress-only internet gateway.

", + "smithy.api#xmlName": "egressOnlyInternetGatewayId" } }, - "CarrierGatewayId": { - "target": "com.amazonaws.ec2#CarrierGatewayId", + "InstanceId": { + "target": "com.amazonaws.ec2#InstanceId", "traits": { - "smithy.api#documentation": "

The ID of the carrier gateway.

\n

You can only use this option when the VPC contains a subnet which is associated with a Wavelength Zone.

" + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#documentation": "

The ID of a NAT instance in your VPC. The operation fails if you specify an instance ID unless exactly one network interface is attached.

", + "smithy.api#xmlName": "instanceId" } }, "NetworkInterfaceId": { @@ -18408,16 +18723,6 @@ "smithy.api#xmlName": "networkInterfaceId" } }, - "RouteTableId": { - "target": "com.amazonaws.ec2#RouteTableId", - "traits": { - "aws.protocols#ec2QueryName": "RouteTableId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the route table for the route.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "routeTableId" - } - }, "VpcPeeringConnectionId": { "target": "com.amazonaws.ec2#VpcPeeringConnectionId", "traits": { @@ -18426,10 +18731,12 @@ "smithy.api#xmlName": "vpcPeeringConnectionId" } }, - "CoreNetworkArn": { - "target": "com.amazonaws.ec2#CoreNetworkArn", + "NatGatewayId": { + "target": "com.amazonaws.ec2#NatGatewayId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the core network.

" + "aws.protocols#ec2QueryName": "NatGatewayId", + "smithy.api#documentation": "

[IPv4 traffic only] The ID of a NAT gateway.
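A minimal Soto sketch of CreateRoute with a single target, as the members above describe; the EC2.CreateRouteRequest initializer shape is an assumption based on Soto's code generation.

    // Default route for a private subnet through a NAT gateway; set exactly one target per route.
    _ = try await ec2.createRoute(
        .init(
            destinationCidrBlock: "0.0.0.0/0",
            natGatewayId: "nat-0abc1234def567890",
            routeTableId: "rtb-0abc1234def567890"
        )
    )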

", + "smithy.api#xmlName": "natGatewayId" } } }, @@ -18493,6 +18800,20 @@ "com.amazonaws.ec2#CreateRouteTableRequest": { "type": "structure", "members": { + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

The tags to assign to the route table.

", + "smithy.api#xmlName": "TagSpecification" + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency.

", + "smithy.api#idempotencyToken": {} + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -18510,20 +18831,6 @@ "smithy.api#required": {}, "smithy.api#xmlName": "vpcId" } - }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", - "traits": { - "smithy.api#documentation": "

The tags to assign to the route table.

", - "smithy.api#xmlName": "TagSpecification" - } - }, - "ClientToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency.

", - "smithy.api#idempotencyToken": {} - } } }, "traits": { @@ -18833,24 +19140,24 @@ "com.amazonaws.ec2#CreateSpotDatafeedSubscriptionRequest": { "type": "structure", "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually\n making the request, and provides an error response. If you have the required\n permissions, the error response is DryRunOperation. Otherwise, it is\n UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, "Bucket": { "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Bucket", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the Amazon S3 bucket in which to store the Spot Instance data feed. For\n more information about bucket names, see Rules for bucket\n naming in the Amazon S3 Developer Guide.

", + "smithy.api#documentation": "

The name of the Amazon S3 bucket in which to store the Spot Instance data feed. For\n more information about bucket names, see Bucket naming rules \n in the Amazon S3 User Guide.
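A small Soto sketch of subscribing to the Spot Instance data feed; the EC2.CreateSpotDatafeedSubscriptionRequest initializer is assumed, and the bucket name is a placeholder for a bucket you own.

    _ = try await ec2.createSpotDatafeedSubscription(
        .init(bucket: "amzn-s3-demo-bucket", prefix: "spot-datafeed/")
    )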

", "smithy.api#required": {}, "smithy.api#xmlName": "bucket" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually\n making the request, and provides an error response. If you have the required\n permissions, the error response is DryRunOperation. Otherwise, it is\n UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "Prefix": { "target": "com.amazonaws.ec2#String", "traits": { @@ -19106,14 +19413,6 @@ "smithy.api#required": {} } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "Ipv6Native": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -19143,6 +19442,14 @@ "traits": { "smithy.api#documentation": "

An IPv6 netmask length for the subnet.

" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } } }, "traits": { @@ -20444,7 +20751,7 @@ "SecurityGroupReferencingSupport": { "target": "com.amazonaws.ec2#SecurityGroupReferencingSupportValue", "traits": { - "smithy.api#documentation": "\n

This parameter is in preview and may not be available for your account.

\n
\n

Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.

\n

If you don't enable or disable SecurityGroupReferencingSupport in the request, the\n attachment will inherit the security group referencing support setting on the transit\n gateway.

" + "smithy.api#documentation": "

Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management.

\n

This option is set to enable by default. However, at the transit gateway level the default is set to disable.

\n

For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide.
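A hedged Soto sketch of overriding the attachment-level setting described above; EC2.CreateTransitGatewayVpcAttachmentRequestOptions and the .disable case are assumed from this model and Soto's conventions.

    _ = try await ec2.createTransitGatewayVpcAttachment(
        .init(
            options: .init(securityGroupReferencingSupport: .disable),  // opt this attachment out
            subnetIds: ["subnet-0abc1234def567890"],
            transitGatewayId: "tgw-0abc1234def567890",
            vpcId: "vpc-0abc1234def567890"
        )
    )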

" } }, "Ipv6Support": { @@ -21076,14 +21383,6 @@ "com.amazonaws.ec2#CreateVolumePermission": { "type": "structure", "members": { - "Group": { - "target": "com.amazonaws.ec2#PermissionGroup", - "traits": { - "aws.protocols#ec2QueryName": "Group", - "smithy.api#documentation": "

The group to be added or removed. The possible value is all.

", - "smithy.api#xmlName": "group" - } - }, "UserId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -21091,6 +21390,14 @@ "smithy.api#documentation": "

The ID of the Amazon Web Services account to be added or removed.

", "smithy.api#xmlName": "userId" } + }, + "Group": { + "target": "com.amazonaws.ec2#PermissionGroup", + "traits": { + "aws.protocols#ec2QueryName": "Group", + "smithy.api#documentation": "

The group to be added or removed. The possible value is all.

", + "smithy.api#xmlName": "group" + } } }, "traits": { @@ -21181,14 +21488,6 @@ "smithy.api#documentation": "

The volume type. This parameter can be one of the following values:

  • General Purpose SSD: gp2 | gp3
  • Provisioned IOPS SSD: io1 | io2
  • Throughput Optimized HDD: st1
  • Cold HDD: sc1
  • Magnetic: standard

Throughput Optimized HDD (st1) and Cold HDD (sc1) volumes can't be used as boot volumes.

\n
\n

For more information, see Amazon EBS volume types in the\n Amazon EBS User Guide.

\n

Default: gp2\n
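A minimal Soto sketch of creating a gp3 volume rather than relying on the gp2 default noted above; the EC2.CreateVolumeRequest initializer is assumed from Soto's code generation.

    _ = try await ec2.createVolume(
        .init(availabilityZone: "us-east-1a", size: 100, volumeType: .gp3)
    )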

" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { @@ -21214,6 +21513,14 @@ "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency \n of the request. For more information, see Ensure \n Idempotency.

", "smithy.api#idempotencyToken": {} } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } } }, "traits": { @@ -21582,28 +21889,25 @@ "com.amazonaws.ec2#CreateVpcPeeringConnectionRequest": { "type": "structure", "members": { - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", + "PeerRegion": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" + "smithy.api#documentation": "

The Region code for the accepter VPC, if the accepter VPC is located in a Region\n other than the Region in which you make the request.

\n

Default: The Region in which you make the request.
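A short Soto sketch of a cross-Region peering request using peerRegion; the EC2.CreateVpcPeeringConnectionRequest initializer shape is an assumption, with the ec2 client from the first sketch.

    _ = try await ec2.createVpcPeeringConnection(
        .init(
            peerRegion: "eu-west-1",                 // Region of the accepter VPC
            peerVpcId: "vpc-0def1234abc567890",
            vpcId: "vpc-0abc1234def567890"
        )
    )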

" } }, - "PeerOwnerId": { - "target": "com.amazonaws.ec2#String", + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { - "aws.protocols#ec2QueryName": "PeerOwnerId", - "smithy.api#documentation": "

The Amazon Web Services account ID of the owner of the accepter VPC.

\n

Default: Your Amazon Web Services account ID

", - "smithy.api#xmlName": "peerOwnerId" + "smithy.api#documentation": "

The tags to assign to the peering connection.

", + "smithy.api#xmlName": "TagSpecification" } }, - "PeerVpcId": { - "target": "com.amazonaws.ec2#String", + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "aws.protocols#ec2QueryName": "PeerVpcId", - "smithy.api#documentation": "

The ID of the VPC with which you are creating the VPC peering connection. You must\n\t\t\tspecify this parameter in the request.

", - "smithy.api#xmlName": "peerVpcId" + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" } }, "VpcId": { @@ -21616,17 +21920,20 @@ "smithy.api#xmlName": "vpcId" } }, - "PeerRegion": { + "PeerVpcId": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The Region code for the accepter VPC, if the accepter VPC is located in a Region\n other than the Region in which you make the request.

\n

Default: The Region in which you make the request.

" + "aws.protocols#ec2QueryName": "PeerVpcId", + "smithy.api#documentation": "

The ID of the VPC with which you are creating the VPC peering connection. You must\n\t\t\tspecify this parameter in the request.

", + "smithy.api#xmlName": "peerVpcId" } }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", + "PeerOwnerId": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The tags to assign to the peering connection.

", - "smithy.api#xmlName": "TagSpecification" + "aws.protocols#ec2QueryName": "PeerOwnerId", + "smithy.api#documentation": "

The Amazon Web Services account ID of the owner of the accepter VPC.

\n

Default: Your Amazon Web Services account ID

", + "smithy.api#xmlName": "peerOwnerId" } } }, @@ -21659,14 +21966,6 @@ "smithy.api#documentation": "

The IPv4 network range for the VPC, in CIDR notation. For example,\n\t\t 10.0.0.0/16. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

" } }, - "AmazonProvidedIpv6CidrBlock": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "AmazonProvidedIpv6CidrBlock", - "smithy.api#documentation": "

Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC.\n You cannot specify the range of IP addresses, or the size of the CIDR block.

", - "smithy.api#xmlName": "amazonProvidedIpv6CidrBlock" - } - }, "Ipv6Pool": { "target": "com.amazonaws.ec2#Ipv6PoolEc2Id", "traits": { @@ -21703,6 +22002,19 @@ "smithy.api#documentation": "

The netmask length of the IPv6 CIDR you want to allocate to this VPC from an Amazon VPC IP Address Manager (IPAM) pool. For more information about IPAM, see What is IPAM? in the Amazon VPC IPAM User Guide.

" } }, + "Ipv6CidrBlockNetworkBorderGroup": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

The name of the location from which we advertise the IPV6 CIDR block. Use this parameter to limit the address to this location.

\n

You must set AmazonProvidedIpv6CidrBlock to true to use this parameter.
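A hedged Soto sketch combining AmazonProvidedIpv6CidrBlock with a network border group, per the note above; the EC2.CreateVpcRequest initializer and the border group name are illustrative assumptions.

    _ = try await ec2.createVpc(
        .init(
            amazonProvidedIpv6CidrBlock: true,
            cidrBlock: "10.0.0.0/16",
            ipv6CidrBlockNetworkBorderGroup: "us-west-2-lax-1"
        )
    )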

" + } + }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

The tags to assign to the VPC.

", + "smithy.api#xmlName": "TagSpecification" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -21719,17 +22031,12 @@ "smithy.api#xmlName": "instanceTenancy" } }, - "Ipv6CidrBlockNetworkBorderGroup": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The name of the location from which we advertise the IPV6 CIDR block. Use this parameter to limit the address to this location.

\n

You must set AmazonProvidedIpv6CidrBlock to true to use this parameter.

" - } - }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", + "AmazonProvidedIpv6CidrBlock": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

The tags to assign to the VPC.

", - "smithy.api#xmlName": "TagSpecification" + "aws.protocols#ec2QueryName": "AmazonProvidedIpv6CidrBlock", + "smithy.api#documentation": "

Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC.\n You cannot specify the range of IP addresses, or the size of the CIDR block.

", + "smithy.api#xmlName": "amazonProvidedIpv6CidrBlock" } } }, @@ -21796,6 +22103,13 @@ "smithy.api#documentation": "

The ID of the transit gateway. If you specify a transit gateway, you cannot specify a virtual private\n gateway.

" } }, + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

The tags to apply to the VPN connection.

", + "smithy.api#xmlName": "TagSpecification" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -21811,13 +22125,6 @@ "smithy.api#documentation": "

The options for the VPN connection.

", "smithy.api#xmlName": "options" } - }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", - "traits": { - "smithy.api#documentation": "

The tags to apply to the VPN connection.

", - "smithy.api#xmlName": "TagSpecification" - } } }, "traits": { @@ -22001,36 +22308,44 @@ "com.amazonaws.ec2#CustomerGateway": { "type": "structure", "members": { - "BgpAsn": { + "CertificateArn": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "BgpAsn", - "smithy.api#documentation": "

The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number\n (ASN).

\n

Valid values: 1 to 2,147,483,647\n

", - "smithy.api#xmlName": "bgpAsn" + "aws.protocols#ec2QueryName": "CertificateArn", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the customer gateway certificate.

", + "smithy.api#xmlName": "certificateArn" } }, - "CustomerGatewayId": { + "DeviceName": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "CustomerGatewayId", - "smithy.api#documentation": "

The ID of the customer gateway.

", - "smithy.api#xmlName": "customerGatewayId" + "aws.protocols#ec2QueryName": "DeviceName", + "smithy.api#documentation": "

The name of the customer gateway device.

", + "smithy.api#xmlName": "deviceName" } }, - "IpAddress": { + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "

Any tags assigned to the customer gateway.

", + "smithy.api#xmlName": "tagSet" + } + }, + "BgpAsnExtended": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "IpAddress", - "smithy.api#documentation": "

\n IPv4 address for the customer gateway device's outside interface. The address must be static. If OutsideIpAddressType in your VPN connection options is set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address.\n

", - "smithy.api#xmlName": "ipAddress" + "aws.protocols#ec2QueryName": "BgpAsnExtended", + "smithy.api#documentation": "

The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number\n (ASN).

\n

Valid values: 2,147,483,648 to 4,294,967,295\n

", + "smithy.api#xmlName": "bgpAsnExtended" } }, - "CertificateArn": { + "CustomerGatewayId": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "CertificateArn", - "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the customer gateway certificate.

", - "smithy.api#xmlName": "certificateArn" + "aws.protocols#ec2QueryName": "CustomerGatewayId", + "smithy.api#documentation": "

The ID of the customer gateway.

", + "smithy.api#xmlName": "customerGatewayId" } }, "State": { @@ -22049,28 +22364,20 @@ "smithy.api#xmlName": "type" } }, - "DeviceName": { + "IpAddress": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "DeviceName", - "smithy.api#documentation": "

The name of the customer gateway device.

", - "smithy.api#xmlName": "deviceName" - } - }, - "Tags": { - "target": "com.amazonaws.ec2#TagList", - "traits": { - "aws.protocols#ec2QueryName": "TagSet", - "smithy.api#documentation": "

Any tags assigned to the customer gateway.

", - "smithy.api#xmlName": "tagSet" + "aws.protocols#ec2QueryName": "IpAddress", + "smithy.api#documentation": "

\n IPv4 address for the customer gateway device's outside interface. The address must be static. If OutsideIpAddressType in your VPN connection options is set to PrivateIpv4, you can use an RFC6598 or RFC1918 private IPv4 address. If OutsideIpAddressType is set to PublicIpv4, you can use a public IPv4 address.\n

", + "smithy.api#xmlName": "ipAddress" } }, - "BgpAsnExtended": { + "BgpAsn": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "BgpAsnExtended", - "smithy.api#documentation": "

The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number\n (ASN).

\n

Valid values: 2,147,483,648 to 4,294,967,295\n

", - "smithy.api#xmlName": "bgpAsnExtended" + "aws.protocols#ec2QueryName": "BgpAsn", + "smithy.api#documentation": "

The customer gateway device's Border Gateway Protocol (BGP) Autonomous System Number\n (ASN).

\n

Valid values: 1 to 2,147,483,647\n

", + "smithy.api#xmlName": "bgpAsn" } } }, @@ -23533,7 +23840,7 @@ "type": "structure", "members": { "KeyName": { - "target": "com.amazonaws.ec2#KeyPairName", + "target": "com.amazonaws.ec2#KeyPairNameWithResolver", "traits": { "smithy.api#documentation": "

The name of the key pair.

" } @@ -24217,16 +24524,6 @@ "smithy.api#xmlName": "dryRun" } }, - "Egress": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "Egress", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Indicates whether the rule is an egress rule.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "egress" - } - }, "NetworkAclId": { "target": "com.amazonaws.ec2#NetworkAclId", "traits": { @@ -24246,6 +24543,16 @@ "smithy.api#required": {}, "smithy.api#xmlName": "ruleNumber" } + }, + "Egress": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Egress", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Indicates whether the rule is an egress rule.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "egress" + } } }, "traits": { @@ -24613,7 +24920,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, @@ -24834,22 +25141,6 @@ "com.amazonaws.ec2#DeleteRouteRequest": { "type": "structure", "members": { - "DestinationCidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "DestinationCidrBlock", - "smithy.api#documentation": "

The IPv4 CIDR range for the route. The value you specify must match the CIDR for the route exactly.

", - "smithy.api#xmlName": "destinationCidrBlock" - } - }, - "DestinationIpv6CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "DestinationIpv6CidrBlock", - "smithy.api#documentation": "

The IPv6 CIDR range for the route. The value you specify must match the CIDR for the route exactly.

", - "smithy.api#xmlName": "destinationIpv6CidrBlock" - } - }, "DestinationPrefixListId": { "target": "com.amazonaws.ec2#PrefixListResourceId", "traits": { @@ -24873,6 +25164,22 @@ "smithy.api#required": {}, "smithy.api#xmlName": "routeTableId" } + }, + "DestinationCidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DestinationCidrBlock", + "smithy.api#documentation": "

The IPv4 CIDR range for the route. The value you specify must match the CIDR for the route exactly.

", + "smithy.api#xmlName": "destinationCidrBlock" + } + }, + "DestinationIpv6CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DestinationIpv6CidrBlock", + "smithy.api#documentation": "

The IPv6 CIDR range for the route. The value you specify must match the CIDR for the route exactly.

", + "smithy.api#xmlName": "destinationIpv6CidrBlock" + } } }, "traits": { @@ -27185,14 +27492,6 @@ "com.amazonaws.ec2#DescribeAccountAttributesRequest": { "type": "structure", "members": { - "AttributeNames": { - "target": "com.amazonaws.ec2#AccountAttributeNameStringList", - "traits": { - "aws.protocols#ec2QueryName": "AttributeName", - "smithy.api#documentation": "

The account attribute names.

", - "smithy.api#xmlName": "attributeName" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -27200,6 +27499,14 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } + }, + "AttributeNames": { + "target": "com.amazonaws.ec2#AccountAttributeNameStringList", + "traits": { + "aws.protocols#ec2QueryName": "AttributeName", + "smithy.api#documentation": "

The account attribute names.

", + "smithy.api#xmlName": "attributeName" + } } }, "traits": { @@ -27427,13 +27734,6 @@ "com.amazonaws.ec2#DescribeAddressesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

\n
    \n
  • \n

    \n allocation-id - The allocation ID for the address.

    \n
  • \n
  • \n

    \n association-id - The association ID for the address.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance the address is associated with, if any.

    \n
  • \n
  • \n

    \n network-border-group - A unique set of Availability Zones, Local Zones,\n or Wavelength Zones from where Amazon Web Services advertises IP addresses.

    \n
  • \n
  • \n

    \n network-interface-id - The ID of the network interface that the address is associated with, if any.

    \n
  • \n
  • \n

    \n network-interface-owner-id - The Amazon Web Services account ID of the owner.

    \n
  • \n
  • \n

    \n private-ip-address - The private IP address associated with the Elastic IP address.

    \n
  • \n
  • \n

    \n public-ip - The Elastic IP address, or the carrier IP address.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "PublicIps": { "target": "com.amazonaws.ec2#PublicIpStringList", "traits": { @@ -27441,13 +27741,6 @@ "smithy.api#xmlName": "PublicIp" } }, - "AllocationIds": { - "target": "com.amazonaws.ec2#AllocationIdList", - "traits": { - "smithy.api#documentation": "

Information about the allocation IDs.

", - "smithy.api#xmlName": "AllocationId" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -27455,6 +27748,20 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

\n
    \n
  • \n

    \n allocation-id - The allocation ID for the address.

    \n
  • \n
  • \n

    \n association-id - The association ID for the address.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance the address is associated with, if any.

    \n
  • \n
  • \n

    \n network-border-group - A unique set of Availability Zones, Local Zones,\n or Wavelength Zones from where Amazon Web Services advertises IP addresses.

    \n
  • \n
  • \n

    \n network-interface-id - The ID of the network interface that the address is associated with, if any.

    \n
  • \n
  • \n

    \n network-interface-owner-id - The Amazon Web Services account ID of the owner.

    \n
  • \n
  • \n

    \n private-ip-address - The private IP address associated with the Elastic IP address.

    \n
  • \n
  • \n

    \n public-ip - The Elastic IP address, or the carrier IP address.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" + } + }, + "AllocationIds": { + "target": "com.amazonaws.ec2#AllocationIdList", + "traits": { + "smithy.api#documentation": "

Information about the allocation IDs.

", + "smithy.api#xmlName": "AllocationId" + } } }, "traits": { @@ -27576,13 +27883,6 @@ "com.amazonaws.ec2#DescribeAvailabilityZonesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n group-name - For Availability Zones, use the Region name. For Local\n Zones, use the name of the group associated with the Local Zone (for example,\n us-west-2-lax-1) For Wavelength Zones, use the name of the group associated\n with the Wavelength Zone (for example, us-east-1-wl1).

    \n
  • \n
  • \n

    \n message - The Zone message.

    \n
  • \n
  • \n

    \n opt-in-status - The opt-in status (opted-in |\n not-opted-in | opt-in-not-required).

    \n
  • \n
  • \n

    \n parent-zone-id - The ID of the zone that handles some of the Local Zone\n and Wavelength Zone control plane operations, such as API calls.

    \n
  • \n
  • \n

    \n parent-zone-name - The ID of the zone that handles some of the Local Zone\n and Wavelength Zone control plane operations, such as API calls.

    \n
  • \n
  • \n

    \n region-name - The name of the Region for the Zone (for example,\n us-east-1).

    \n
  • \n
  • \n

    \n state - The state of the Availability Zone, the Local Zone, or the\n Wavelength Zone (available).

    \n
  • \n
  • \n

    \n zone-id - The ID of the Availability Zone (for example,\n use1-az1), the Local Zone (for example, usw2-lax1-az1), or the\n Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1).

    \n
  • \n
  • \n

    \n zone-name - The name of the Availability Zone (for example,\n us-east-1a), the Local Zone (for example, us-west-2-lax-1a), or\n the Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1).

    \n
  • \n
  • \n

    \n zone-type - The type of zone (availability-zone | \n local-zone | wavelength-zone).

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "ZoneNames": { "target": "com.amazonaws.ec2#ZoneNameStringList", "traits": { @@ -27610,6 +27910,13 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n group-name - For Availability Zones, use the Region name. For Local\n Zones, use the name of the group associated with the Local Zone (for example,\n us-west-2-lax-1) For Wavelength Zones, use the name of the group associated\n with the Wavelength Zone (for example, us-east-1-wl1).

    \n
  • \n
  • \n

    \n message - The Zone message.

    \n
  • \n
  • \n

    \n opt-in-status - The opt-in status (opted-in |\n not-opted-in | opt-in-not-required).

    \n
  • \n
  • \n

    \n parent-zone-id - The ID of the zone that handles some of the Local Zone\n and Wavelength Zone control plane operations, such as API calls.

    \n
  • \n
  • \n

    \n parent-zone-name - The ID of the zone that handles some of the Local Zone\n and Wavelength Zone control plane operations, such as API calls.

    \n
  • \n
  • \n

    \n region-name - The name of the Region for the Zone (for example,\n us-east-1).

    \n
  • \n
  • \n

    \n state - The state of the Availability Zone, the Local Zone, or the\n Wavelength Zone (available).

    \n
  • \n
  • \n

    \n zone-id - The ID of the Availability Zone (for example,\n use1-az1), the Local Zone (for example, usw2-lax1-az1), or the\n Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1).

    \n
  • \n
  • \n

    \n zone-name - The name of the Availability Zone (for example,\n us-east-1a), the Local Zone (for example, us-west-2-lax-1a), or\n the Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1).

    \n
  • \n
  • \n

    \n zone-type - The type of zone (availability-zone | \n local-zone | wavelength-zone).

    \n
  • \n
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -27756,13 +28063,6 @@ "smithy.api#xmlName": "BundleId" } }, - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n bundle-id - The ID of the bundle task.

    \n
  • \n
  • \n

    \n error-code - If the task failed, the error code returned.

    \n
  • \n
  • \n

    \n error-message - If the task failed, the error message returned.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n progress - The level of task completion, as a percentage (for example, 20%).

    \n
  • \n
  • \n

    \n s3-bucket - The Amazon S3 bucket to store the AMI.

    \n
  • \n
  • \n

    \n s3-prefix - The beginning of the AMI name.

    \n
  • \n
  • \n

    \n start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).

    \n
  • \n
  • \n

    \n state - The state of the task (pending | waiting-for-shutdown | bundling |\n storing | cancelling | complete | failed).

    \n
  • \n
  • \n

    \n update-time - The time of the most recent update for the task.

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -27770,6 +28070,13 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n bundle-id - The ID of the bundle task.

    \n
  • \n
  • \n

    \n error-code - If the task failed, the error code returned.

    \n
  • \n
  • \n

    \n error-message - If the task failed, the error message returned.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n progress - The level of task completion, as a percentage (for example, 20%).

    \n
  • \n
  • \n

    \n s3-bucket - The Amazon S3 bucket to store the AMI.

    \n
  • \n
  • \n

    \n s3-prefix - The beginning of the AMI name.

    \n
  • \n
  • \n

    \n start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z).

    \n
  • \n
  • \n

    \n state - The state of the task (pending | waiting-for-shutdown | bundling |\n storing | cancelling | complete | failed).

    \n
  • \n
  • \n

    \n update-time - The time of the most recent update for the task.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -27910,17 +28217,13 @@ "InstanceType": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of instance for which the Capacity Block offering reserves capacity.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The type of instance for which the Capacity Block offering reserves capacity.

" } }, "InstanceCount": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The number of instances for which to reserve capacity.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The number of instances for which to reserve capacity.

" } }, "StartDateRange": { @@ -27984,6 +28287,105 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#DescribeCapacityReservationBillingRequests": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DescribeCapacityReservationBillingRequestsRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DescribeCapacityReservationBillingRequestsResult" + }, + "traits": { + "smithy.api#documentation": "

Describes a request to assign the billing of the unused capacity of a Capacity Reservation.\n\t\t\tFor more information, see \n\t\t\t\tBilling assignment for shared Amazon EC2 Capacity Reservations.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "CapacityReservationBillingRequests", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ec2#DescribeCapacityReservationBillingRequestsRequest": { + "type": "structure", + "members": { + "CapacityReservationIds": { + "target": "com.amazonaws.ec2#CapacityReservationIdSet", + "traits": { + "smithy.api#documentation": "

The ID of the Capacity Reservation.

", + "smithy.api#xmlName": "CapacityReservationId" + } + }, + "Role": { + "target": "com.amazonaws.ec2#CallerRole", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Specify one of the following:

\n
    \n
  • \n

    \n odcr-owner - If you are the Capacity Reservation owner, specify this \n\t\t\t\t\tvalue to view requests that you have initiated. Not supported with the requested-by \n\t\t\t\t\tfilter.

    \n
  • \n
  • \n

    \n unused-reservation-billing-owner - If you are the consumer account, \n\t\t\t\t\tspecify this value to view requests that have been sent to you. Not supported with the \n\t\t\t\t\tunused-reservation-billing-owner filter.

    \n
  • \n
", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

The token to use to retrieve the next page of results.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeCapacityReservationBillingRequestsRequestMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, \n see Pagination.

" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n status - The state of the request (pending | accepted | \n\t\t\t\t\trejected | cancelled | revoked | expired).

    \n
  • \n
  • \n

    \n requested-by - The account ID of the Capacity Reservation owner that initiated \n\t\t\t\t\tthe request. Not supported if you specify requested-by for Role.

    \n
  • \n
  • \n

    \n unused-reservation-billing-owner - The ID of the consumer account to which the \n\t\t\t\t\trequest was sent. Not supported if you specify unused-reservation-billing-owner for \n\t\t\t\t\tRole.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#DescribeCapacityReservationBillingRequestsRequestMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.ec2#DescribeCapacityReservationBillingRequestsResult": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "smithy.api#xmlName": "nextToken" + } + }, + "CapacityReservationBillingRequests": { + "target": "com.amazonaws.ec2#CapacityReservationBillingRequestSet", + "traits": { + "aws.protocols#ec2QueryName": "CapacityReservationBillingRequestSet", + "smithy.api#documentation": "

Information about the request.

", + "smithy.api#xmlName": "capacityReservationBillingRequestSet" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DescribeCapacityReservationFleets": { "type": "operation", "input": { @@ -28278,13 +28680,6 @@ "com.amazonaws.ec2#DescribeClassicLinkInstancesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n group-id - The ID of a VPC security group that's associated with the instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC to which the instance is linked.

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -28300,12 +28695,11 @@ "smithy.api#xmlName": "InstanceId" } }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeClassicLinkInstancesMaxResults", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

\n

Constraint: If the value is greater than 1000, we return only 1000 items.

", - "smithy.api#xmlName": "maxResults" + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n group-id - The ID of a VPC security group that's associated with the instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC to which the instance is linked.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" } }, "NextToken": { @@ -28315,6 +28709,14 @@ "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

", "smithy.api#xmlName": "nextToken" } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeClassicLinkInstancesMaxResults", + "traits": { + "aws.protocols#ec2QueryName": "MaxResults", + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

\n

Constraint: If the value is greater than 1000, we return only 1000 items.

", + "smithy.api#xmlName": "maxResults" + } } }, "traits": { @@ -28983,14 +29385,6 @@ "com.amazonaws.ec2#DescribeConversionTasksRequest": { "type": "structure", "members": { - "ConversionTaskIds": { - "target": "com.amazonaws.ec2#ConversionIdStringList", - "traits": { - "aws.protocols#ec2QueryName": "ConversionTaskId", - "smithy.api#documentation": "

The conversion task IDs.

", - "smithy.api#xmlName": "conversionTaskId" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -28998,6 +29392,14 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } + }, + "ConversionTaskIds": { + "target": "com.amazonaws.ec2#ConversionIdStringList", + "traits": { + "aws.protocols#ec2QueryName": "ConversionTaskId", + "smithy.api#documentation": "

The conversion task IDs.

", + "smithy.api#xmlName": "conversionTaskId" + } } }, "traits": { @@ -29207,11 +29609,16 @@ "smithy.api#xmlName": "DhcpOptionsId" } }, - "Filters": { - "target": "com.amazonaws.ec2#FilterList", + "NextToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n dhcp-options-id - The ID of a DHCP options set.

    \n
  • \n
  • \n

    \n key - The key for one of the options (for example, domain-name).

    \n
  • \n
  • \n

    \n value - The value for one of the options.

    \n
  • \n
  • \n

    \n owner-id - The ID of the Amazon Web Services account that owns the DHCP options set.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
", - "smithy.api#xmlName": "Filter" + "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeDhcpOptionsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" } }, "DryRun": { @@ -29222,16 +29629,11 @@ "smithy.api#xmlName": "dryRun" } }, - "NextToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeDhcpOptionsMaxResults", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n dhcp-options-id - The ID of a DHCP options set.

    \n
  • \n
  • \n

    \n key - The key for one of the options (for example, domain-name).

    \n
  • \n
  • \n

    \n value - The value for one of the options.

    \n
  • \n
  • \n

    \n owner-id - The ID of the Amazon Web Services account that owns the DHCP options set.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" } } }, @@ -29242,14 +29644,6 @@ "com.amazonaws.ec2#DescribeDhcpOptionsResult": { "type": "structure", "members": { - "DhcpOptions": { - "target": "com.amazonaws.ec2#DhcpOptionsList", - "traits": { - "aws.protocols#ec2QueryName": "DhcpOptionsSet", - "smithy.api#documentation": "

Information about the DHCP options sets.

", - "smithy.api#xmlName": "dhcpOptionsSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -29257,6 +29651,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "DhcpOptions": { + "target": "com.amazonaws.ec2#DhcpOptionsList", + "traits": { + "aws.protocols#ec2QueryName": "DhcpOptionsSet", + "smithy.api#documentation": "

Information about the DHCP options sets.

", + "smithy.api#xmlName": "dhcpOptionsSet" + } } }, "traits": { @@ -29363,7 +29765,7 @@ "target": "com.amazonaws.ec2#DescribeElasticGpusResult" }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

Describes the Elastic Graphics accelerator associated with your instances.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
\n

Describes the Elastic Graphics accelerator associated with your instances.

" } }, "com.amazonaws.ec2#DescribeElasticGpusMaxResults": { @@ -29585,6 +29987,13 @@ "com.amazonaws.ec2#DescribeExportTasksRequest": { "type": "structure", "members": { + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters for the export tasks.

", + "smithy.api#xmlName": "Filter" + } + }, "ExportTaskIds": { "target": "com.amazonaws.ec2#ExportTaskIdStringList", "traits": { @@ -29592,13 +30001,6 @@ "smithy.api#documentation": "

The export task IDs.

", "smithy.api#xmlName": "exportTaskId" } - }, - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

the filters for the export tasks.

", - "smithy.api#xmlName": "Filter" - } } }, "traits": { @@ -30289,7 +30691,7 @@ "target": "com.amazonaws.ec2#PlatformValues", "traits": { "aws.protocols#ec2QueryName": "Platform", - "smithy.api#documentation": "

The value is Windows for Windows instances. Otherwise, the value is\n blank.

", + "smithy.api#documentation": "

The value is windows for Windows instances in an EC2 Fleet. Otherwise, the value is\n blank.

", "smithy.api#xmlName": "platform" } } @@ -30798,14 +31200,6 @@ "com.amazonaws.ec2#DescribeHostsRequest": { "type": "structure", "members": { - "Filter": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "aws.protocols#ec2QueryName": "Filter", - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n auto-placement - Whether auto-placement is enabled or disabled\n (on | off).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the host.

    \n
  • \n
  • \n

    \n client-token - The idempotency token that you provided when you\n allocated the host.

    \n
  • \n
  • \n

    \n host-reservation-id - The ID of the reservation assigned to this\n host.

    \n
  • \n
  • \n

    \n instance-type - The instance type size that the Dedicated Host is\n configured to support.

    \n
  • \n
  • \n

    \n state - The allocation state of the Dedicated Host\n (available | under-assessment |\n permanent-failure | released |\n released-permanent-failure).

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
", - "smithy.api#xmlName": "filter" - } - }, "HostIds": { "target": "com.amazonaws.ec2#RequestHostIdList", "traits": { @@ -30814,6 +31208,14 @@ "smithy.api#xmlName": "hostId" } }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "

The token to use to retrieve the next page of results.

", + "smithy.api#xmlName": "nextToken" + } + }, "MaxResults": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -30822,12 +31224,12 @@ "smithy.api#xmlName": "maxResults" } }, - "NextToken": { - "target": "com.amazonaws.ec2#String", + "Filter": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "aws.protocols#ec2QueryName": "NextToken", - "smithy.api#documentation": "

The token to use to retrieve the next page of results.

", - "smithy.api#xmlName": "nextToken" + "aws.protocols#ec2QueryName": "Filter", + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n auto-placement - Whether auto-placement is enabled or disabled\n (on | off).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the host.

    \n
  • \n
  • \n

    \n client-token - The idempotency token that you provided when you\n allocated the host.

    \n
  • \n
  • \n

    \n host-reservation-id - The ID of the reservation assigned to this\n host.

    \n
  • \n
  • \n

    \n instance-type - The instance type size that the Dedicated Host is\n configured to support.

    \n
  • \n
  • \n

    \n state - The allocation state of the Dedicated Host\n (available | under-assessment |\n permanent-failure | released |\n released-permanent-failure).

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
", + "smithy.api#xmlName": "filter" } } }, @@ -31025,6 +31427,14 @@ "com.amazonaws.ec2#DescribeIdentityIdFormatRequest": { "type": "structure", "members": { + "Resource": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Resource", + "smithy.api#documentation": "

The type of resource: bundle |\n conversion-task | customer-gateway | dhcp-options |\n elastic-ip-allocation | elastic-ip-association |\n export-task | flow-log | image |\n import-task | instance | internet-gateway |\n network-acl | network-acl-association |\n network-interface | network-interface-attachment |\n prefix-list | reservation | route-table |\n route-table-association | security-group |\n snapshot | subnet |\n subnet-cidr-block-association | volume | vpc\n | vpc-cidr-block-association | vpc-endpoint |\n vpc-peering-connection | vpn-connection | vpn-gateway\n

", + "smithy.api#xmlName": "resource" + } + }, "PrincipalArn": { "target": "com.amazonaws.ec2#String", "traits": { @@ -31034,14 +31444,6 @@ "smithy.api#required": {}, "smithy.api#xmlName": "principalArn" } - }, - "Resource": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Resource", - "smithy.api#documentation": "

The type of resource: bundle |\n conversion-task | customer-gateway | dhcp-options |\n elastic-ip-allocation | elastic-ip-association |\n export-task | flow-log | image |\n import-task | instance | internet-gateway |\n network-acl | network-acl-association |\n network-interface | network-interface-attachment |\n prefix-list | reservation | route-table |\n route-table-association | security-group |\n snapshot | subnet |\n subnet-cidr-block-association | volume | vpc\n | vpc-cidr-block-association | vpc-endpoint |\n vpc-peering-connection | vpn-connection | vpn-gateway\n

", - "smithy.api#xmlName": "resource" - } } }, "traits": { @@ -31248,13 +31650,6 @@ "smithy.api#xmlName": "ExecutableBy" } }, - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n architecture - The image architecture (i386 | x86_64 | \n arm64 | x86_64_mac | arm64_mac).

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean value that indicates\n \twhether the Amazon EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in the block device mapping (for\n example, /dev/sdh or xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS\n volume.

    \n
  • \n
  • \n

    \n block-device-mapping.volume-size - The volume size of the Amazon EBS volume, in GiB.

    \n
  • \n
  • \n

    \n block-device-mapping.volume-type - The volume type of the Amazon EBS volume\n (io1 | io2 | gp2 | gp3 | sc1\n | st1 | standard).

    \n
  • \n
  • \n

    \n block-device-mapping.encrypted - A Boolean that indicates whether the Amazon EBS volume is encrypted.

    \n
  • \n
  • \n

    \n creation-date - The time when the image was created, in the ISO 8601\n format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for\n example, 2021-09-29T*, which matches an entire day.

    \n
  • \n
  • \n

    \n description - The description of the image (provided during image\n creation).

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether enhanced networking\n with ENA is enabled.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type (ovm |\n xen).

    \n
  • \n
  • \n

    \n image-id - The ID of the image.

    \n
  • \n
  • \n

    \n image-type - The image type (machine | kernel |\n ramdisk).

    \n
  • \n
  • \n

    \n is-public - A Boolean that indicates whether the image is public.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n manifest-location - The location of the image manifest.

    \n
  • \n
  • \n

    \n name - The name of the AMI (provided during image creation).

    \n
  • \n
  • \n

    \n owner-alias - The owner alias (amazon | aws-marketplace). \n The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be \n \tset using the IAM console. We recommend that you use the Owner \n \trequest parameter instead of this filter.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the \n \t\tOwner request parameter instead of this filter.

    \n
  • \n
  • \n

    \n platform - The platform. The only supported value is windows.

    \n
  • \n
  • \n

    \n product-code - The product code.

    \n
  • \n
  • \n

    \n product-code.type - The type of the product code (marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume (ebs |\n instance-store).

    \n
  • \n
  • \n

    \n source-instance-id - The ID of the instance that the AMI was created from\n if the AMI was created using CreateImage. This filter is applicable only if the AMI was\n created using CreateImage.

    \n
  • \n
  • \n

    \n state - The state of the image (available | pending\n | failed).

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - The message for the state change.

    \n
  • \n
  • \n

    \n sriov-net-support - A value of simple indicates\n that enhanced networking with the Intel 82599 VF interface is enabled.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type (paravirtual |\n hvm).

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "ImageIds": { "target": "com.amazonaws.ec2#ImageIdStringList", "traits": { @@ -31281,14 +31676,6 @@ "smithy.api#documentation": "

Specifies whether to include disabled AMIs.

\n

Default: No disabled AMIs are included in the response.

" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "MaxResults": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -31300,6 +31687,21 @@ "traits": { "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n architecture - The image architecture (i386 | x86_64 | \n arm64 | x86_64_mac | arm64_mac).

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean value that indicates\n \twhether the Amazon EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in the block device mapping (for\n example, /dev/sdh or xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.snapshot-id - The ID of the snapshot used for the Amazon EBS\n volume.

    \n
  • \n
  • \n

    \n block-device-mapping.volume-size - The volume size of the Amazon EBS volume, in GiB.

    \n
  • \n
  • \n

    \n block-device-mapping.volume-type - The volume type of the Amazon EBS volume\n (io1 | io2 | gp2 | gp3 | sc1\n | st1 | standard).

    \n
  • \n
  • \n

    \n block-device-mapping.encrypted - A Boolean that indicates whether the Amazon EBS volume is encrypted.

    \n
  • \n
  • \n

    \n creation-date - The time when the image was created, in the ISO 8601\n format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard (*), for\n example, 2021-09-29T*, which matches an entire day.

    \n
  • \n
  • \n

    \n description - The description of the image (provided during image\n creation).

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether enhanced networking\n with ENA is enabled.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type (ovm |\n xen).

    \n
  • \n
  • \n

    \n image-id - The ID of the image.

    \n
  • \n
  • \n

    \n image-type - The image type (machine | kernel |\n ramdisk).

    \n
  • \n
  • \n

    \n is-public - A Boolean that indicates whether the image is public.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n manifest-location - The location of the image manifest.

    \n
  • \n
  • \n

    \n name - The name of the AMI (provided during image creation).

    \n
  • \n
  • \n

    \n owner-alias - The owner alias (amazon | aws-marketplace). \n The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be \n \tset using the IAM console. We recommend that you use the Owner \n \trequest parameter instead of this filter.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the \n \t\tOwner request parameter instead of this filter.

    \n
  • \n
  • \n

    \n platform - The platform. The only supported value is windows.

    \n
  • \n
  • \n

    \n product-code - The product code.

    \n
  • \n
  • \n

    \n product-code.type - The type of the product code (marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume (ebs |\n instance-store).

    \n
  • \n
  • \n

    \n source-instance-id - The ID of the instance that the AMI was created from\n if the AMI was created using CreateImage. This filter is applicable only if the AMI was\n created using CreateImage.

    \n
  • \n
  • \n

    \n state - The state of the image (available | pending\n | failed).

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - The message for the state change.

    \n
  • \n
  • \n

    \n sriov-net-support - A value of simple indicates\n that enhanced networking with the Intel 82599 VF interface is enabled.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type (paravirtual |\n hvm).

    \n
  • \n
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -31309,14 +31711,6 @@ "com.amazonaws.ec2#DescribeImagesResult": { "type": "structure", "members": { - "Images": { - "target": "com.amazonaws.ec2#ImageList", - "traits": { - "aws.protocols#ec2QueryName": "ImagesSet", - "smithy.api#documentation": "

Information about the images.

", - "smithy.api#xmlName": "imagesSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -31324,6 +31718,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. This value is null when there\n are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "Images": { + "target": "com.amazonaws.ec2#ImageList", + "traits": { + "aws.protocols#ec2QueryName": "ImagesSet", + "smithy.api#documentation": "

Information about the images.

", + "smithy.api#xmlName": "imagesSet" + } } }, "traits": { @@ -31583,21 +31985,11 @@ "com.amazonaws.ec2#DescribeInstanceAttributeRequest": { "type": "structure", "members": { - "Attribute": { - "target": "com.amazonaws.ec2#InstanceAttributeName", - "traits": { - "aws.protocols#ec2QueryName": "Attribute", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The instance attribute.

\n

Note: The enaSupport attribute is not supported at this time.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "attribute" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, @@ -31610,6 +32002,16 @@ "smithy.api#required": {}, "smithy.api#xmlName": "instanceId" } + }, + "Attribute": { + "target": "com.amazonaws.ec2#InstanceAttributeName", + "traits": { + "aws.protocols#ec2QueryName": "Attribute", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The instance attribute.

\n

Note: The enaSupport attribute is not supported at this time.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "attribute" + } } }, "traits": { @@ -31731,7 +32133,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } }, "Filters": { @@ -31886,7 +32288,7 @@ } }, "traits": { - "smithy.api#documentation": "Describe instance event windows by InstanceEventWindow.", + "smithy.api#documentation": "

Describe instance event windows by InstanceEventWindow.

", "smithy.api#input": {} } }, @@ -32017,13 +32419,6 @@ "com.amazonaws.ec2#DescribeInstanceStatusRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n event.code - The code for the scheduled event\n (instance-reboot | system-reboot |\n system-maintenance | instance-retirement |\n instance-stop).

    \n
  • \n
  • \n

    \n event.description - A description of the event.

    \n
  • \n
  • \n

    \n event.instance-event-id - The ID of the event whose date and time\n you are modifying.

    \n
  • \n
  • \n

    \n event.not-after - The latest end time for the scheduled event\n (for example, 2014-09-15T17:15:20.000Z).

    \n
  • \n
  • \n

    \n event.not-before - The earliest start time for the scheduled\n event (for example, 2014-09-15T17:15:20.000Z).

    \n
  • \n
  • \n

    \n event.not-before-deadline - The deadline for starting the event\n (for example, 2014-09-15T17:15:20.000Z).

    \n
  • \n
  • \n

    \n instance-state-code - The code for the instance state, as a\n 16-bit unsigned integer. The high byte is used for internal purposes and should\n be ignored. The low byte is set based on the state represented. The valid values\n are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-status.reachability - Filters on instance status where\n the name is reachability (passed | failed\n | initializing | insufficient-data).

    \n
  • \n
  • \n

    \n instance-status.status - The status of the instance\n (ok | impaired | initializing |\n insufficient-data | not-applicable).

    \n
  • \n
  • \n

    \n system-status.reachability - Filters on system status where the\n name is reachability (passed | failed |\n initializing | insufficient-data).

    \n
  • \n
  • \n

    \n system-status.status - The system status of the instance\n (ok | impaired | initializing |\n insufficient-data | not-applicable).

    \n
  • \n
  • \n

    \n attached-ebs-status.status - The status of the attached EBS volume \n for the instance (ok | impaired | initializing | \n insufficient-data | not-applicable).

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "InstanceIds": { "target": "com.amazonaws.ec2#InstanceIdStringList", "traits": { @@ -32047,10 +32442,17 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

  • availability-zone - The Availability Zone of the instance.
  • event.code - The code for the scheduled event (instance-reboot | system-reboot | system-maintenance | instance-retirement | instance-stop).
  • event.description - A description of the event.
  • event.instance-event-id - The ID of the event whose date and time you are modifying.
  • event.not-after - The latest end time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).
  • event.not-before - The earliest start time for the scheduled event (for example, 2014-09-15T17:15:20.000Z).
  • event.not-before-deadline - The deadline for starting the event (for example, 2014-09-15T17:15:20.000Z).
  • instance-state-code - The code for the instance state, as a 16-bit unsigned integer. The high byte is used for internal purposes and should be ignored. The low byte is set based on the state represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).
  • instance-state-name - The state of the instance (pending | running | shutting-down | terminated | stopping | stopped).
  • instance-status.reachability - Filters on instance status where the name is reachability (passed | failed | initializing | insufficient-data).
  • instance-status.status - The status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).
  • system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data).
  • system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable).
  • attached-ebs-status.status - The status of the attached EBS volume for the instance (ok | impaired | initializing | insufficient-data | not-applicable).
", + "smithy.api#xmlName": "Filter" + } + }, "IncludeAllInstances": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -32097,7 +32499,7 @@ "target": "com.amazonaws.ec2#DescribeInstanceTopologyResult" }, "traits": { - "smithy.api#documentation": "

Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads.

Limitations

  • Supported zones
    • Availability Zone
    • Local Zone
  • Supported instance types
    • hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge
    • p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge
    • trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge

For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "

Describes a tree-based hierarchy that represents the physical host placement of your EC2 instances within an Availability Zone or Local Zone. You can use this information to determine the relative proximity of your EC2 instances within the Amazon Web Services network to support your tightly coupled workloads.

Limitations

  • Supported zones
    • Availability Zone
    • Local Zone
  • Supported instance types
    • hpc6a.48xlarge | hpc6id.32xlarge | hpc7a.12xlarge | hpc7a.24xlarge | hpc7a.48xlarge | hpc7a.96xlarge | hpc7g.4xlarge | hpc7g.8xlarge | hpc7g.16xlarge
    • p3dn.24xlarge | p4d.24xlarge | p4de.24xlarge | p5.48xlarge | p5e.48xlarge
    • trn1.2xlarge | trn1.32xlarge | trn1n.32xlarge

For more information, see Amazon EC2 instance topology in the Amazon EC2 User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -32133,7 +32535,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } }, "NextToken": { @@ -32588,13 +32990,6 @@ "com.amazonaws.ec2#DescribeInstancesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n affinity - The affinity setting for an instance running on a\n Dedicated Host (default | host).

    \n
  • \n
  • \n

    \n architecture - The instance architecture (i386 |\n x86_64 | arm64).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n block-device-mapping.attach-time - The attach time for an EBS\n volume mapped to the instance, for example,\n 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean that\n indicates whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in\n the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.status - The status for the EBS volume\n (attaching | attached | detaching |\n detached).

    \n
  • \n
  • \n

    \n block-device-mapping.volume-id - The volume ID of the EBS\n volume.

    \n
  • \n
  • \n

    \n boot-mode - The boot mode that was specified by the AMI\n (legacy-bios | uefi |\n uefi-preferred).

    \n
  • \n
  • \n

    \n capacity-reservation-id - The ID of the Capacity Reservation into which the\n instance was launched.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-preference\n - The instance's Capacity Reservation preference (open | none).

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id\n - The ID of the targeted Capacity Reservation.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn\n - The ARN of the targeted Capacity Reservation group.

    \n
  • \n
  • \n

    \n client-token - The idempotency token you provided when you\n launched the instance.

    \n
  • \n
  • \n

    \n current-instance-boot-mode - The boot mode that is used to launch\n the instance at launch or start (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n dns-name - The public DNS name of the instance.

    \n
  • \n
  • \n

    \n ebs-optimized - A Boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether the instance is\n enabled for enhanced networking with ENA.

    \n
  • \n
  • \n

    \n enclave-options.enabled - A Boolean that indicates whether the\n instance is enabled for Amazon Web Services Nitro Enclaves.

    \n
  • \n
  • \n

    \n hibernation-options.configured - A Boolean that indicates whether\n the instance is enabled for hibernation. A value of true means that\n the instance is enabled for hibernation.

    \n
  • \n
  • \n

    \n host-id - The ID of the Dedicated Host on which the instance is\n running, if applicable.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type of the instance\n (ovm | xen). The value xen is used\n for both Xen and Nitro hypervisors.

    \n
  • \n
  • \n

    \n iam-instance-profile.arn - The instance profile associated with\n the instance. Specified as an ARN.

    \n
  • \n
  • \n

    \n iam-instance-profile.id - The instance profile associated with\n the instance. Specified as an ID.

    \n
  • \n
  • \n

    \n iam-instance-profile.name - The instance profile associated with\n the instance. Specified as an name.

    \n
  • \n
  • \n

    \n image-id - The ID of the image used to launch the\n instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or\n a Capacity Block (spot | scheduled | capacity-block).

    \n
  • \n
  • \n

    \n instance-state-code - The state of the instance, as a 16-bit\n unsigned integer. The high byte is used for internal purposes and should be\n ignored. The low byte is set based on the state represented. The valid values\n are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-type - The type of instance (for example,\n t2.micro).

    \n
  • \n
  • \n

    \n instance.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n instance.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n ip-address - The public IPv4 address of the instance.

    \n
  • \n
  • \n

    \n ipv6-address - The IPv6 address of the instance.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n key-name - The name of the key pair used when the instance was\n launched.

    \n
  • \n
  • \n

    \n launch-index - When launching multiple instances, this is the\n index for the instance in the launch group (for example, 0, 1, 2, and so on).\n

    \n
  • \n
  • \n

    \n launch-time - The time when the instance was launched, in the ISO\n 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard\n (*), for example, 2021-09-29T*, which matches an\n entire day.

    \n
  • \n
  • \n

    \n maintenance-options.auto-recovery - The current automatic\n recovery behavior of the instance (disabled | default).

    \n
  • \n
  • \n

    \n metadata-options.http-endpoint - The status of access to the HTTP\n metadata endpoint on your instance (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv4 - Indicates whether the IPv4\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv6 - Indicates whether the IPv6\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-put-response-hop-limit - The HTTP metadata\n request put response hop limit (integer, possible values 1 to\n 64)

    \n
  • \n
  • \n

    \n metadata-options.http-tokens - The metadata request authorization\n state (optional | required)

    \n
  • \n
  • \n

    \n metadata-options.instance-metadata-tags - The status of access to\n instance tags from the instance metadata (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.state - The state of the metadata option changes\n (pending | applied).

    \n
  • \n
  • \n

    \n monitoring-state - Indicates whether detailed monitoring is\n enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n network-interface.addresses.association.allocation-id - The allocation ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.association-id - The association ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.ip-owner-id - The owner\n ID of the private IPv4 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-ip - The ID of the\n association of an Elastic IP address (IPv4) with a network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Specifies whether the IPv4\n address of the network interface is the primary private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-dns-name - The private DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-ip-address - The private IPv4\n address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.allocation-id - The allocation ID\n returned when you allocated the Elastic IP address (IPv4) for your network\n interface.

    \n
  • \n
  • \n

    \n network-interface.association.association-id - The association ID\n returned when the network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.association.carrier-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.ip-owner-id - The owner of the\n Elastic IP address (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.association.public-ip - The address of the\n Elastic IP address (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n network-interface.attachment.attach-time - The time that the\n network interface was attached to an instance.

    \n
  • \n
  • \n

    \n network-interface.attachment.attachment-id - The ID of the\n interface attachment.

    \n
  • \n
  • \n

    \n network-interface.attachment.delete-on-termination - Specifies\n whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.attachment.device-index - The device index to\n which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-id - The ID of the instance\n to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-owner-id - The owner ID of\n the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.network-card-index - The index of the network card.

    \n
  • \n
  • \n

    \n network-interface.attachment.status - The status of the\n attachment (attaching | attached |\n detaching | detached).

    \n
  • \n
  • \n

    \n network-interface.availability-zone - The Availability Zone for\n the network interface.

    \n
  • \n
  • \n

    \n network-interface.deny-all-igw-traffic - A Boolean that indicates whether \n a network interface with an IPv6 address is unreachable from the public internet.

    \n
  • \n
  • \n

    \n network-interface.description - The description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.group-name - The name of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-address - The IPv6 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.ipv6-address - The IPv6 address\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this\n is the primary IPv6 address.

    \n
  • \n
  • \n

    \n network-interface.ipv6-native - A Boolean that indicates whether this is\n an IPv6 only network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.mac-address - The MAC address of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.outpost-arn - The ARN of the Outpost.

    \n
  • \n
  • \n

    \n network-interface.owner-id - The ID of the owner of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-dns-name - The private DNS name of the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.private-ip-address - The private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.requester-id - The requester ID for the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.requester-managed - Indicates whether the\n network interface is being managed by Amazon Web Services.

    \n
  • \n
  • \n

    \n network-interface.status - The status of the network interface\n (available) | in-use).

    \n
  • \n
  • \n

    \n network-interface.source-dest-check - Whether the network\n interface performs source/destination checking. A value of true\n means that checking is enabled, and false means that checking is\n disabled. The value must be false for the network interface to\n perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-key - The key of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-value - The value of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.vpc-id - The ID of the VPC for the network\n interface.

    \n
  • \n
  • \n

    \n outpost-arn - The Amazon Resource Name (ARN) of the\n Outpost.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the instance\n owner.

    \n
  • \n
  • \n

    \n placement-group-name - The name of the placement group for the\n instance.

    \n
  • \n
  • \n

    \n placement-partition-number - The partition in which the instance is\n located.

    \n
  • \n
  • \n

    \n platform - The platform. To list only Windows instances, use\n windows.

    \n
  • \n
  • \n

    \n platform-details - The platform (Linux/UNIX |\n Red Hat BYOL Linux | Red Hat Enterprise Linux |\n Red Hat Enterprise Linux with HA | Red Hat Enterprise\n Linux with SQL Server Standard and HA | Red Hat Enterprise\n Linux with SQL Server Enterprise and HA | Red Hat Enterprise\n Linux with SQL Server Standard | Red Hat Enterprise Linux with\n SQL Server Web | Red Hat Enterprise Linux with SQL Server\n Enterprise | SQL Server Enterprise | SQL Server\n Standard | SQL Server Web | SUSE Linux |\n Ubuntu Pro | Windows | Windows BYOL |\n Windows with SQL Server Enterprise | Windows with SQL\n Server Standard | Windows with SQL Server Web).

    \n
  • \n
  • \n

    \n private-dns-name - The private IPv4 DNS name of the\n instance.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-a-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS A records.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-aaaa-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS AAAA records.

    \n
  • \n
  • \n

    \n private-dns-name-options.hostname-type - The type of hostname\n (ip-name | resource-name).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address of the instance.\n This can only be used to filter by the primary IP address of the network\n interface attached to the instance. To filter by additional IP addresses\n assigned to the network interface, use the filter\n network-interface.addresses.private-ip-address.

    \n
  • \n
  • \n

    \n product-code - The product code associated with the AMI used to\n launch the instance.

    \n
  • \n
  • \n

    \n product-code.type - The type of product code (devpay\n | marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n reason - The reason for the current state of the instance (for\n example, shows \"User Initiated [date]\" when you stop or terminate the instance).\n Similar to the state-reason-code filter.

    \n
  • \n
  • \n

    \n requester-id - The ID of the entity that launched the instance on\n your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so\n on).

    \n
  • \n
  • \n

    \n reservation-id - The ID of the instance's reservation. A\n reservation ID is created any time you launch an instance. A reservation ID has\n a one-to-one relationship with an instance launch request, but can be associated\n with more than one instance if you launch multiple instances using the same\n launch request. For example, if you launch one instance, you get one reservation\n ID. If you launch ten instances using the same launch request, you also get one\n reservation ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for\n example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume\n (ebs | instance-store).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the instance performs\n source/destination checking. A value of true means that checking is\n enabled, and false means that checking is disabled. The value must\n be false for the instance to perform network address translation\n (NAT) in your VPC.

    \n
  • \n
  • \n

    \n spot-instance-request-id - The ID of the Spot Instance\n request.

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - A message that describes the state\n change.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n tenancy - The tenancy of an instance (dedicated |\n default | host).

    \n
  • \n
  • \n

    \n tpm-support - Indicates if the instance is configured for\n NitroTPM support (v2.0).

    \n
  • \n
  • \n

    \n usage-operation - The usage operation value for the instance\n (RunInstances | RunInstances:00g0 |\n RunInstances:0010 | RunInstances:1010 |\n RunInstances:1014 | RunInstances:1110 |\n RunInstances:0014 | RunInstances:0210 |\n RunInstances:0110 | RunInstances:0100 |\n RunInstances:0004 | RunInstances:0200 |\n RunInstances:000g | RunInstances:0g00 |\n RunInstances:0002 | RunInstances:0800 |\n RunInstances:0102 | RunInstances:0006 |\n RunInstances:0202).

    \n
  • \n
  • \n

    \n usage-operation-update-time - The time that the usage operation\n was last updated, for example, 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type of the instance\n (paravirtual | hvm).

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC that the instance is running in.

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "InstanceIds": { "target": "com.amazonaws.ec2#InstanceIdStringList", "traits": { @@ -32606,16 +33001,15 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, - "MaxResults": { - "target": "com.amazonaws.ec2#Integer", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

\n

You cannot specify this parameter and the instance IDs parameter in the same request.

", - "smithy.api#xmlName": "maxResults" + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n affinity - The affinity setting for an instance running on a\n Dedicated Host (default | host).

    \n
  • \n
  • \n

    \n architecture - The instance architecture (i386 |\n x86_64 | arm64).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the instance.

    \n
  • \n
  • \n

    \n block-device-mapping.attach-time - The attach time for an EBS\n volume mapped to the instance, for example,\n 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n block-device-mapping.delete-on-termination - A Boolean that\n indicates whether the EBS volume is deleted on instance termination.

    \n
  • \n
  • \n

    \n block-device-mapping.device-name - The device name specified in\n the block device mapping (for example, /dev/sdh or\n xvdh).

    \n
  • \n
  • \n

    \n block-device-mapping.status - The status for the EBS volume\n (attaching | attached | detaching |\n detached).

    \n
  • \n
  • \n

    \n block-device-mapping.volume-id - The volume ID of the EBS\n volume.

    \n
  • \n
  • \n

    \n boot-mode - The boot mode that was specified by the AMI\n (legacy-bios | uefi |\n uefi-preferred).

    \n
  • \n
  • \n

    \n capacity-reservation-id - The ID of the Capacity Reservation into which the\n instance was launched.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-preference\n - The instance's Capacity Reservation preference (open | none).

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-id\n - The ID of the targeted Capacity Reservation.

    \n
  • \n
  • \n

    \n capacity-reservation-specification.capacity-reservation-target.capacity-reservation-resource-group-arn\n - The ARN of the targeted Capacity Reservation group.

    \n
  • \n
  • \n

    \n client-token - The idempotency token you provided when you\n launched the instance.

    \n
  • \n
  • \n

    \n current-instance-boot-mode - The boot mode that is used to launch\n the instance at launch or start (legacy-bios |\n uefi).

    \n
  • \n
  • \n

    \n dns-name - The public DNS name of the instance.

    \n
  • \n
  • \n

    \n ebs-optimized - A Boolean that indicates whether the instance is\n optimized for Amazon EBS I/O.

    \n
  • \n
  • \n

    \n ena-support - A Boolean that indicates whether the instance is\n enabled for enhanced networking with ENA.

    \n
  • \n
  • \n

    \n enclave-options.enabled - A Boolean that indicates whether the\n instance is enabled for Amazon Web Services Nitro Enclaves.

    \n
  • \n
  • \n

    \n hibernation-options.configured - A Boolean that indicates whether\n the instance is enabled for hibernation. A value of true means that\n the instance is enabled for hibernation.

    \n
  • \n
  • \n

    \n host-id - The ID of the Dedicated Host on which the instance is\n running, if applicable.

    \n
  • \n
  • \n

    \n hypervisor - The hypervisor type of the instance\n (ovm | xen). The value xen is used\n for both Xen and Nitro hypervisors.

    \n
  • \n
  • \n

    \n iam-instance-profile.arn - The instance profile associated with\n the instance. Specified as an ARN.

    \n
  • \n
  • \n

    \n iam-instance-profile.id - The instance profile associated with\n the instance. Specified as an ID.

    \n
  • \n
  • \n

    \n image-id - The ID of the image used to launch the\n instance.

    \n
  • \n
  • \n

    \n instance-id - The ID of the instance.

    \n
  • \n
  • \n

    \n instance-lifecycle - Indicates whether this is a Spot Instance, a Scheduled Instance, or\n a Capacity Block (spot | scheduled | capacity-block).

    \n
  • \n
  • \n

    \n instance-state-code - The state of the instance, as a 16-bit\n unsigned integer. The high byte is used for internal purposes and should be\n ignored. The low byte is set based on the state represented. The valid values\n are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64\n (stopping), and 80 (stopped).

    \n
  • \n
  • \n

    \n instance-state-name - The state of the instance\n (pending | running | shutting-down |\n terminated | stopping |\n stopped).

    \n
  • \n
  • \n

    \n instance-type - The type of instance (for example,\n t2.micro).

    \n
  • \n
  • \n

    \n instance.group-id - The ID of the security group for the\n instance.

    \n
  • \n
  • \n

    \n instance.group-name - The name of the security group for the\n instance.

    \n
  • \n
  • \n

    \n ip-address - The public IPv4 address of the instance.

    \n
  • \n
  • \n

    \n ipv6-address - The IPv6 address of the instance.

    \n
  • \n
  • \n

    \n kernel-id - The kernel ID.

    \n
  • \n
  • \n

    \n key-name - The name of the key pair used when the instance was\n launched.

    \n
  • \n
  • \n

    \n launch-index - When launching multiple instances, this is the\n index for the instance in the launch group (for example, 0, 1, 2, and so on).\n

    \n
  • \n
  • \n

    \n launch-time - The time when the instance was launched, in the ISO\n 8601 format in the UTC time zone (YYYY-MM-DDThh:mm:ss.sssZ), for example,\n 2021-09-29T11:04:43.305Z. You can use a wildcard\n (*), for example, 2021-09-29T*, which matches an\n entire day.

    \n
  • \n
  • \n

    \n maintenance-options.auto-recovery - The current automatic\n recovery behavior of the instance (disabled | default).

    \n
  • \n
  • \n

    \n metadata-options.http-endpoint - The status of access to the HTTP\n metadata endpoint on your instance (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv4 - Indicates whether the IPv4\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-protocol-ipv6 - Indicates whether the IPv6\n endpoint is enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n metadata-options.http-put-response-hop-limit - The HTTP metadata\n request put response hop limit (integer, possible values 1 to\n 64)

    \n
  • \n
  • \n

    \n metadata-options.http-tokens - The metadata request authorization\n state (optional | required)

    \n
  • \n
  • \n

    \n metadata-options.instance-metadata-tags - The status of access to\n instance tags from the instance metadata (enabled |\n disabled)

    \n
  • \n
  • \n

    \n metadata-options.state - The state of the metadata option changes\n (pending | applied).

    \n
  • \n
  • \n

    \n monitoring-state - Indicates whether detailed monitoring is\n enabled (disabled | enabled).

    \n
  • \n
  • \n

    \n network-interface.addresses.association.allocation-id - The allocation ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.association-id - The association ID.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.carrier-ip - The carrier IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.ip-owner-id - The owner\n ID of the private IPv4 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.association.public-ip - The ID of the\n association of an Elastic IP address (IPv4) with a network interface.

    \n
  • \n
  • \n

    \n network-interface.addresses.primary - Specifies whether the IPv4\n address of the network interface is the primary private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-dns-name - The private DNS name.

    \n
  • \n
  • \n

    \n network-interface.addresses.private-ip-address - The private IPv4\n address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.allocation-id - The allocation ID\n returned when you allocated the Elastic IP address (IPv4) for your network\n interface.

    \n
  • \n
  • \n

    \n network-interface.association.association-id - The association ID\n returned when the network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.association.carrier-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.customer-owned-ip - The customer-owned IP address.

    \n
  • \n
  • \n

    \n network-interface.association.ip-owner-id - The owner of the\n Elastic IP address (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.association.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.association.public-ip - The address of the\n Elastic IP address (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n network-interface.attachment.attach-time - The time that the\n network interface was attached to an instance.

    \n
  • \n
  • \n

    \n network-interface.attachment.attachment-id - The ID of the\n interface attachment.

    \n
  • \n
  • \n

    \n network-interface.attachment.delete-on-termination - Specifies\n whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n network-interface.attachment.device-index - The device index to\n which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-id - The ID of the instance\n to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.instance-owner-id - The owner ID of\n the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n network-interface.attachment.network-card-index - The index of the network card.

    \n
  • \n
  • \n

    \n network-interface.attachment.status - The status of the\n attachment (attaching | attached |\n detaching | detached).

    \n
  • \n
  • \n

    \n network-interface.availability-zone - The Availability Zone for\n the network interface.

    \n
  • \n
  • \n

    \n network-interface.deny-all-igw-traffic - A Boolean that indicates whether \n a network interface with an IPv6 address is unreachable from the public internet.

    \n
  • \n
  • \n

    \n network-interface.description - The description of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.group-id - The ID of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.group-name - The name of a security group\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv4-prefixes.ipv4-prefix - The IPv4 prefixes that are assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-address - The IPv6 address associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.ipv6-address - The IPv6 address\n associated with the network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-addresses.is-primary-ipv6 - A Boolean that indicates whether this\n is the primary IPv6 address.

    \n
  • \n
  • \n

    \n network-interface.ipv6-native - A Boolean that indicates whether this is\n an IPv6 only network interface.

    \n
  • \n
  • \n

    \n network-interface.ipv6-prefixes.ipv6-prefix - The IPv6 prefix assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.mac-address - The MAC address of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.network-interface-id - The ID of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.outpost-arn - The ARN of the Outpost.

    \n
  • \n
  • \n

    \n network-interface.owner-id - The ID of the owner of the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.private-dns-name - The private DNS name of the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.private-ip-address - The private IPv4 address.

    \n
  • \n
  • \n

    \n network-interface.public-dns-name - The public DNS name.

    \n
  • \n
  • \n

    \n network-interface.requester-id - The requester ID for the network\n interface.

    \n
  • \n
  • \n

    \n network-interface.requester-managed - Indicates whether the\n network interface is being managed by Amazon Web Services.

    \n
  • \n
  • \n

    \n network-interface.status - The status of the network interface\n (available) | in-use).

    \n
  • \n
  • \n

    \n network-interface.source-dest-check - Whether the network\n interface performs source/destination checking. A value of true\n means that checking is enabled, and false means that checking is\n disabled. The value must be false for the network interface to\n perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n network-interface.subnet-id - The ID of the subnet for the\n network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-key - The key of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.tag-value - The value of a tag assigned to the network interface.

    \n
  • \n
  • \n

    \n network-interface.vpc-id - The ID of the VPC for the network\n interface.

    \n
  • \n
  • \n

    \n outpost-arn - The Amazon Resource Name (ARN) of the\n Outpost.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the instance\n owner.

    \n
  • \n
  • \n

    \n placement-group-name - The name of the placement group for the\n instance.

    \n
  • \n
  • \n

    \n placement-partition-number - The partition in which the instance is\n located.

    \n
  • \n
  • \n

    \n platform - The platform. To list only Windows instances, use\n windows.

    \n
  • \n
  • \n

    \n platform-details - The platform (Linux/UNIX |\n Red Hat BYOL Linux | Red Hat Enterprise Linux |\n Red Hat Enterprise Linux with HA | Red Hat Enterprise\n Linux with SQL Server Standard and HA | Red Hat Enterprise\n Linux with SQL Server Enterprise and HA | Red Hat Enterprise\n Linux with SQL Server Standard | Red Hat Enterprise Linux with\n SQL Server Web | Red Hat Enterprise Linux with SQL Server\n Enterprise | SQL Server Enterprise | SQL Server\n Standard | SQL Server Web | SUSE Linux |\n Ubuntu Pro | Windows | Windows BYOL |\n Windows with SQL Server Enterprise | Windows with SQL\n Server Standard | Windows with SQL Server Web).

    \n
  • \n
  • \n

    \n private-dns-name - The private IPv4 DNS name of the\n instance.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-a-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS A records.

    \n
  • \n
  • \n

    \n private-dns-name-options.enable-resource-name-dns-aaaa-record - A\n Boolean that indicates whether to respond to DNS queries for instance hostnames\n with DNS AAAA records.

    \n
  • \n
  • \n

    \n private-dns-name-options.hostname-type - The type of hostname\n (ip-name | resource-name).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address of the instance.\n This can only be used to filter by the primary IP address of the network\n interface attached to the instance. To filter by additional IP addresses\n assigned to the network interface, use the filter\n network-interface.addresses.private-ip-address.

    \n
  • \n
  • \n

    \n product-code - The product code associated with the AMI used to\n launch the instance.

    \n
  • \n
  • \n

    \n product-code.type - The type of product code (devpay\n | marketplace).

    \n
  • \n
  • \n

    \n ramdisk-id - The RAM disk ID.

    \n
  • \n
  • \n

    \n reason - The reason for the current state of the instance (for\n example, shows \"User Initiated [date]\" when you stop or terminate the instance).\n Similar to the state-reason-code filter.

    \n
  • \n
  • \n

    \n requester-id - The ID of the entity that launched the instance on\n your behalf (for example, Amazon Web Services Management Console, Auto Scaling, and so\n on).

    \n
  • \n
  • \n

    \n reservation-id - The ID of the instance's reservation. A\n reservation ID is created any time you launch an instance. A reservation ID has\n a one-to-one relationship with an instance launch request, but can be associated\n with more than one instance if you launch multiple instances using the same\n launch request. For example, if you launch one instance, you get one reservation\n ID. If you launch ten instances using the same launch request, you also get one\n reservation ID.

    \n
  • \n
  • \n

    \n root-device-name - The device name of the root device volume (for\n example, /dev/sda1).

    \n
  • \n
  • \n

    \n root-device-type - The type of the root device volume\n (ebs | instance-store).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the instance performs\n source/destination checking. A value of true means that checking is\n enabled, and false means that checking is disabled. The value must\n be false for the instance to perform network address translation\n (NAT) in your VPC.

    \n
  • \n
  • \n

    \n spot-instance-request-id - The ID of the Spot Instance\n request.

    \n
  • \n
  • \n

    \n state-reason-code - The reason code for the state change.

    \n
  • \n
  • \n

    \n state-reason-message - A message that describes the state\n change.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the instance.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n tenancy - The tenancy of an instance (dedicated |\n default | host).

    \n
  • \n
  • \n

    \n tpm-support - Indicates if the instance is configured for\n NitroTPM support (v2.0).

    \n
  • \n
  • \n

    \n usage-operation - The usage operation value for the instance\n (RunInstances | RunInstances:00g0 |\n RunInstances:0010 | RunInstances:1010 |\n RunInstances:1014 | RunInstances:1110 |\n RunInstances:0014 | RunInstances:0210 |\n RunInstances:0110 | RunInstances:0100 |\n RunInstances:0004 | RunInstances:0200 |\n RunInstances:000g | RunInstances:0g00 |\n RunInstances:0002 | RunInstances:0800 |\n RunInstances:0102 | RunInstances:0006 |\n RunInstances:0202).

    \n
  • \n
  • \n

    \n usage-operation-update-time - The time that the usage operation\n was last updated, for example, 2022-09-15T17:15:20.000Z.

    \n
  • \n
  • \n

    \n virtualization-type - The virtualization type of the instance\n (paravirtual | hvm).

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC that the instance is running in.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" } }, "NextToken": { @@ -32625,6 +33019,14 @@ "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

", "smithy.api#xmlName": "nextToken" } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "MaxResults", + "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

\n

You cannot specify this parameter and the instance IDs parameter in the same request.

", + "smithy.api#xmlName": "maxResults" + } } }, "traits": { @@ -32634,14 +33036,6 @@ "com.amazonaws.ec2#DescribeInstancesResult": { "type": "structure", "members": { - "Reservations": { - "target": "com.amazonaws.ec2#ReservationList", - "traits": { - "aws.protocols#ec2QueryName": "ReservationSet", - "smithy.api#documentation": "

Information about the reservations.

", - "smithy.api#xmlName": "reservationSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -32649,6 +33043,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. This value is null when there\n are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "Reservations": { + "target": "com.amazonaws.ec2#ReservationList", + "traits": { + "aws.protocols#ec2QueryName": "ReservationSet", + "smithy.api#documentation": "

Information about the reservations.

", + "smithy.api#xmlName": "reservationSet" + } } }, "traits": { @@ -32741,11 +33143,16 @@ "com.amazonaws.ec2#DescribeInternetGatewaysRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", + "NextToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The filters.

  • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.
  • attachment.vpc-id - The ID of an attached VPC.
  • internet-gateway-id - The ID of the Internet gateway.
  • owner-id - The ID of the Amazon Web Services account that owns the internet gateway.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
", - "smithy.api#xmlName": "Filter" + "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeInternetGatewaysMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" } }, "DryRun": { @@ -32764,16 +33171,11 @@ "smithy.api#xmlName": "internetGatewayId" } }, - "NextToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeInternetGatewaysMaxResults", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" + "smithy.api#documentation": "

The filters.

  • attachment.state - The current state of the attachment between the gateway and the VPC (available). Present only if a VPC is attached.
  • attachment.vpc-id - The ID of an attached VPC.
  • internet-gateway-id - The ID of the Internet gateway.
  • owner-id - The ID of the Amazon Web Services account that owns the internet gateway.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
", + "smithy.api#xmlName": "Filter" } } }, @@ -33504,13 +33906,6 @@ "com.amazonaws.ec2#DescribeKeyPairsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

  • key-pair-id - The ID of the key pair.
  • fingerprint - The fingerprint of the key pair.
  • key-name - The name of the key pair.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
", - "smithy.api#xmlName": "Filter" - } - }, "KeyNames": { "target": "com.amazonaws.ec2#KeyNameStringList", "traits": { @@ -33525,6 +33920,12 @@ "smithy.api#xmlName": "KeyPairId" } }, + "IncludePublicKey": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

If true, the public key material is included in the response.

\n

Default: false\n

" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -33533,10 +33934,11 @@ "smithy.api#xmlName": "dryRun" } }, - "IncludePublicKey": { - "target": "com.amazonaws.ec2#Boolean", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

If true, the public key material is included in the response.

\n

Default: false\n

" + "smithy.api#documentation": "

The filters.

  • key-pair-id - The ID of the key pair.
  • fingerprint - The fingerprint of the key pair.
  • key-name - The name of the key pair.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
", + "smithy.api#xmlName": "Filter" } } }, @@ -34641,14 +35043,6 @@ "com.amazonaws.ec2#DescribeMovingAddressesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "aws.protocols#ec2QueryName": "Filter", - "smithy.api#documentation": "

One or more filters.

  • moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic).
", - "smithy.api#xmlName": "filter" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -34657,12 +35051,12 @@ "smithy.api#xmlName": "dryRun" } }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeMovingAddressesMaxResults", + "PublicIps": { + "target": "com.amazonaws.ec2#ValueStringList", "traits": { - "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of results to return for the request in a single page. The remaining\n results of the initial request can be seen by sending another request with the returned\n NextToken value. This value can be between 5 and 1000; if\n MaxResults is given a value outside of this range, an error is returned.

\n

Default: If no value is provided, the default is 1000.

", - "smithy.api#xmlName": "maxResults" + "aws.protocols#ec2QueryName": "PublicIp", + "smithy.api#documentation": "

One or more Elastic IP addresses.

", + "smithy.api#xmlName": "publicIp" } }, "NextToken": { @@ -34673,12 +35067,20 @@ "smithy.api#xmlName": "nextToken" } }, - "PublicIps": { - "target": "com.amazonaws.ec2#ValueStringList", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "aws.protocols#ec2QueryName": "PublicIp", - "smithy.api#documentation": "

One or more Elastic IP addresses.

", - "smithy.api#xmlName": "publicIp" + "aws.protocols#ec2QueryName": "Filter", + "smithy.api#documentation": "

One or more filters.

  • moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic).
", + "smithy.api#xmlName": "filter" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeMovingAddressesMaxResults", + "traits": { + "aws.protocols#ec2QueryName": "MaxResults", + "smithy.api#documentation": "

The maximum number of results to return for the request in a single page. The remaining\n results of the initial request can be seen by sending another request with the returned\n NextToken value. This value can be between 5 and 1000; if\n MaxResults is given a value outside of this range, an error is returned.

\n

Default: If no value is provided, the default is 1000.

", + "smithy.api#xmlName": "maxResults" } } }, @@ -34986,11 +35388,16 @@ "com.amazonaws.ec2#DescribeNetworkAclsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", + "NextToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The filters.

  • association.association-id - The ID of an association ID for the ACL.
  • association.network-acl-id - The ID of the network ACL involved in the association.
  • association.subnet-id - The ID of the subnet involved in the association.
  • default - Indicates whether the ACL is the default network ACL for the VPC.
  • entry.cidr - The IPv4 CIDR range specified in the entry.
  • entry.icmp.code - The ICMP code specified in the entry, if any.
  • entry.icmp.type - The ICMP type specified in the entry, if any.
  • entry.ipv6-cidr - The IPv6 CIDR range specified in the entry.
  • entry.port-range.from - The start of the port range specified in the entry.
  • entry.port-range.to - The end of the port range specified in the entry.
  • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).
  • entry.rule-action - Allows or denies the matching traffic (allow | deny).
  • entry.egress - A Boolean that indicates the type of rule. Specify true for egress rules, or false for ingress rules.
  • entry.rule-number - The number of an entry (in other words, rule) in the set of ACL entries.
  • network-acl-id - The ID of the network ACL.
  • owner-id - The ID of the Amazon Web Services account that owns the network ACL.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • vpc-id - The ID of the VPC for the network ACL.
", - "smithy.api#xmlName": "Filter" + "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeNetworkAclsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" } }, "DryRun": { @@ -35008,16 +35415,11 @@ "smithy.api#xmlName": "NetworkAclId" } }, - "NextToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeNetworkAclsMaxResults", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" + "smithy.api#documentation": "

The filters.
  • association.association-id - The ID of an association ID for the ACL.
  • association.network-acl-id - The ID of the network ACL involved in the association.
  • association.subnet-id - The ID of the subnet involved in the association.
  • default - Indicates whether the ACL is the default network ACL for the VPC.
  • entry.cidr - The IPv4 CIDR range specified in the entry.
  • entry.icmp.code - The ICMP code specified in the entry, if any.
  • entry.icmp.type - The ICMP type specified in the entry, if any.
  • entry.ipv6-cidr - The IPv6 CIDR range specified in the entry.
  • entry.port-range.from - The start of the port range specified in the entry.
  • entry.port-range.to - The end of the port range specified in the entry.
  • entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a protocol number).
  • entry.rule-action - Allows or denies the matching traffic (allow | deny).
  • entry.egress - A Boolean that indicates the type of rule. Specify true for egress rules, or false for ingress rules.
  • entry.rule-number - The number of an entry (in other words, rule) in the set of ACL entries.
  • network-acl-id - The ID of the network ACL.
  • owner-id - The ID of the Amazon Web Services account that owns the network ACL.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • vpc-id - The ID of the VPC for the network ACL.
", + "smithy.api#xmlName": "Filter" } } }, @@ -35495,14 +35897,6 @@ "com.amazonaws.ec2#DescribeNetworkInterfaceAttributeRequest": { "type": "structure", "members": { - "Attribute": { - "target": "com.amazonaws.ec2#NetworkInterfaceAttribute", - "traits": { - "aws.protocols#ec2QueryName": "Attribute", - "smithy.api#documentation": "

The attribute of the network interface. This parameter is required.

", - "smithy.api#xmlName": "attribute" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -35520,6 +35914,14 @@ "smithy.api#required": {}, "smithy.api#xmlName": "networkInterfaceId" } + }, + "Attribute": { + "target": "com.amazonaws.ec2#NetworkInterfaceAttribute", + "traits": { + "aws.protocols#ec2QueryName": "Attribute", + "smithy.api#documentation": "

The attribute of the network interface. This parameter is required.

", + "smithy.api#xmlName": "attribute" + } } }, "traits": { @@ -35791,12 +36193,16 @@ "com.amazonaws.ec2#DescribeNetworkInterfacesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", + "NextToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "Filter", - "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n association.allocation-id - The allocation ID returned when you\n\t\t allocated the Elastic IP address (IPv4) for your network interface.

    \n
  • \n
  • \n

    \n association.association-id - The association ID returned when the\n\t\t network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.association.public-ip - The association ID returned when\n\t\t the network interface was associated with the Elastic IP address\n\t\t (IPv4).

    \n
  • \n
  • \n

    \n addresses.primary - Whether the private IPv4 address is the primary\n IP address associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.private-ip-address - The private IPv4 addresses\n\t\t associated with the network interface.

    \n
  • \n
  • \n

    \n association.ip-owner-id - The owner of the Elastic IP address\n (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n association.public-ip - The address of the Elastic IP address\n (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n association.public-dns-name - The public DNS name for the network\n interface (IPv4).

    \n
  • \n
  • \n

    \n attachment.attach-time - The time that the network interface was attached to an instance.

    \n
  • \n
  • \n

    \n attachment.attachment-id - The ID of the interface attachment.

    \n
  • \n
  • \n

    \n attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n attachment.device-index - The device index to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-id - The ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the network interface.

    \n
  • \n
  • \n

    \n description - The description of the network interface.

    \n
  • \n
  • \n

    \n group-id - The ID of a security group associated with the network interface.

    \n
  • \n
  • \n

    \n ipv6-addresses.ipv6-address - An IPv6 address associated with\n the network interface.

    \n
  • \n
  • \n

    \n interface-type - The type of network interface (api_gateway_managed | \n\t\t aws_codestar_connections_managed | branch | \n\t\t ec2_instance_connect_endpoint | efa | efs | \n\t\t gateway_load_balancer | gateway_load_balancer_endpoint | \n\t\t global_accelerator_managed | \n\t\t interface | iot_rules_managed | \n\t\t lambda | load_balancer | \n\t\t nat_gateway | network_load_balancer | \n\t\t quicksight | \n\t\t transit_gateway | trunk | \n\t\t vpc_endpoint).

    \n
  • \n
  • \n

    \n mac-address - The MAC address of the network interface.

    \n
  • \n
  • \n

    \n network-interface-id - The ID of the network interface.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the network interface owner.

    \n
  • \n
  • \n

    \n private-dns-name - The private DNS name of the network interface (IPv4).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address or addresses of the\n network interface.

    \n
  • \n
  • \n

    \n requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    \n
  • \n
  • \n

    \n requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services \n\t\t service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the network interface performs source/destination checking. \n\t\t A value of true means checking is enabled, and false means checking is disabled. \n\t\t The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n status - The status of the network interface. If the network interface is not attached to an instance, the status is available; \n\t\t if a network interface is attached to an instance the status is in-use.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the network interface.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the network interface.

    \n
  • \n
", - "smithy.api#xmlName": "filter" + "smithy.api#documentation": "

The token returned from a previous paginated request.\n\t\t Pagination continues from the end of the items returned by the previous request.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeNetworkInterfacesMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return for this request. To get the next page of items,\n\t\t make another request with the token returned in the output. You cannot specify this\n\t\t parameter and the network interface IDs parameter in the same request. For more information, \n\t\t see Pagination.

" } }, "DryRun": { @@ -35814,16 +36220,12 @@ "smithy.api#xmlName": "NetworkInterfaceId" } }, - "NextToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request.\n\t\t Pagination continues from the end of the items returned by the previous request.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeNetworkInterfacesMaxResults", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for this request. To get the next page of items,\n\t\t make another request with the token returned in the output. You cannot specify this\n\t\t parameter and the network interface IDs parameter in the same request. For more information, \n\t\t see Pagination.

" + "aws.protocols#ec2QueryName": "Filter", + "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n association.allocation-id - The allocation ID returned when you\n\t\t allocated the Elastic IP address (IPv4) for your network interface.

    \n
  • \n
  • \n

    \n association.association-id - The association ID returned when the\n\t\t network interface was associated with an IPv4 address.

    \n
  • \n
  • \n

    \n addresses.association.owner-id - The owner ID of the addresses associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.association.public-ip - The association ID returned when\n\t\t the network interface was associated with the Elastic IP address\n\t\t (IPv4).

    \n
  • \n
  • \n

    \n addresses.primary - Whether the private IPv4 address is the primary\n IP address associated with the network interface.

    \n
  • \n
  • \n

    \n addresses.private-ip-address - The private IPv4 addresses\n\t\t associated with the network interface.

    \n
  • \n
  • \n

    \n association.ip-owner-id - The owner of the Elastic IP address\n (IPv4) associated with the network interface.

    \n
  • \n
  • \n

    \n association.public-ip - The address of the Elastic IP address\n (IPv4) bound to the network interface.

    \n
  • \n
  • \n

    \n association.public-dns-name - The public DNS name for the network\n interface (IPv4).

    \n
  • \n
  • \n

    \n attachment.attach-time - The time that the network interface was attached to an instance.

    \n
  • \n
  • \n

    \n attachment.attachment-id - The ID of the interface attachment.

    \n
  • \n
  • \n

    \n attachment.delete-on-termination - Indicates whether the attachment is deleted when an instance is terminated.

    \n
  • \n
  • \n

    \n attachment.device-index - The device index to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-id - The ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.instance-owner-id - The owner ID of the instance to which the network interface is attached.

    \n
  • \n
  • \n

    \n attachment.status - The status of the attachment (attaching | attached | detaching | detached).

    \n
  • \n
  • \n

    \n availability-zone - The Availability Zone of the network interface.

    \n
  • \n
  • \n

    \n description - The description of the network interface.

    \n
  • \n
  • \n

    \n group-id - The ID of a security group associated with the network interface.

    \n
  • \n
  • \n

    \n ipv6-addresses.ipv6-address - An IPv6 address associated with\n the network interface.

    \n
  • \n
  • \n

    \n interface-type - The type of network interface (api_gateway_managed | \n\t\t aws_codestar_connections_managed | branch | \n\t\t ec2_instance_connect_endpoint | efa | efs | \n\t\t gateway_load_balancer | gateway_load_balancer_endpoint | \n\t\t global_accelerator_managed | \n\t\t interface | iot_rules_managed | \n\t\t lambda | load_balancer | \n\t\t nat_gateway | network_load_balancer | \n\t\t quicksight | \n\t\t transit_gateway | trunk | \n\t\t vpc_endpoint).

    \n
  • \n
  • \n

    \n mac-address - The MAC address of the network interface.

    \n
  • \n
  • \n

    \n network-interface-id - The ID of the network interface.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the network interface owner.

    \n
  • \n
  • \n

    \n private-dns-name - The private DNS name of the network interface (IPv4).

    \n
  • \n
  • \n

    \n private-ip-address - The private IPv4 address or addresses of the\n network interface.

    \n
  • \n
  • \n

    \n requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface.

    \n
  • \n
  • \n

    \n requester-managed - Indicates whether the network interface is being managed by an Amazon Web Services \n\t\t service (for example, Amazon Web Services Management Console, Auto Scaling, and so on).

    \n
  • \n
  • \n

    \n source-dest-check - Indicates whether the network interface performs source/destination checking. \n\t\t A value of true means checking is enabled, and false means checking is disabled. \n\t\t The value must be false for the network interface to perform network address translation (NAT) in your VPC.

    \n
  • \n
  • \n

    \n status - The status of the network interface. If the network interface is not attached to an instance, the status is available; \n\t\t if a network interface is attached to an instance the status is in-use.

    \n
  • \n
  • \n

    \n subnet-id - The ID of the subnet for the network interface.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the network interface.

    \n
  • \n
", + "smithy.api#xmlName": "filter" } } }, @@ -35871,18 +36273,18 @@ "com.amazonaws.ec2#DescribePlacementGroupsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", + "GroupIds": { + "target": "com.amazonaws.ec2#PlacementGroupIdStringList", "traits": { - "smithy.api#documentation": "

The filters.
  • group-name - The name of the placement group.
  • group-arn - The Amazon Resource Name (ARN) of the placement group.
  • spread-level - The spread level for the placement group (host | rack).
  • state - The state of the placement group (pending | available | deleting | deleted).
  • strategy - The strategy of the placement group (cluster | spread | partition).
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
", - "smithy.api#xmlName": "Filter" + "smithy.api#documentation": "

The IDs of the placement groups.

", + "smithy.api#xmlName": "GroupId" } }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, @@ -35894,11 +36296,11 @@ "smithy.api#xmlName": "groupName" } }, - "GroupIds": { - "target": "com.amazonaws.ec2#PlacementGroupIdStringList", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The IDs of the placement groups.

", - "smithy.api#xmlName": "GroupId" + "smithy.api#documentation": "

The filters.
  • group-name - The name of the placement group.
  • group-arn - The Amazon Resource Name (ARN) of the placement group.
  • spread-level - The spread level for the placement group (host | rack).
  • state - The state of the placement group (pending | available | deleting | deleted).
  • strategy - The strategy of the placement group (cluster | spread | partition).
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.
", + "smithy.api#xmlName": "Filter" } } }, @@ -36246,13 +36648,6 @@ "com.amazonaws.ec2#DescribeRegionsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.
  • endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com).
  • opt-in-status - The opt-in status of the Region (opt-in-not-required | opted-in | not-opted-in).
  • region-name - The name of the Region (for example, us-east-1).
", - "smithy.api#xmlName": "Filter" - } - }, "RegionNames": { "target": "com.amazonaws.ec2#RegionNameStringList", "traits": { @@ -36260,6 +36655,12 @@ "smithy.api#xmlName": "RegionName" } }, + "AllRegions": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether to display all Regions, including Regions that are disabled for your account.

" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -36268,10 +36669,11 @@ "smithy.api#xmlName": "dryRun" } }, - "AllRegions": { - "target": "com.amazonaws.ec2#Boolean", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

Indicates whether to display all Regions, including Regions that are disabled for your account.

" + "smithy.api#documentation": "

The filters.
  • endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com).
  • opt-in-status - The opt-in status of the Region (opt-in-not-required | opted-in | not-opted-in).
  • region-name - The name of the Region (for example, us-east-1).
", + "smithy.api#xmlName": "Filter" } } }, @@ -36413,13 +36815,6 @@ "com.amazonaws.ec2#DescribeReservedInstancesListingsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

One or more filters.
  • reserved-instances-id - The ID of the Reserved Instances.
  • reserved-instances-listing-id - The ID of the Reserved Instances listing.
  • status - The status of the Reserved Instance listing (pending | active | cancelled | closed).
  • status-message - The reason for the status.
", - "smithy.api#xmlName": "Filter" - } - }, "ReservedInstancesId": { "target": "com.amazonaws.ec2#ReservationId", "traits": { @@ -36435,6 +36830,13 @@ "smithy.api#documentation": "

One or more Reserved Instance listing IDs.

", "smithy.api#xmlName": "reservedInstancesListingId" } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

One or more filters.
  • reserved-instances-id - The ID of the Reserved Instances.
  • reserved-instances-listing-id - The ID of the Reserved Instances listing.
  • status - The status of the Reserved Instance listing (pending | active | cancelled | closed).
  • status-message - The reason for the status.
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -36479,13 +36881,6 @@ "com.amazonaws.ec2#DescribeReservedInstancesModificationsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

One or more filters.
  • client-token - The idempotency token for the modification request.
  • create-date - The time when the modification request was created.
  • effective-date - The time when the modification becomes effective.
  • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.
  • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.
  • modification-result.target-configuration.instance-count - The number of new Reserved Instances.
  • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.
  • reserved-instances-id - The ID of the Reserved Instances modified.
  • reserved-instances-modification-id - The ID of the modification request.
  • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).
  • status-message - The reason for the status.
  • update-date - The time when the modification request was last updated.
", - "smithy.api#xmlName": "Filter" - } - }, "ReservedInstancesModificationIds": { "target": "com.amazonaws.ec2#ReservedInstancesModificationIdStringList", "traits": { @@ -36500,6 +36895,13 @@ "smithy.api#documentation": "

The token to retrieve the next page of results.

", "smithy.api#xmlName": "nextToken" } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

One or more filters.
  • client-token - The idempotency token for the modification request.
  • create-date - The time when the modification request was created.
  • effective-date - The time when the modification becomes effective.
  • modification-result.reserved-instances-id - The ID for the Reserved Instances created as part of the modification request. This ID is only available when the status of the modification is fulfilled.
  • modification-result.target-configuration.availability-zone - The Availability Zone for the new Reserved Instances.
  • modification-result.target-configuration.instance-count - The number of new Reserved Instances.
  • modification-result.target-configuration.instance-type - The instance type of the new Reserved Instances.
  • reserved-instances-id - The ID of the Reserved Instances modified.
  • reserved-instances-modification-id - The ID of the modification request.
  • status - The status of the Reserved Instances modification request (processing | fulfilled | failed).
  • status-message - The reason for the status.
  • update-date - The time when the modification request was last updated.
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -36559,13 +36961,6 @@ "smithy.api#documentation": "

The Availability Zone in which the Reserved Instance can be used.

" } }, - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

One or more filters.
  • availability-zone - The Availability Zone where the Reserved Instance can be used.
  • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).
  • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).
  • instance-type - The instance type that is covered by the reservation.
  • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from both Amazon Web Services and the Reserved Instance Marketplace are listed.
  • product-description - The Reserved Instance product platform description (Linux/UNIX | Linux with SQL Server Standard | Linux with SQL Server Web | Linux with SQL Server Enterprise | SUSE Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Windows | Windows with SQL Server Standard | Windows with SQL Server Web | Windows with SQL Server Enterprise).
  • reserved-instances-offering-id - The Reserved Instances offering ID.
  • scope - The scope of the Reserved Instance (Availability Zone or Region).
  • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).
", - "smithy.api#xmlName": "Filter" - } - }, "IncludeMarketplace": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -36623,6 +37018,13 @@ "smithy.api#xmlName": "dryRun" } }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

One or more filters.
  • availability-zone - The Availability Zone where the Reserved Instance can be used.
  • duration - The duration of the Reserved Instance (for example, one year or three years), in seconds (31536000 | 94608000).
  • fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).
  • instance-type - The instance type that is covered by the reservation.
  • marketplace - Set to true to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from both Amazon Web Services and the Reserved Instance Marketplace are listed.
  • product-description - The Reserved Instance product platform description (Linux/UNIX | Linux with SQL Server Standard | Linux with SQL Server Web | Linux with SQL Server Enterprise | SUSE Linux | Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | Windows | Windows with SQL Server Standard | Windows with SQL Server Web | Windows with SQL Server Enterprise).
  • reserved-instances-offering-id - The Reserved Instances offering ID.
  • scope - The scope of the Reserved Instance (Availability Zone or Region).
  • usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).
", + "smithy.api#xmlName": "Filter" + } + }, "InstanceTenancy": { "target": "com.amazonaws.ec2#Tenancy", "traits": { @@ -36631,12 +37033,12 @@ "smithy.api#xmlName": "instanceTenancy" } }, - "MaxResults": { - "target": "com.amazonaws.ec2#Integer", + "OfferingType": { + "target": "com.amazonaws.ec2#OfferingTypeValues", "traits": { - "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. The maximum is 100. Default: 100
", - "smithy.api#xmlName": "maxResults" + "aws.protocols#ec2QueryName": "OfferingType", + "smithy.api#documentation": "

The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API\n\t\t\tversion, you only have access to the Medium Utilization Reserved Instance\n\t\t\toffering type.

", + "smithy.api#xmlName": "offeringType" } }, "NextToken": { @@ -36647,12 +37049,12 @@ "smithy.api#xmlName": "nextToken" } }, - "OfferingType": { - "target": "com.amazonaws.ec2#OfferingTypeValues", + "MaxResults": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "OfferingType", - "smithy.api#documentation": "

The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API\n\t\t\tversion, you only have access to the Medium Utilization Reserved Instance\n\t\t\toffering type.

", - "smithy.api#xmlName": "offeringType" + "aws.protocols#ec2QueryName": "MaxResults", + "smithy.api#documentation": "

The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned NextToken value. The maximum is 100. Default: 100
", + "smithy.api#xmlName": "maxResults" } } }, @@ -36664,14 +37066,6 @@ "com.amazonaws.ec2#DescribeReservedInstancesOfferingsResult": { "type": "structure", "members": { - "ReservedInstancesOfferings": { - "target": "com.amazonaws.ec2#ReservedInstancesOfferingList", - "traits": { - "aws.protocols#ec2QueryName": "ReservedInstancesOfferingsSet", - "smithy.api#documentation": "

A list of Reserved Instances offerings.

", - "smithy.api#xmlName": "reservedInstancesOfferingsSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -36679,6 +37073,14 @@ "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when\n\t\t\tthere are no more results to return.

", "smithy.api#xmlName": "nextToken" } + }, + "ReservedInstancesOfferings": { + "target": "com.amazonaws.ec2#ReservedInstancesOfferingList", + "traits": { + "aws.protocols#ec2QueryName": "ReservedInstancesOfferingsSet", + "smithy.api#documentation": "

A list of Reserved Instances offerings.

", + "smithy.api#xmlName": "reservedInstancesOfferingsSet" + } } }, "traits": { @@ -36689,13 +37091,6 @@ "com.amazonaws.ec2#DescribeReservedInstancesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n availability-zone - The Availability Zone where the Reserved Instance can be used.

    \n
  • \n
  • \n

    \n duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    \n
  • \n
  • \n

    \n end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    \n
  • \n
  • \n

    \n fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    \n
  • \n
  • \n

    \n instance-type - The instance type that is covered by the reservation.

    \n
  • \n
  • \n

    \n scope - The scope of the Reserved Instance (Region or Availability Zone).

    \n
  • \n
  • \n

    \n product-description - The Reserved Instance product platform description\n (Linux/UNIX | Linux with SQL Server Standard |\n Linux with SQL Server Web | Linux with SQL Server Enterprise |\n SUSE Linux | \n Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | \n Windows | Windows with SQL Server Standard |\n Windows with SQL Server Web | Windows with SQL Server Enterprise).

    \n
  • \n
  • \n

    \n reserved-instances-id - The ID of the Reserved Instance.

    \n
  • \n
  • \n

    \n start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    \n
  • \n
  • \n

    \n state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "OfferingClass": { "target": "com.amazonaws.ec2#OfferingClassType", "traits": { @@ -36717,6 +37112,13 @@ "smithy.api#xmlName": "dryRun" } }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

One or more filters.

\n
    \n
  • \n

    \n availability-zone - The Availability Zone where the Reserved Instance can be used.

    \n
  • \n
  • \n

    \n duration - The duration of the Reserved Instance (one year or three years), in seconds (31536000 | 94608000).

    \n
  • \n
  • \n

    \n end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z).

    \n
  • \n
  • \n

    \n fixed-price - The purchase price of the Reserved Instance (for example, 9800.0).

    \n
  • \n
  • \n

    \n instance-type - The instance type that is covered by the reservation.

    \n
  • \n
  • \n

    \n scope - The scope of the Reserved Instance (Region or Availability Zone).

    \n
  • \n
  • \n

    \n product-description - The Reserved Instance product platform description\n (Linux/UNIX | Linux with SQL Server Standard |\n Linux with SQL Server Web | Linux with SQL Server Enterprise |\n SUSE Linux | \n Red Hat Enterprise Linux | Red Hat Enterprise Linux with HA | \n Windows | Windows with SQL Server Standard |\n Windows with SQL Server Web | Windows with SQL Server Enterprise).

    \n
  • \n
  • \n

    \n reserved-instances-id - The ID of the Reserved Instance.

    \n
  • \n
  • \n

    \n start - The time at which the Reserved Instance purchase request was placed (for example, 2014-08-07T11:54:42.000Z).

    \n
  • \n
  • \n

    \n state - The state of the Reserved Instance (payment-pending | active | payment-failed | retired).

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n usage-price - The usage price of the Reserved Instance, per hour (for example, 0.84).

    \n
  • \n
", + "smithy.api#xmlName": "Filter" + } + }, "OfferingType": { "target": "com.amazonaws.ec2#OfferingTypeValues", "traits": { @@ -36813,11 +37215,16 @@ "com.amazonaws.ec2#DescribeRouteTablesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", + "NextToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n association.gateway-id - The ID of the gateway involved in the\n\t\t association.

    \n
  • \n
  • \n

    \n association.route-table-association-id - The ID of an association\n ID for the route table.

    \n
  • \n
  • \n

    \n association.route-table-id - The ID of the route table involved in\n the association.

    \n
  • \n
  • \n

    \n association.subnet-id - The ID of the subnet involved in the\n association.

    \n
  • \n
  • \n

    \n association.main - Indicates whether the route table is the main\n route table for the VPC (true | false). Route tables\n that do not have an association ID are not returned in the response.

    \n
  • \n
  • \n

    \n owner-id - The ID of the Amazon Web Services account that owns the route table.

    \n
  • \n
  • \n

    \n route-table-id - The ID of the route table.

    \n
  • \n
  • \n

    \n route.destination-cidr-block - The IPv4 CIDR range specified in a\n route in the table.

    \n
  • \n
  • \n

    \n route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table.

    \n
  • \n
  • \n

    \n route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Services \n\t\t\t\t service specified in a route in the table.

    \n
  • \n
  • \n

    \n route.egress-only-internet-gateway-id - The ID of an\n egress-only Internet gateway specified in a route in the route table.

    \n
  • \n
  • \n

    \n route.gateway-id - The ID of a gateway specified in a route in the table.

    \n
  • \n
  • \n

    \n route.instance-id - The ID of an instance specified in a route in the table.

    \n
  • \n
  • \n

    \n route.nat-gateway-id - The ID of a NAT gateway.

    \n
  • \n
  • \n

    \n route.transit-gateway-id - The ID of a transit gateway.

    \n
  • \n
  • \n

    \n route.origin - Describes how the route was created. \n CreateRouteTable indicates that the route was automatically\n created when the route table was created; CreateRoute indicates\n that the route was manually added to the route table;\n EnableVgwRoutePropagation indicates that the route was\n propagated by route propagation.

    \n
  • \n
  • \n

    \n route.state - The state of a route in the route table\n (active | blackhole). The blackhole state\n indicates that the route's target isn't available (for example, the specified\n gateway isn't attached to the VPC, the specified NAT instance has been\n terminated, and so on).

    \n
  • \n
  • \n

    \n route.vpc-peering-connection-id - The ID of a VPC peering\n\t\t connection specified in a route in the table.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the route table.

    \n
  • \n
", - "smithy.api#xmlName": "Filter" + "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeRouteTablesMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" } }, "DryRun": { @@ -36835,16 +37242,11 @@ "smithy.api#xmlName": "RouteTableId" } }, - "NextToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeRouteTablesMaxResults", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" + "smithy.api#documentation": "

The filters.

\n
    \n
  • \n

    \n association.gateway-id - The ID of the gateway involved in the\n\t\t association.

    \n
  • \n
  • \n

    \n association.route-table-association-id - The ID of an association\n ID for the route table.

    \n
  • \n
  • \n

    \n association.route-table-id - The ID of the route table involved in\n the association.

    \n
  • \n
  • \n

    \n association.subnet-id - The ID of the subnet involved in the\n association.

    \n
  • \n
  • \n

    \n association.main - Indicates whether the route table is the main\n route table for the VPC (true | false). Route tables\n that do not have an association ID are not returned in the response.

    \n
  • \n
  • \n

    \n owner-id - The ID of the Amazon Web Services account that owns the route table.

    \n
  • \n
  • \n

    \n route-table-id - The ID of the route table.

    \n
  • \n
  • \n

    \n route.destination-cidr-block - The IPv4 CIDR range specified in a\n route in the table.

    \n
  • \n
  • \n

    \n route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route table.

    \n
  • \n
  • \n

    \n route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Services \n\t\t\t\t service specified in a route in the table.

    \n
  • \n
  • \n

    \n route.egress-only-internet-gateway-id - The ID of an\n egress-only Internet gateway specified in a route in the route table.

    \n
  • \n
  • \n

    \n route.gateway-id - The ID of a gateway specified in a route in the table.

    \n
  • \n
  • \n

    \n route.instance-id - The ID of an instance specified in a route in the table.

    \n
  • \n
  • \n

    \n route.nat-gateway-id - The ID of a NAT gateway.

    \n
  • \n
  • \n

    \n route.transit-gateway-id - The ID of a transit gateway.

    \n
  • \n
  • \n

    \n route.origin - Describes how the route was created. \n CreateRouteTable indicates that the route was automatically\n created when the route table was created; CreateRoute indicates\n that the route was manually added to the route table;\n EnableVgwRoutePropagation indicates that the route was\n propagated by route propagation.

    \n
  • \n
  • \n

    \n route.state - The state of a route in the route table\n (active | blackhole). The blackhole state\n indicates that the route's target isn't available (for example, the specified\n gateway isn't attached to the VPC, the specified NAT instance has been\n terminated, and so on).

    \n
  • \n
  • \n

    \n route.vpc-peering-connection-id - The ID of a VPC peering\n\t\t connection specified in a route in the table.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC for the route table.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" } } }, @@ -37325,13 +37727,6 @@ "com.amazonaws.ec2#DescribeSecurityGroupsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

\n
    \n
  • \n

    \n description - The description of the security group.

    \n
  • \n
  • \n

    \n egress.ip-permission.cidr - An IPv4 CIDR block for an outbound\n security group rule.

    \n
  • \n
  • \n

    \n egress.ip-permission.from-port - For an outbound rule, the\n start of port range for the TCP and UDP protocols, or an ICMP type\n number.

    \n
  • \n
  • \n

    \n egress.ip-permission.group-id - The ID of a security group\n that has been referenced in an outbound security group rule.

    \n
  • \n
  • \n

    \n egress.ip-permission.group-name - The name of a security group\n that is referenced in an outbound security group rule.

    \n
  • \n
  • \n

    \n egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an\n outbound security group rule.

    \n
  • \n
  • \n

    \n egress.ip-permission.prefix-list-id - The ID of a prefix list to which a security group rule allows outbound access.

    \n
  • \n
  • \n

    \n egress.ip-permission.protocol - The IP protocol for an\n outbound security group rule (tcp | udp |\n icmp, a protocol number, or -1 for all protocols).

    \n
  • \n
  • \n

    \n egress.ip-permission.to-port - For an outbound rule, the end\n of port range for the TCP and UDP protocols, or an ICMP code.

    \n
  • \n
  • \n

    \n egress.ip-permission.user-id - The ID of an Amazon Web Services account that\n has been referenced in an outbound security group rule.

    \n
  • \n
  • \n

    \n group-id - The ID of the security group.

    \n
  • \n
  • \n

    \n group-name - The name of the security group.

    \n
  • \n
  • \n

    \n ip-permission.cidr - An IPv4 CIDR block for an inbound security\n group rule.

    \n
  • \n
  • \n

    \n ip-permission.from-port - For an inbound rule, the start of port\n range for the TCP and UDP protocols, or an ICMP type number.

    \n
  • \n
  • \n

    \n ip-permission.group-id - The ID of a security group that has been\n referenced in an inbound security group rule.

    \n
  • \n
  • \n

    \n ip-permission.group-name - The name of a security group that is\n referenced in an inbound security group rule.

    \n
  • \n
  • \n

    \n ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security\n group rule.

    \n
  • \n
  • \n

    \n ip-permission.prefix-list-id - The ID of a prefix list from which a security group rule allows inbound access.

    \n
  • \n
  • \n

    \n ip-permission.protocol - The IP protocol for an inbound security\n group rule (tcp | udp | icmp, a\n protocol number, or -1 for all protocols).

    \n
  • \n
  • \n

    \n ip-permission.to-port - For an inbound rule, the end of port range\n for the TCP and UDP protocols, or an ICMP code.

    \n
  • \n
  • \n

    \n ip-permission.user-id - The ID of an Amazon Web Services account that has been\n referenced in an inbound security group rule.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the owner of the security group.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC specified when the security group was created.

    \n
  • \n
", - "smithy.api#xmlName": "Filter" - } - }, "GroupIds": { "target": "com.amazonaws.ec2#GroupIdStringList", "traits": { @@ -37346,14 +37741,6 @@ "smithy.api#xmlName": "GroupName" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -37365,6 +37752,21 @@ "traits": { "smithy.api#documentation": "

The maximum number of items to return for this request. To get the next page of items,\n make another request with the token returned in the output. This value can be between 5 and 1000. \n If this parameter is not specified, then all items are returned. For more information, see \n Pagination.

" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters. If using multiple filters for rules, the results include security groups for which any combination of rules - not necessarily a single rule - match all filters.

\n
    \n
  • \n

    \n description - The description of the security group.

    \n
  • \n
  • \n

    \n egress.ip-permission.cidr - An IPv4 CIDR block for an outbound\n security group rule.

    \n
  • \n
  • \n

    \n egress.ip-permission.from-port - For an outbound rule, the\n start of port range for the TCP and UDP protocols, or an ICMP type\n number.

    \n
  • \n
  • \n

    \n egress.ip-permission.group-id - The ID of a security group\n that has been referenced in an outbound security group rule.

    \n
  • \n
  • \n

    \n egress.ip-permission.group-name - The name of a security group\n that is referenced in an outbound security group rule.

    \n
  • \n
  • \n

    \n egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an\n outbound security group rule.

    \n
  • \n
  • \n

    \n egress.ip-permission.prefix-list-id - The ID of a prefix list to which a security group rule allows outbound access.

    \n
  • \n
  • \n

    \n egress.ip-permission.protocol - The IP protocol for an\n outbound security group rule (tcp | udp |\n icmp, a protocol number, or -1 for all protocols).

    \n
  • \n
  • \n

    \n egress.ip-permission.to-port - For an outbound rule, the end\n of port range for the TCP and UDP protocols, or an ICMP code.

    \n
  • \n
  • \n

    \n egress.ip-permission.user-id - The ID of an Amazon Web Services account that\n has been referenced in an outbound security group rule.

    \n
  • \n
  • \n

    \n group-id - The ID of the security group.

    \n
  • \n
  • \n

    \n group-name - The name of the security group.

    \n
  • \n
  • \n

    \n ip-permission.cidr - An IPv4 CIDR block for an inbound security\n group rule.

    \n
  • \n
  • \n

    \n ip-permission.from-port - For an inbound rule, the start of port\n range for the TCP and UDP protocols, or an ICMP type number.

    \n
  • \n
  • \n

    \n ip-permission.group-id - The ID of a security group that has been\n referenced in an inbound security group rule.

    \n
  • \n
  • \n

    \n ip-permission.group-name - The name of a security group that is\n referenced in an inbound security group rule.

    \n
  • \n
  • \n

    \n ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security\n group rule.

    \n
  • \n
  • \n

    \n ip-permission.prefix-list-id - The ID of a prefix list from which a security group rule allows inbound access.

    \n
  • \n
  • \n

    \n ip-permission.protocol - The IP protocol for an inbound security\n group rule (tcp | udp | icmp, a\n protocol number, or -1 for all protocols).

    \n
  • \n
  • \n

    \n ip-permission.to-port - For an inbound rule, the end of port range\n for the TCP and UDP protocols, or an ICMP code.

    \n
  • \n
  • \n

    \n ip-permission.user-id - The ID of an Amazon Web Services account that has been\n referenced in an inbound security group rule.

    \n
  • \n
  • \n

    \n owner-id - The Amazon Web Services account ID of the owner of the security group.

    \n
  • \n
  • \n

    \n tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value.\n For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

    \n
  • \n
  • \n

    \n tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

    \n
  • \n
  • \n

    \n vpc-id - The ID of the VPC specified when the security group was created.

    \n
  • \n
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -37374,14 +37776,6 @@ "com.amazonaws.ec2#DescribeSecurityGroupsResult": { "type": "structure", "members": { - "SecurityGroups": { - "target": "com.amazonaws.ec2#SecurityGroupList", - "traits": { - "aws.protocols#ec2QueryName": "SecurityGroupInfo", - "smithy.api#documentation": "

Information about the security groups.

", - "smithy.api#xmlName": "securityGroupInfo" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -37389,6 +37783,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "SecurityGroups": { + "target": "com.amazonaws.ec2#SecurityGroupList", + "traits": { + "aws.protocols#ec2QueryName": "SecurityGroupInfo", + "smithy.api#documentation": "

Information about the security groups.

", + "smithy.api#xmlName": "securityGroupInfo" + } } }, "traits": { @@ -37456,14 +37858,6 @@ "com.amazonaws.ec2#DescribeSnapshotAttributeResult": { "type": "structure", "members": { - "CreateVolumePermissions": { - "target": "com.amazonaws.ec2#CreateVolumePermissionList", - "traits": { - "aws.protocols#ec2QueryName": "CreateVolumePermission", - "smithy.api#documentation": "

The users and groups that have the permissions for creating volumes from the\n snapshot.

", - "smithy.api#xmlName": "createVolumePermission" - } - }, "ProductCodes": { "target": "com.amazonaws.ec2#ProductCodeList", "traits": { @@ -37479,6 +37873,14 @@ "smithy.api#documentation": "

The ID of the EBS snapshot.

", "smithy.api#xmlName": "snapshotId" } + }, + "CreateVolumePermissions": { + "target": "com.amazonaws.ec2#CreateVolumePermissionList", + "traits": { + "aws.protocols#ec2QueryName": "CreateVolumePermission", + "smithy.api#documentation": "

The users and groups that have the permissions for creating volumes from the\n snapshot.

", + "smithy.api#xmlName": "createVolumePermission" + } } }, "traits": { @@ -37669,13 +38071,6 @@ "com.amazonaws.ec2#DescribeSnapshotsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

  • description - A description of the snapshot.
  • encrypted - Indicates whether the snapshot is encrypted (true | false)
  • owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is not the user-configured Amazon Web Services account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.
  • owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the related parameter instead of this filter.
  • progress - The progress of the snapshot, as a percentage (for example, 80%).
  • snapshot-id - The snapshot ID.
  • start-time - The time stamp when the snapshot was initiated.
  • status - The status of the snapshot (pending | completed | error).
  • storage-tier - The storage tier of the snapshot (archive | standard).
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • volume-id - The ID of the volume the snapshot is for.
  • volume-size - The size of the volume, in GiB.
", - "smithy.api#xmlName": "Filter" - } - }, "MaxResults": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -37716,6 +38111,13 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

  • description - A description of the snapshot.
  • encrypted - Indicates whether the snapshot is encrypted (true | false)
  • owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is not the user-configured Amazon Web Services account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.
  • owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the related parameter instead of this filter.
  • progress - The progress of the snapshot, as a percentage (for example, 80%).
  • snapshot-id - The snapshot ID.
  • start-time - The time stamp when the snapshot was initiated.
  • status - The status of the snapshot (pending | completed | error).
  • storage-tier - The storage tier of the snapshot (archive | standard).
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • volume-id - The ID of the volume the snapshot is for.
  • volume-size - The size of the volume, in GiB.
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -37725,14 +38127,6 @@ "com.amazonaws.ec2#DescribeSnapshotsResult": { "type": "structure", "members": { - "Snapshots": { - "target": "com.amazonaws.ec2#SnapshotList", - "traits": { - "aws.protocols#ec2QueryName": "SnapshotSet", - "smithy.api#documentation": "

Information about the snapshots.

", - "smithy.api#xmlName": "snapshotSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -37740,6 +38134,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "Snapshots": { + "target": "com.amazonaws.ec2#SnapshotList", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotSet", + "smithy.api#documentation": "

Information about the snapshots.

", + "smithy.api#xmlName": "snapshotSet" + } } }, "traits": { @@ -37857,12 +38259,14 @@ "smithy.api#xmlName": "dryRun" } }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeSpotFleetInstancesMaxResults", + "SpotFleetRequestId": { + "target": "com.amazonaws.ec2#SpotFleetRequestId", "traits": { - "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

", - "smithy.api#xmlName": "maxResults" + "aws.protocols#ec2QueryName": "SpotFleetRequestId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the Spot Fleet request.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "spotFleetRequestId" } }, "NextToken": { @@ -37873,14 +38277,12 @@ "smithy.api#xmlName": "nextToken" } }, - "SpotFleetRequestId": { - "target": "com.amazonaws.ec2#SpotFleetRequestId", + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeSpotFleetInstancesMaxResults", "traits": { - "aws.protocols#ec2QueryName": "SpotFleetRequestId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the Spot Fleet request.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "spotFleetRequestId" + "aws.protocols#ec2QueryName": "MaxResults", + "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

", + "smithy.api#xmlName": "maxResults" } } }, @@ -38001,6 +38403,16 @@ "smithy.api#xmlName": "dryRun" } }, + "SpotFleetRequestId": { + "target": "com.amazonaws.ec2#SpotFleetRequestId", + "traits": { + "aws.protocols#ec2QueryName": "SpotFleetRequestId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the Spot Fleet request.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "spotFleetRequestId" + } + }, "EventType": { "target": "com.amazonaws.ec2#EventType", "traits": { @@ -38009,12 +38421,14 @@ "smithy.api#xmlName": "eventType" } }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeSpotFleetRequestHistoryMaxResults", + "StartTime": { + "target": "com.amazonaws.ec2#DateTime", "traits": { - "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

", - "smithy.api#xmlName": "maxResults" + "aws.protocols#ec2QueryName": "StartTime", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The starting date and time for the events, in UTC format (for example,\n YYYY-MM-DDTHH:MM:SSZ).

", + "smithy.api#required": {}, + "smithy.api#xmlName": "startTime" } }, "NextToken": { @@ -38025,24 +38439,12 @@ "smithy.api#xmlName": "nextToken" } }, - "SpotFleetRequestId": { - "target": "com.amazonaws.ec2#SpotFleetRequestId", - "traits": { - "aws.protocols#ec2QueryName": "SpotFleetRequestId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the Spot Fleet request.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "spotFleetRequestId" - } - }, - "StartTime": { - "target": "com.amazonaws.ec2#DateTime", + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeSpotFleetRequestHistoryMaxResults", "traits": { - "aws.protocols#ec2QueryName": "StartTime", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The starting date and time for the events, in UTC format (for example,\n YYYY-MM-DDTHH:MM:SSZ).

", - "smithy.api#required": {}, - "smithy.api#xmlName": "startTime" + "aws.protocols#ec2QueryName": "MaxResults", + "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

", + "smithy.api#xmlName": "maxResults" } } }, @@ -38183,12 +38585,12 @@ "smithy.api#xmlName": "dryRun" } }, - "MaxResults": { - "target": "com.amazonaws.ec2#Integer", + "SpotFleetRequestIds": { + "target": "com.amazonaws.ec2#SpotFleetRequestIdList", "traits": { - "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

", - "smithy.api#xmlName": "maxResults" + "aws.protocols#ec2QueryName": "SpotFleetRequestId", + "smithy.api#documentation": "

The IDs of the Spot Fleet requests.

", + "smithy.api#xmlName": "spotFleetRequestId" } }, "NextToken": { @@ -38199,12 +38601,12 @@ "smithy.api#xmlName": "nextToken" } }, - "SpotFleetRequestIds": { - "target": "com.amazonaws.ec2#SpotFleetRequestIdList", + "MaxResults": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "SpotFleetRequestId", - "smithy.api#documentation": "

The IDs of the Spot Fleet requests.

", - "smithy.api#xmlName": "spotFleetRequestId" + "aws.protocols#ec2QueryName": "MaxResults", + "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

", + "smithy.api#xmlName": "maxResults" } } }, @@ -38387,11 +38789,16 @@ "com.amazonaws.ec2#DescribeSpotInstanceRequestsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", + "NextToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The filters.

  • availability-zone-group - The Availability Zone group.
  • create-time - The time stamp when the Spot Instance request was created.
  • fault-code - The fault code related to the request.
  • fault-message - The fault message related to the request.
  • instance-id - The ID of the instance that fulfilled the request.
  • launch-group - The Spot Instance launch group.
  • launch.block-device-mapping.delete-on-termination - Indicates whether the EBS volume is deleted on instance termination.
  • launch.block-device-mapping.device-name - The device name for the volume in the block device mapping (for example, /dev/sdh or xvdh).
  • launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume.
  • launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB.
  • launch.block-device-mapping.volume-type - The type of EBS volume: gp2 or gp3 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.
  • launch.group-id - The ID of the security group for the instance.
  • launch.group-name - The name of the security group for the instance.
  • launch.image-id - The ID of the AMI.
  • launch.instance-type - The type of instance (for example, m3.medium).
  • launch.kernel-id - The kernel ID.
  • launch.key-name - The name of the key pair the instance launched with.
  • launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot Instance.
  • launch.ramdisk-id - The RAM disk ID.
  • launched-availability-zone - The Availability Zone in which the request is launched.
  • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.
  • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.
  • network-interface.description - A description of the network interface.
  • network-interface.device-index - The index of the device for the network interface attachment on the instance.
  • network-interface.group-id - The ID of the security group associated with the network interface.
  • network-interface.network-interface-id - The ID of the network interface.
  • network-interface.private-ip-address - The primary private IP address of the network interface.
  • network-interface.subnet-id - The ID of the subnet for the instance.
  • product-description - The product description associated with the instance (Linux/UNIX | Windows).
  • spot-instance-request-id - The Spot Instance request ID.
  • spot-price - The maximum hourly price for any Spot Instance launched to fulfill the request.
  • state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide.
  • status-code - The short code describing the most recent evaluation of your Spot Instance request.
  • status-message - The message explaining the status of the Spot Instance request.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • type - The type of Spot Instance request (one-time | persistent).
  • valid-from - The start date of the request.
  • valid-until - The end date of the request.
", - "smithy.api#xmlName": "Filter" + "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

" } }, "DryRun": { @@ -38409,16 +38816,11 @@ "smithy.api#xmlName": "SpotInstanceRequestId" } }, - "NextToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.ec2#Integer", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for this request.\n To get the next page of items, make another request with the token returned in the output.\n\t For more information, see Pagination.

" + "smithy.api#documentation": "

The filters.

  • availability-zone-group - The Availability Zone group.
  • create-time - The time stamp when the Spot Instance request was created.
  • fault-code - The fault code related to the request.
  • fault-message - The fault message related to the request.
  • instance-id - The ID of the instance that fulfilled the request.
  • launch-group - The Spot Instance launch group.
  • launch.block-device-mapping.delete-on-termination - Indicates whether the EBS volume is deleted on instance termination.
  • launch.block-device-mapping.device-name - The device name for the volume in the block device mapping (for example, /dev/sdh or xvdh).
  • launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS volume.
  • launch.block-device-mapping.volume-size - The size of the EBS volume, in GiB.
  • launch.block-device-mapping.volume-type - The type of EBS volume: gp2 or gp3 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic.
  • launch.group-id - The ID of the security group for the instance.
  • launch.group-name - The name of the security group for the instance.
  • launch.image-id - The ID of the AMI.
  • launch.instance-type - The type of instance (for example, m3.medium).
  • launch.kernel-id - The kernel ID.
  • launch.key-name - The name of the key pair the instance launched with.
  • launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot Instance.
  • launch.ramdisk-id - The RAM disk ID.
  • launched-availability-zone - The Availability Zone in which the request is launched.
  • network-interface.addresses.primary - Indicates whether the IP address is the primary private IP address.
  • network-interface.delete-on-termination - Indicates whether the network interface is deleted when the instance is terminated.
  • network-interface.description - A description of the network interface.
  • network-interface.device-index - The index of the device for the network interface attachment on the instance.
  • network-interface.group-id - The ID of the security group associated with the network interface.
  • network-interface.network-interface-id - The ID of the network interface.
  • network-interface.private-ip-address - The primary private IP address of the network interface.
  • network-interface.subnet-id - The ID of the subnet for the instance.
  • product-description - The product description associated with the instance (Linux/UNIX | Windows).
  • spot-instance-request-id - The Spot Instance request ID.
  • spot-price - The maximum hourly price for any Spot Instance launched to fulfill the request.
  • state - The state of the Spot Instance request (open | active | closed | cancelled | failed). Spot request status information can help you track your Amazon EC2 Spot Instance requests. For more information, see Spot request status in the Amazon EC2 User Guide.
  • status-code - The short code describing the most recent evaluation of your Spot Instance request.
  • status-message - The message explaining the status of the Spot Instance request.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • type - The type of Spot Instance request (one-time | persistent).
  • valid-from - The start date of the request.
  • valid-until - The end date of the request.
", + "smithy.api#xmlName": "Filter" } } }, @@ -38507,21 +38909,6 @@ "com.amazonaws.ec2#DescribeSpotPriceHistoryRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

  • availability-zone - The Availability Zone for which prices should be returned.
  • instance-type - The type of instance (for example, m3.medium).
  • product-description - The product description for the Spot price (Linux/UNIX | Red Hat Enterprise Linux | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | Red Hat Enterprise Linux (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).
  • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).
  • timestamp - The time stamp of the Spot price history, in UTC format (for example, ddd MMM dd HH:mm:ss UTC YYYY). You can use wildcards (* and ?). Greater than or less than comparison is not supported.
", - "smithy.api#xmlName": "Filter" - } - }, - "AvailabilityZone": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZone", - "smithy.api#documentation": "

Filters the results by the specified Availability Zone.

", - "smithy.api#xmlName": "availabilityZone" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -38530,6 +38917,14 @@ "smithy.api#xmlName": "dryRun" } }, + "StartTime": { + "target": "com.amazonaws.ec2#DateTime", + "traits": { + "aws.protocols#ec2QueryName": "StartTime", + "smithy.api#documentation": "

The date and time, up to the past 90 days, from which to start retrieving the price\n history data, in UTC format (for example,\n YYYY-MM-DDTHH:MM:SSZ).

", + "smithy.api#xmlName": "startTime" + } + }, "EndTime": { "target": "com.amazonaws.ec2#DateTime", "traits": { @@ -38545,6 +38940,28 @@ "smithy.api#xmlName": "InstanceType" } }, + "ProductDescriptions": { + "target": "com.amazonaws.ec2#ProductDescriptionList", + "traits": { + "smithy.api#documentation": "

Filters the results by the specified basic product descriptions.

", + "smithy.api#xmlName": "ProductDescription" + } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

  • availability-zone - The Availability Zone for which prices should be returned.
  • instance-type - The type of instance (for example, m3.medium).
  • product-description - The product description for the Spot price (Linux/UNIX | Red Hat Enterprise Linux | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | Red Hat Enterprise Linux (Amazon VPC) | SUSE Linux (Amazon VPC) | Windows (Amazon VPC)).
  • spot-price - The Spot price. The value must match exactly (or use wildcards; greater than or less than comparison is not supported).
  • timestamp - The time stamp of the Spot price history, in UTC format (for example, ddd MMM dd HH:mm:ss UTC YYYY). You can use wildcards (* and ?). Greater than or less than comparison is not supported.
", + "smithy.api#xmlName": "Filter" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

Filters the results by the specified Availability Zone.

", + "smithy.api#xmlName": "availabilityZone" + } + }, "MaxResults": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -38560,21 +38977,6 @@ "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

", "smithy.api#xmlName": "nextToken" } - }, - "ProductDescriptions": { - "target": "com.amazonaws.ec2#ProductDescriptionList", - "traits": { - "smithy.api#documentation": "

Filters the results by the specified basic product descriptions.

", - "smithy.api#xmlName": "ProductDescription" - } - }, - "StartTime": { - "target": "com.amazonaws.ec2#DateTime", - "traits": { - "aws.protocols#ec2QueryName": "StartTime", - "smithy.api#documentation": "

The date and time, up to the past 90 days, from which to start retrieving the price\n history data, in UTC format (for example,\n YYYY-MM-DDTHH:MM:SSZ).

", - "smithy.api#xmlName": "startTime" - } } }, "traits": { @@ -38920,14 +39322,6 @@ "smithy.api#xmlName": "SubnetId" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -38939,6 +39333,14 @@ "traits": { "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } } }, "traits": { @@ -38948,14 +39350,6 @@ "com.amazonaws.ec2#DescribeSubnetsResult": { "type": "structure", "members": { - "Subnets": { - "target": "com.amazonaws.ec2#SubnetList", - "traits": { - "aws.protocols#ec2QueryName": "SubnetSet", - "smithy.api#documentation": "

Information about the subnets.

", - "smithy.api#xmlName": "subnetSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -38963,6 +39357,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "Subnets": { + "target": "com.amazonaws.ec2#SubnetList", + "traits": { + "aws.protocols#ec2QueryName": "SubnetSet", + "smithy.api#documentation": "

Information about the subnets.

", + "smithy.api#xmlName": "subnetSet" + } } }, "traits": { @@ -40948,13 +41350,6 @@ "com.amazonaws.ec2#DescribeVolumeStatusRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

  • action.code - The action code for the event (for example, enable-volume-io).
  • action.description - A description of the action.
  • action.event-id - The event ID associated with the action.
  • availability-zone - The Availability Zone of the instance.
  • event.description - A description of the event.
  • event.event-id - The event ID.
  • event.event-type - The event type (for io-enabled: passed | failed; for io-performance: io-performance:degraded | io-performance:severely-degraded | io-performance:stalled).
  • event.not-after - The latest end time for the event.
  • event.not-before - The earliest start time for the event.
  • volume-status.details-name - The cause for volume-status.status (io-enabled | io-performance).
  • volume-status.details-status - The status of volume-status.details-name (for io-enabled: passed | failed; for io-performance: normal | degraded | severely-degraded | stalled).
  • volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data).
", - "smithy.api#xmlName": "Filter" - } - }, "MaxResults": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -40981,6 +41376,13 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

  • action.code - The action code for the event (for example, enable-volume-io).
  • action.description - A description of the action.
  • action.event-id - The event ID associated with the action.
  • availability-zone - The Availability Zone of the instance.
  • event.description - A description of the event.
  • event.event-id - The event ID.
  • event.event-type - The event type (for io-enabled: passed | failed; for io-performance: io-performance:degraded | io-performance:severely-degraded | io-performance:stalled).
  • event.not-after - The latest end time for the event.
  • event.not-before - The earliest start time for the event.
  • volume-status.details-name - The cause for volume-status.status (io-enabled | io-performance).
  • volume-status.details-status - The status of volume-status.details-name (for io-enabled: passed | failed; for io-performance: normal | degraded | severely-degraded | stalled).
  • volume-status.status - The status of the volume (ok | impaired | warning | insufficient-data).
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -41239,14 +41641,6 @@ "com.amazonaws.ec2#DescribeVolumesModificationsResult": { "type": "structure", "members": { - "VolumesModifications": { - "target": "com.amazonaws.ec2#VolumeModificationList", - "traits": { - "aws.protocols#ec2QueryName": "VolumeModificationSet", - "smithy.api#documentation": "

Information about the volume modifications.

", - "smithy.api#xmlName": "volumeModificationSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -41254,6 +41648,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "VolumesModifications": { + "target": "com.amazonaws.ec2#VolumeModificationList", + "traits": { + "aws.protocols#ec2QueryName": "VolumeModificationSet", + "smithy.api#documentation": "

Information about the volume modifications.

", + "smithy.api#xmlName": "volumeModificationSet" + } } }, "traits": { @@ -41263,13 +41665,6 @@ "com.amazonaws.ec2#DescribeVolumesRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

  • attachment.attach-time - The time stamp when the attachment initiated.
  • attachment.delete-on-termination - Whether the volume is deleted on instance termination.
  • attachment.device - The device name specified in the block device mapping (for example, /dev/sda1).
  • attachment.instance-id - The ID of the instance the volume is attached to.
  • attachment.status - The attachment state (attaching | attached | detaching).
  • availability-zone - The Availability Zone in which the volume was created.
  • create-time - The time stamp when the volume was created.
  • encrypted - Indicates whether the volume is encrypted (true | false)
  • multi-attach-enabled - Indicates whether the volume is enabled for Multi-Attach (true | false)
  • fast-restored - Indicates whether the volume was created from a snapshot that is enabled for fast snapshot restore (true | false).
  • size - The size of the volume, in GiB.
  • snapshot-id - The snapshot from which the volume was created.
  • status - The state of the volume (creating | available | in-use | deleting | deleted | error).
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • volume-id - The volume ID.
  • volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 | sc1 | standard)
", - "smithy.api#xmlName": "Filter" - } - }, "VolumeIds": { "target": "com.amazonaws.ec2#VolumeIdStringList", "traits": { @@ -41285,12 +41680,11 @@ "smithy.api#xmlName": "dryRun" } }, - "MaxResults": { - "target": "com.amazonaws.ec2#Integer", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "aws.protocols#ec2QueryName": "MaxResults", - "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output. \n\tFor more information, see Pagination.

", - "smithy.api#xmlName": "maxResults" + "smithy.api#documentation": "

The filters.

  • attachment.attach-time - The time stamp when the attachment initiated.
  • attachment.delete-on-termination - Whether the volume is deleted on instance termination.
  • attachment.device - The device name specified in the block device mapping (for example, /dev/sda1).
  • attachment.instance-id - The ID of the instance the volume is attached to.
  • attachment.status - The attachment state (attaching | attached | detaching).
  • availability-zone - The Availability Zone in which the volume was created.
  • create-time - The time stamp when the volume was created.
  • encrypted - Indicates whether the volume is encrypted (true | false)
  • multi-attach-enabled - Indicates whether the volume is enabled for Multi-Attach (true | false)
  • fast-restored - Indicates whether the volume was created from a snapshot that is enabled for fast snapshot restore (true | false).
  • size - The size of the volume, in GiB.
  • snapshot-id - The snapshot from which the volume was created.
  • status - The state of the volume (creating | available | in-use | deleting | deleted | error).
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • volume-id - The volume ID.
  • volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 | sc1 | standard)
", + "smithy.api#xmlName": "Filter" } }, "NextToken": { @@ -41300,6 +41694,14 @@ "smithy.api#documentation": "

The token returned from a previous paginated request.\n Pagination continues from the end of the items returned by the previous request.

", "smithy.api#xmlName": "nextToken" } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "MaxResults", + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output. \n\tFor more information, see Pagination.

", + "smithy.api#xmlName": "maxResults" + } } }, "traits": { @@ -41309,14 +41711,6 @@ "com.amazonaws.ec2#DescribeVolumesResult": { "type": "structure", "members": { - "Volumes": { - "target": "com.amazonaws.ec2#VolumeList", - "traits": { - "aws.protocols#ec2QueryName": "VolumeSet", - "smithy.api#documentation": "

Information about the volumes.

", - "smithy.api#xmlName": "volumeSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -41324,6 +41718,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. \n This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "Volumes": { + "target": "com.amazonaws.ec2#VolumeList", + "traits": { + "aws.protocols#ec2QueryName": "VolumeSet", + "smithy.api#documentation": "

Information about the volumes.

", + "smithy.api#xmlName": "volumeSet" + } } }, "traits": { @@ -41407,14 +41809,6 @@ "com.amazonaws.ec2#DescribeVpcAttributeResult": { "type": "structure", "members": { - "VpcId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VpcId", - "smithy.api#documentation": "

The ID of the VPC.

", - "smithy.api#xmlName": "vpcId" - } - }, "EnableDnsHostnames": { "target": "com.amazonaws.ec2#AttributeBooleanValue", "traits": { @@ -41438,6 +41832,14 @@ "smithy.api#documentation": "

Indicates whether Network Address Usage metrics are enabled for your VPC.

", "smithy.api#xmlName": "enableNetworkAddressUsageMetrics" } + }, + "VpcId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#documentation": "

The ID of the VPC.

", + "smithy.api#xmlName": "vpcId" + } } }, "traits": { @@ -41495,6 +41897,13 @@ "com.amazonaws.ec2#DescribeVpcClassicLinkDnsSupportRequest": { "type": "structure", "members": { + "VpcIds": { + "target": "com.amazonaws.ec2#VpcClassicLinkIdList", + "traits": { + "smithy.api#documentation": "

The IDs of the VPCs.

", + "smithy.api#xmlName": "VpcIds" + } + }, "MaxResults": { "target": "com.amazonaws.ec2#DescribeVpcClassicLinkDnsSupportMaxResults", "traits": { @@ -41510,13 +41919,6 @@ "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

", "smithy.api#xmlName": "nextToken" } - }, - "VpcIds": { - "target": "com.amazonaws.ec2#VpcClassicLinkIdList", - "traits": { - "smithy.api#documentation": "

The IDs of the VPCs.

", - "smithy.api#xmlName": "VpcIds" - } } }, "traits": { @@ -41550,13 +41952,6 @@ "com.amazonaws.ec2#DescribeVpcClassicLinkRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", - "traits": { - "smithy.api#documentation": "

The filters.

  • is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true | false).
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
", - "smithy.api#xmlName": "Filter" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -41571,6 +41966,13 @@ "smithy.api#documentation": "

The VPCs for which you want to describe the ClassicLink status.

", "smithy.api#xmlName": "VpcId" } + }, + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

  • is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true | false).
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
", + "smithy.api#xmlName": "Filter" + } } }, "traits": { @@ -42153,11 +42555,16 @@ "com.amazonaws.ec2#DescribeVpcPeeringConnectionsRequest": { "type": "structure", "members": { - "Filters": { - "target": "com.amazonaws.ec2#FilterList", + "NextToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The filters.

  • accepter-vpc-info.cidr-block - The IPv4 CIDR block of the accepter VPC.
  • accepter-vpc-info.owner-id - The ID of the Amazon Web Services account that owns the accepter VPC.
  • accepter-vpc-info.vpc-id - The ID of the accepter VPC.
  • expiration-time - The expiration date and time for the VPC peering connection.
  • requester-vpc-info.cidr-block - The IPv4 CIDR block of the requester's VPC.
  • requester-vpc-info.owner-id - The ID of the Amazon Web Services account that owns the requester VPC.
  • requester-vpc-info.vpc-id - The ID of the requester VPC.
  • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleting | deleted | rejected).
  • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • vpc-peering-connection-id - The ID of the VPC peering connection.
", - "smithy.api#xmlName": "Filter" + "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeVpcPeeringConnectionsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" } }, "DryRun": { @@ -42175,16 +42582,11 @@ "smithy.api#xmlName": "VpcPeeringConnectionId" } }, - "NextToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The token returned from a previous paginated request. Pagination continues from the end of the items returned by the previous request.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.ec2#DescribeVpcPeeringConnectionsMaxResults", + "Filters": { + "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" + "smithy.api#documentation": "

The filters.

  • accepter-vpc-info.cidr-block - The IPv4 CIDR block of the accepter VPC.
  • accepter-vpc-info.owner-id - The ID of the Amazon Web Services account that owns the accepter VPC.
  • accepter-vpc-info.vpc-id - The ID of the accepter VPC.
  • expiration-time - The expiration date and time for the VPC peering connection.
  • requester-vpc-info.cidr-block - The IPv4 CIDR block of the requester's VPC.
  • requester-vpc-info.owner-id - The ID of the Amazon Web Services account that owns the requester VPC.
  • requester-vpc-info.vpc-id - The ID of the requester VPC.
  • status-code - The status of the VPC peering connection (pending-acceptance | failed | expired | provisioning | active | deleting | deleted | rejected).
  • status-message - A message that provides more information about the status of the VPC peering connection, if applicable.
  • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.
  • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
  • vpc-peering-connection-id - The ID of the VPC peering connection.
", + "smithy.api#xmlName": "Filter" } } }, @@ -42326,14 +42728,6 @@ "smithy.api#xmlName": "VpcId" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -42345,6 +42739,14 @@ "traits": { "smithy.api#documentation": "

The maximum number of items to return for this request.\n\tTo get the next page of items, make another request with the token returned in the output.\n\tFor more information, see Pagination.

" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } } }, "traits": { @@ -42354,14 +42756,6 @@ "com.amazonaws.ec2#DescribeVpcsResult": { "type": "structure", "members": { - "Vpcs": { - "target": "com.amazonaws.ec2#VpcList", - "traits": { - "aws.protocols#ec2QueryName": "VpcSet", - "smithy.api#documentation": "

Information about the VPCs.

", - "smithy.api#xmlName": "vpcSet" - } - }, "NextToken": { "target": "com.amazonaws.ec2#String", "traits": { @@ -42369,6 +42763,14 @@ "smithy.api#documentation": "

The token to include in another request to get the next page of items. This value is null when there are no more items to return.

", "smithy.api#xmlName": "nextToken" } + }, + "Vpcs": { + "target": "com.amazonaws.ec2#VpcList", + "traits": { + "aws.protocols#ec2QueryName": "VpcSet", + "smithy.api#documentation": "

Information about the VPCs.

", + "smithy.api#xmlName": "vpcSet" + } } }, "traits": { @@ -42778,6 +43180,14 @@ "com.amazonaws.ec2#DetachNetworkInterfaceRequest": { "type": "structure", "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, "AttachmentId": { "target": "com.amazonaws.ec2#NetworkInterfaceAttachmentId", "traits": { @@ -42788,14 +43198,6 @@ "smithy.api#xmlName": "attachmentId" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "Force": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -43107,22 +43509,6 @@ "com.amazonaws.ec2#DhcpOptions": { "type": "structure", "members": { - "DhcpConfigurations": { - "target": "com.amazonaws.ec2#DhcpConfigurationList", - "traits": { - "aws.protocols#ec2QueryName": "DhcpConfigurationSet", - "smithy.api#documentation": "

The DHCP options in the set.

", - "smithy.api#xmlName": "dhcpConfigurationSet" - } - }, - "DhcpOptionsId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "DhcpOptionsId", - "smithy.api#documentation": "

The ID of the set of DHCP options.

", - "smithy.api#xmlName": "dhcpOptionsId" - } - }, "OwnerId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -43138,6 +43524,22 @@ "smithy.api#documentation": "

Any tags assigned to the DHCP options set.

", "smithy.api#xmlName": "tagSet" } + }, + "DhcpOptionsId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DhcpOptionsId", + "smithy.api#documentation": "

The ID of the set of DHCP options.

", + "smithy.api#xmlName": "dhcpOptionsId" + } + }, + "DhcpConfigurations": { + "target": "com.amazonaws.ec2#DhcpConfigurationList", + "traits": { + "aws.protocols#ec2QueryName": "DhcpConfigurationSet", + "smithy.api#documentation": "

The DHCP options in the set.

", + "smithy.api#xmlName": "dhcpConfigurationSet" + } } }, "traits": { @@ -44323,6 +44725,64 @@ "smithy.api#input": {} } }, + "com.amazonaws.ec2#DisassociateCapacityReservationBillingOwner": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DisassociateCapacityReservationBillingOwnerRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DisassociateCapacityReservationBillingOwnerResult" + }, + "traits": { + "smithy.api#documentation": "

Cancels a pending request to assign billing of the unused capacity of a Capacity Reservation to a \n\t\t\t\tconsumer account, or revokes a request that has already been accepted. For more information, see \n\t\t\t\tBilling assignment for \n\t\t\t\t\tshared Amazon EC2 Capacity Reservations.

" + } + }, + "com.amazonaws.ec2#DisassociateCapacityReservationBillingOwnerRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + }, + "CapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the Capacity Reservation.

", + "smithy.api#required": {} + } + }, + "UnusedReservationBillingOwnerId": { + "target": "com.amazonaws.ec2#AccountID", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the consumer account to which the request was sent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#DisassociateCapacityReservationBillingOwnerResult": { + "type": "structure", + "members": { + "Return": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Return", + "smithy.api#documentation": "

Returns true if the request succeeds; otherwise, it returns an error.

", + "smithy.api#xmlName": "return" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#DisassociateClientVpnTargetNetwork": { "type": "operation", "input": { @@ -44774,6 +45234,14 @@ "com.amazonaws.ec2#DisassociateRouteTableRequest": { "type": "structure", "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, "AssociationId": { "target": "com.amazonaws.ec2#RouteTableAssociationId", "traits": { @@ -44783,14 +45251,6 @@ "smithy.api#required": {}, "smithy.api#xmlName": "associationId" } - }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } } }, "traits": { @@ -45232,16 +45692,6 @@ "com.amazonaws.ec2#DiskImageDetail": { "type": "structure", "members": { - "Bytes": { - "target": "com.amazonaws.ec2#Long", - "traits": { - "aws.protocols#ec2QueryName": "Bytes", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The size of the disk image, in GiB.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "bytes" - } - }, "Format": { "target": "com.amazonaws.ec2#DiskImageFormat", "traits": { @@ -45252,6 +45702,16 @@ "smithy.api#xmlName": "format" } }, + "Bytes": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "aws.protocols#ec2QueryName": "Bytes", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The size of the disk image, in GiB.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "bytes" + } + }, "ImportManifestUrl": { "target": "com.amazonaws.ec2#ImportManifestUrl", "traits": { @@ -45798,14 +46258,6 @@ "com.amazonaws.ec2#EbsInstanceBlockDeviceSpecification": { "type": "structure", "members": { - "DeleteOnTermination": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DeleteOnTermination", - "smithy.api#documentation": "

Indicates whether the volume is deleted on instance termination.

", - "smithy.api#xmlName": "deleteOnTermination" - } - }, "VolumeId": { "target": "com.amazonaws.ec2#VolumeId", "traits": { @@ -45813,6 +46265,14 @@ "smithy.api#documentation": "

The ID of the EBS volume.

", "smithy.api#xmlName": "volumeId" } + }, + "DeleteOnTermination": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DeleteOnTermination", + "smithy.api#documentation": "

Indicates whether the volume is deleted on instance termination.

", + "smithy.api#xmlName": "deleteOnTermination" + } } }, "traits": { @@ -46316,7 +46776,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

Describes the association between an instance and an Elastic Graphics accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
\n

Describes the association between an instance and an Elastic Graphics accelerator.

" } }, "com.amazonaws.ec2#ElasticGpuAssociationList": { @@ -46341,7 +46801,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

Describes the status of an Elastic Graphics accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
\n

Describes the status of an Elastic Graphics accelerator.

" } }, "com.amazonaws.ec2#ElasticGpuId": { @@ -46378,7 +46838,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

A specification for an Elastic Graphics accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
\n

A specification for an Elastic Graphics accelerator.

" } }, "com.amazonaws.ec2#ElasticGpuSpecificationList": { @@ -46397,13 +46857,13 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Type", - "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
", + "smithy.api#documentation": "

The elastic GPU type.

", "smithy.api#xmlName": "type" } } }, "traits": { - "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
\n

Describes an elastic GPU.

" } }, "com.amazonaws.ec2#ElasticGpuSpecificationResponseList": { @@ -46513,7 +46973,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, \n we recommend that you use Amazon EC2 G4, G5, or G6 instances.

\n
\n

Describes an Elastic Graphics accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
\n

Describes an Elastic Graphics accelerator.

" } }, "com.amazonaws.ec2#ElasticInferenceAccelerator": { @@ -46535,7 +46995,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Describes an elastic inference accelerator. \n

" + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Describes an elastic inference accelerator. \n

" } }, "com.amazonaws.ec2#ElasticInferenceAcceleratorAssociation": { @@ -46575,7 +47035,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n Describes the association between an instance and an elastic inference accelerator. \n

" + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Describes the association between an instance and an elastic inference accelerator. \n

" } }, "com.amazonaws.ec2#ElasticInferenceAcceleratorAssociationList": { @@ -49195,14 +49655,6 @@ "com.amazonaws.ec2#ExportToS3TaskSpecification": { "type": "structure", "members": { - "ContainerFormat": { - "target": "com.amazonaws.ec2#ContainerFormat", - "traits": { - "aws.protocols#ec2QueryName": "ContainerFormat", - "smithy.api#documentation": "

The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is\n exported.

", - "smithy.api#xmlName": "containerFormat" - } - }, "DiskImageFormat": { "target": "com.amazonaws.ec2#DiskImageFormat", "traits": { @@ -49211,6 +49663,14 @@ "smithy.api#xmlName": "diskImageFormat" } }, + "ContainerFormat": { + "target": "com.amazonaws.ec2#ContainerFormat", + "traits": { + "aws.protocols#ec2QueryName": "ContainerFormat", + "smithy.api#documentation": "

The container format used to combine disk images with metadata (such as OVF). If absent, only the disk image is\n exported.

", + "smithy.api#xmlName": "containerFormat" + } + }, "S3Bucket": { "target": "com.amazonaws.ec2#String", "traits": { @@ -49970,6 +50430,18 @@ "traits": { "smithy.api#enumValue": "use-capacity-reservations-first" } + }, + "USE_CAPACITY_RESERVATIONS_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "use-capacity-reservations-only" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "none" + } } } }, @@ -51788,18 +52260,18 @@ "smithy.api#required": {} } }, - "DryRun": { + "Latest": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" + "smithy.api#documentation": "

When enabled, retrieves the latest console output for the instance.

\n

Default: disabled (false)

" } }, - "Latest": { + "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

When enabled, retrieves the latest console output for the instance.

\n

Default: disabled (false)

" + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" } } }, @@ -51818,14 +52290,6 @@ "smithy.api#xmlName": "instanceId" } }, - "Output": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Output", - "smithy.api#documentation": "

The console output, base64-encoded. If you are using a command line tool, the tool\n decodes the output for you.

", - "smithy.api#xmlName": "output" - } - }, "Timestamp": { "target": "com.amazonaws.ec2#DateTime", "traits": { @@ -51833,6 +52297,14 @@ "smithy.api#documentation": "

The time at which the output was last updated.

", "smithy.api#xmlName": "timestamp" } + }, + "Output": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Output", + "smithy.api#documentation": "

The console output, base64-encoded. If you are using a command line tool, the tool\n decodes the output for you.

", + "smithy.api#xmlName": "output" + } } }, "traits": { @@ -51857,7 +52329,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } }, "InstanceId": { @@ -51921,7 +52393,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } }, "InstanceFamily": { @@ -52333,7 +52805,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } } }, @@ -52569,7 +53041,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } } }, @@ -53743,7 +54215,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } } @@ -53763,14 +54235,6 @@ "smithy.api#xmlName": "instanceId" } }, - "PasswordData": { - "target": "com.amazonaws.ec2#PasswordData", - "traits": { - "aws.protocols#ec2QueryName": "PasswordData", - "smithy.api#documentation": "

The password of the instance. Returns an empty string if the password is not\n available.

", - "smithy.api#xmlName": "passwordData" - } - }, "Timestamp": { "target": "com.amazonaws.ec2#DateTime", "traits": { @@ -53778,6 +54242,14 @@ "smithy.api#documentation": "

The time the data was last updated.

", "smithy.api#xmlName": "timestamp" } + }, + "PasswordData": { + "target": "com.amazonaws.ec2#PasswordData", + "traits": { + "aws.protocols#ec2QueryName": "PasswordData", + "smithy.api#documentation": "

The password of the instance. Returns an empty string if the password is not\n available.

", + "smithy.api#xmlName": "passwordData" + } } }, "traits": { @@ -55312,14 +55784,6 @@ "com.amazonaws.ec2#GroupIdentifier": { "type": "structure", "members": { - "GroupName": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "GroupName", - "smithy.api#documentation": "

The name of the security group.

", - "smithy.api#xmlName": "groupName" - } - }, "GroupId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -55327,6 +55791,14 @@ "smithy.api#documentation": "

The ID of the security group.

", "smithy.api#xmlName": "groupId" } + }, + "GroupName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "GroupName", + "smithy.api#documentation": "

The name of the security group.

", + "smithy.api#xmlName": "groupName" + } } }, "traits": { @@ -56351,78 +56823,6 @@ "com.amazonaws.ec2#Image": { "type": "structure", "members": { - "Architecture": { - "target": "com.amazonaws.ec2#ArchitectureValues", - "traits": { - "aws.protocols#ec2QueryName": "Architecture", - "smithy.api#documentation": "

The architecture of the image.

", - "smithy.api#xmlName": "architecture" - } - }, - "CreationDate": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "CreationDate", - "smithy.api#documentation": "

The date and time the image was created.

", - "smithy.api#xmlName": "creationDate" - } - }, - "ImageId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "ImageId", - "smithy.api#documentation": "

The ID of the AMI.

", - "smithy.api#xmlName": "imageId" - } - }, - "ImageLocation": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "ImageLocation", - "smithy.api#documentation": "

The location of the AMI.

", - "smithy.api#xmlName": "imageLocation" - } - }, - "ImageType": { - "target": "com.amazonaws.ec2#ImageTypeValues", - "traits": { - "aws.protocols#ec2QueryName": "ImageType", - "smithy.api#documentation": "

The type of image.

", - "smithy.api#xmlName": "imageType" - } - }, - "Public": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "IsPublic", - "smithy.api#documentation": "

Indicates whether the image has public launch permissions. The value is true if\n\t\t\t\tthis image has public launch permissions or false\n\t\t\t\tif it has only implicit and explicit launch permissions.

", - "smithy.api#xmlName": "isPublic" - } - }, - "KernelId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "KernelId", - "smithy.api#documentation": "

The kernel associated with the image, if any. Only applicable for machine images.

", - "smithy.api#xmlName": "kernelId" - } - }, - "OwnerId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "ImageOwnerId", - "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the image.

", - "smithy.api#xmlName": "imageOwnerId" - } - }, - "Platform": { - "target": "com.amazonaws.ec2#PlatformValues", - "traits": { - "aws.protocols#ec2QueryName": "Platform", - "smithy.api#documentation": "

This value is set to windows for Windows AMIs; otherwise, it is blank.

", - "smithy.api#xmlName": "platform" - } - }, "PlatformDetails": { "target": "com.amazonaws.ec2#String", "traits": { @@ -56439,30 +56839,6 @@ "smithy.api#xmlName": "usageOperation" } }, - "ProductCodes": { - "target": "com.amazonaws.ec2#ProductCodeList", - "traits": { - "aws.protocols#ec2QueryName": "ProductCodes", - "smithy.api#documentation": "

Any product codes associated with the AMI.

", - "smithy.api#xmlName": "productCodes" - } - }, - "RamdiskId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "RamdiskId", - "smithy.api#documentation": "

The RAM disk associated with the image, if any. Only applicable for machine images.

", - "smithy.api#xmlName": "ramdiskId" - } - }, - "State": { - "target": "com.amazonaws.ec2#ImageState", - "traits": { - "aws.protocols#ec2QueryName": "ImageState", - "smithy.api#documentation": "

The current state of the AMI. If the state is available, the image is successfully registered and can be used to launch an instance.

", - "smithy.api#xmlName": "imageState" - } - }, "BlockDeviceMappings": { "target": "com.amazonaws.ec2#BlockDeviceMappingList", "traits": { @@ -56614,22 +56990,6 @@ "smithy.api#documentation": "

The date and time, in ISO 8601 date-time\n format, when the AMI was last used to launch an EC2 instance. When the AMI is used\n to launch an instance, there is a 24-hour delay before that usage is reported.

\n \n

\n lastLaunchedTime data is available starting April 2017.

\n
", "smithy.api#xmlName": "lastLaunchedTime" } - } - }, - "traits": { - "smithy.api#documentation": "

Describes an image.

" - } - }, - "com.amazonaws.ec2#ImageAttribute": { - "type": "structure", - "members": { - "BlockDeviceMappings": { - "target": "com.amazonaws.ec2#BlockDeviceMappingList", - "traits": { - "aws.protocols#ec2QueryName": "BlockDeviceMapping", - "smithy.api#documentation": "

The block device mapping entries.

", - "smithy.api#xmlName": "blockDeviceMapping" - } }, "ImageId": { "target": "com.amazonaws.ec2#String", @@ -56639,22 +56999,102 @@ "smithy.api#xmlName": "imageId" } }, - "LaunchPermissions": { - "target": "com.amazonaws.ec2#LaunchPermissionList", + "ImageLocation": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "LaunchPermission", - "smithy.api#documentation": "

The launch permissions.

", - "smithy.api#xmlName": "launchPermission" + "aws.protocols#ec2QueryName": "ImageLocation", + "smithy.api#documentation": "

The location of the AMI.

", + "smithy.api#xmlName": "imageLocation" + } + }, + "State": { + "target": "com.amazonaws.ec2#ImageState", + "traits": { + "aws.protocols#ec2QueryName": "ImageState", + "smithy.api#documentation": "

The current state of the AMI. If the state is available, the image is successfully registered and can be used to launch an instance.

", + "smithy.api#xmlName": "imageState" + } + }, + "OwnerId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "ImageOwnerId", + "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the image.

", + "smithy.api#xmlName": "imageOwnerId" + } + }, + "CreationDate": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CreationDate", + "smithy.api#documentation": "

The date and time the image was created.

", + "smithy.api#xmlName": "creationDate" + } + }, + "Public": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "IsPublic", + "smithy.api#documentation": "

Indicates whether the image has public launch permissions. The value is true if\n\t\t\t\tthis image has public launch permissions or false\n\t\t\t\tif it has only implicit and explicit launch permissions.

", + "smithy.api#xmlName": "isPublic" } }, "ProductCodes": { "target": "com.amazonaws.ec2#ProductCodeList", "traits": { "aws.protocols#ec2QueryName": "ProductCodes", - "smithy.api#documentation": "

The product codes.

", + "smithy.api#documentation": "

Any product codes associated with the AMI.

", "smithy.api#xmlName": "productCodes" } }, + "Architecture": { + "target": "com.amazonaws.ec2#ArchitectureValues", + "traits": { + "aws.protocols#ec2QueryName": "Architecture", + "smithy.api#documentation": "

The architecture of the image.

", + "smithy.api#xmlName": "architecture" + } + }, + "ImageType": { + "target": "com.amazonaws.ec2#ImageTypeValues", + "traits": { + "aws.protocols#ec2QueryName": "ImageType", + "smithy.api#documentation": "

The type of image.

", + "smithy.api#xmlName": "imageType" + } + }, + "KernelId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "KernelId", + "smithy.api#documentation": "

The kernel associated with the image, if any. Only applicable for machine images.

", + "smithy.api#xmlName": "kernelId" + } + }, + "RamdiskId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "RamdiskId", + "smithy.api#documentation": "

The RAM disk associated with the image, if any. Only applicable for machine images.

", + "smithy.api#xmlName": "ramdiskId" + } + }, + "Platform": { + "target": "com.amazonaws.ec2#PlatformValues", + "traits": { + "aws.protocols#ec2QueryName": "Platform", + "smithy.api#documentation": "

This value is set to windows for Windows AMIs; otherwise, it is blank.

", + "smithy.api#xmlName": "platform" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes an image.

" + } + }, + "com.amazonaws.ec2#ImageAttribute": { + "type": "structure", + "members": { "Description": { "target": "com.amazonaws.ec2#AttributeValue", "traits": { @@ -56734,6 +57174,38 @@ "smithy.api#documentation": "

Indicates whether deregistration protection is enabled for the AMI.

", "smithy.api#xmlName": "deregistrationProtection" } + }, + "ImageId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "ImageId", + "smithy.api#documentation": "

The ID of the AMI.

", + "smithy.api#xmlName": "imageId" + } + }, + "LaunchPermissions": { + "target": "com.amazonaws.ec2#LaunchPermissionList", + "traits": { + "aws.protocols#ec2QueryName": "LaunchPermission", + "smithy.api#documentation": "

The launch permissions.

", + "smithy.api#xmlName": "launchPermission" + } + }, + "ProductCodes": { + "target": "com.amazonaws.ec2#ProductCodeList", + "traits": { + "aws.protocols#ec2QueryName": "ProductCodes", + "smithy.api#documentation": "

The product codes.

", + "smithy.api#xmlName": "productCodes" + } + }, + "BlockDeviceMappings": { + "target": "com.amazonaws.ec2#BlockDeviceMappingList", + "traits": { + "aws.protocols#ec2QueryName": "BlockDeviceMapping", + "smithy.api#documentation": "

The block device mapping entries.

", + "smithy.api#xmlName": "blockDeviceMapping" + } } }, "traits": { @@ -57597,20 +58069,12 @@ "target": "com.amazonaws.ec2#ImportInstanceResult" }, "traits": { - "smithy.api#documentation": "\n

We recommend that you use the \n ImportImage\n \n API. For more information, see Importing a VM as an image using VM\n Import/Export in the VM Import/Export User Guide.

\n
\n

Creates an import instance task using metadata from the specified disk image.

\n

This API action is not supported by the Command Line Interface (CLI). For\n information about using the Amazon EC2 CLI, which is deprecated, see Importing\n a VM to Amazon EC2 in the Amazon EC2 CLI Reference PDF file.

\n

This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage\n instead.

\n

For information about the import manifest referenced by this API action, see VM Import Manifest.

" + "smithy.api#documentation": "\n

We recommend that you use the \n ImportImage\n \n API instead. For more information, see Importing a VM as an image using VM\n Import/Export in the VM Import/Export User Guide.

\n
\n

Creates an import instance task using metadata from the specified disk image.

\n

This API action supports only single-volume VMs. To import multi-volume VMs, use ImportImage\n instead.

\n

For information about the import manifest referenced by this API action, see VM Import Manifest.

\n

This API action is not supported by the Command Line Interface (CLI).

" } }, "com.amazonaws.ec2#ImportInstanceLaunchSpecification": { "type": "structure", "members": { - "AdditionalInfo": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "AdditionalInfo", - "smithy.api#documentation": "

Reserved.

", - "smithy.api#xmlName": "additionalInfo" - } - }, "Architecture": { "target": "com.amazonaws.ec2#ArchitectureValues", "traits": { @@ -57619,6 +58083,13 @@ "smithy.api#xmlName": "architecture" } }, + "GroupNames": { + "target": "com.amazonaws.ec2#SecurityGroupStringList", + "traits": { + "smithy.api#documentation": "

The security group names.

", + "smithy.api#xmlName": "GroupName" + } + }, "GroupIds": { "target": "com.amazonaws.ec2#SecurityGroupIdStringList", "traits": { @@ -57626,19 +58097,20 @@ "smithy.api#xmlName": "GroupId" } }, - "GroupNames": { - "target": "com.amazonaws.ec2#SecurityGroupStringList", + "AdditionalInfo": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The security group names.

", - "smithy.api#xmlName": "GroupName" + "aws.protocols#ec2QueryName": "AdditionalInfo", + "smithy.api#documentation": "

Reserved.

", + "smithy.api#xmlName": "additionalInfo" } }, - "InstanceInitiatedShutdownBehavior": { - "target": "com.amazonaws.ec2#ShutdownBehavior", + "UserData": { + "target": "com.amazonaws.ec2#UserData", "traits": { - "aws.protocols#ec2QueryName": "InstanceInitiatedShutdownBehavior", - "smithy.api#documentation": "

Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the\n operating system command for system shutdown).

", - "smithy.api#xmlName": "instanceInitiatedShutdownBehavior" + "aws.protocols#ec2QueryName": "UserData", + "smithy.api#documentation": "

The Base64-encoded user data to make available to the instance.

", + "smithy.api#xmlName": "userData" } }, "InstanceType": { @@ -57649,14 +58121,6 @@ "smithy.api#xmlName": "instanceType" } }, - "Monitoring": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "Monitoring", - "smithy.api#documentation": "

Indicates whether monitoring is enabled.

", - "smithy.api#xmlName": "monitoring" - } - }, "Placement": { "target": "com.amazonaws.ec2#Placement", "traits": { @@ -57665,12 +58129,12 @@ "smithy.api#xmlName": "placement" } }, - "PrivateIpAddress": { - "target": "com.amazonaws.ec2#String", + "Monitoring": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "aws.protocols#ec2QueryName": "PrivateIpAddress", - "smithy.api#documentation": "

[EC2-VPC] An available IP address from the IP address range of the subnet.

", - "smithy.api#xmlName": "privateIpAddress" + "aws.protocols#ec2QueryName": "Monitoring", + "smithy.api#documentation": "

Indicates whether monitoring is enabled.

", + "smithy.api#xmlName": "monitoring" } }, "SubnetId": { @@ -57681,12 +58145,20 @@ "smithy.api#xmlName": "subnetId" } }, - "UserData": { - "target": "com.amazonaws.ec2#UserData", + "InstanceInitiatedShutdownBehavior": { + "target": "com.amazonaws.ec2#ShutdownBehavior", "traits": { - "aws.protocols#ec2QueryName": "UserData", - "smithy.api#documentation": "

The Base64-encoded user data to make available to the instance.

", - "smithy.api#xmlName": "userData" + "aws.protocols#ec2QueryName": "InstanceInitiatedShutdownBehavior", + "smithy.api#documentation": "

Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the\n operating system command for system shutdown).

", + "smithy.api#xmlName": "instanceInitiatedShutdownBehavior" + } + }, + "PrivateIpAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PrivateIpAddress", + "smithy.api#documentation": "

[EC2-VPC] An available IP address from the IP address range of the subnet.

", + "smithy.api#xmlName": "privateIpAddress" } } }, @@ -57697,22 +58169,6 @@ "com.amazonaws.ec2#ImportInstanceRequest": { "type": "structure", "members": { - "Description": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

A description for the instance being imported.

", - "smithy.api#xmlName": "description" - } - }, - "DiskImages": { - "target": "com.amazonaws.ec2#DiskImageList", - "traits": { - "aws.protocols#ec2QueryName": "DiskImage", - "smithy.api#documentation": "

The disk image.

", - "smithy.api#xmlName": "diskImage" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -57721,6 +58177,14 @@ "smithy.api#xmlName": "dryRun" } }, + "Description": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

A description for the instance being imported.

", + "smithy.api#xmlName": "description" + } + }, "LaunchSpecification": { "target": "com.amazonaws.ec2#ImportInstanceLaunchSpecification", "traits": { @@ -57729,6 +58193,14 @@ "smithy.api#xmlName": "launchSpecification" } }, + "DiskImages": { + "target": "com.amazonaws.ec2#DiskImageList", + "traits": { + "aws.protocols#ec2QueryName": "DiskImage", + "smithy.api#documentation": "

The disk image.

", + "smithy.api#xmlName": "diskImage" + } + }, "Platform": { "target": "com.amazonaws.ec2#PlatformValues", "traits": { @@ -57888,6 +58360,13 @@ "com.amazonaws.ec2#ImportKeyPairRequest": { "type": "structure", "members": { + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", + "traits": { + "smithy.api#documentation": "

The tags to apply to the imported key pair.

", + "smithy.api#xmlName": "TagSpecification" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -57915,13 +58394,6 @@ "smithy.api#required": {}, "smithy.api#xmlName": "publicKeyMaterial" } - }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", - "traits": { - "smithy.api#documentation": "

The tags to apply to the imported key pair.

", - "smithy.api#xmlName": "TagSpecification" - } } }, "traits": { @@ -58171,12 +58643,20 @@ "target": "com.amazonaws.ec2#ImportVolumeResult" }, "traits": { - "smithy.api#documentation": "

Creates an import volume task using metadata from the specified disk image.

\n

This API action supports only single-volume VMs. To import multi-volume VMs, use \n ImportImage instead. To import a disk to a snapshot, use\n ImportSnapshot instead.

\n

This API action is not supported by the Command Line Interface (CLI). For \n information about using the Amazon EC2 CLI, which is deprecated, see Importing Disks to Amazon EBS in the Amazon EC2 CLI Reference PDF file.

\n

For information about the import manifest referenced by this API action, see VM Import Manifest.

" + "smithy.api#documentation": "\n

This API action supports only single-volume VMs. To import multi-volume VMs, use \n ImportImage instead. To import a disk to a snapshot, use\n ImportSnapshot instead.

\n
\n

Creates an import volume task using metadata from the specified disk image.

\n

For information about the import manifest referenced by this API action, see VM Import Manifest.

\n

This API action is not supported by the Command Line Interface (CLI).

" } }, "com.amazonaws.ec2#ImportVolumeRequest": { "type": "structure", "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, "AvailabilityZone": { "target": "com.amazonaws.ec2#String", "traits": { @@ -58187,22 +58667,6 @@ "smithy.api#xmlName": "availabilityZone" } }, - "Description": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

A description of the volume.

", - "smithy.api#xmlName": "description" - } - }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "Image": { "target": "com.amazonaws.ec2#DiskImageDetail", "traits": { @@ -58213,6 +58677,14 @@ "smithy.api#xmlName": "image" } }, + "Description": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

A description of the volume.

", + "smithy.api#xmlName": "description" + } + }, "Volume": { "target": "com.amazonaws.ec2#VolumeDetail", "traits": { @@ -58313,7 +58785,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the Inference accelerators for the instance type.

" + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

Describes the Inference accelerators for the instance type.

" } }, "com.amazonaws.ec2#InferenceDeviceCount": { @@ -58356,7 +58828,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the Inference accelerators for the instance type.

" + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

Describes the Inference accelerators for the instance type.

" } }, "com.amazonaws.ec2#InferenceDeviceInfoList": { @@ -58381,7 +58853,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the memory available to the inference accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

Describes the memory available to the inference accelerator.

" } }, "com.amazonaws.ec2#InferenceDeviceMemorySize": { @@ -58402,166 +58874,6 @@ "com.amazonaws.ec2#Instance": { "type": "structure", "members": { - "AmiLaunchIndex": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "AmiLaunchIndex", - "smithy.api#documentation": "

The AMI launch index, which can be used to find this instance in the launch\n group.

", - "smithy.api#xmlName": "amiLaunchIndex" - } - }, - "ImageId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "ImageId", - "smithy.api#documentation": "

The ID of the AMI used to launch the instance.

", - "smithy.api#xmlName": "imageId" - } - }, - "InstanceId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "InstanceId", - "smithy.api#documentation": "

The ID of the instance.

", - "smithy.api#xmlName": "instanceId" - } - }, - "InstanceType": { - "target": "com.amazonaws.ec2#InstanceType", - "traits": { - "aws.protocols#ec2QueryName": "InstanceType", - "smithy.api#documentation": "

The instance type.

", - "smithy.api#xmlName": "instanceType" - } - }, - "KernelId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "KernelId", - "smithy.api#documentation": "

The kernel associated with this instance, if applicable.

", - "smithy.api#xmlName": "kernelId" - } - }, - "KeyName": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "KeyName", - "smithy.api#documentation": "

The name of the key pair, if this instance was launched with an associated key\n pair.

", - "smithy.api#xmlName": "keyName" - } - }, - "LaunchTime": { - "target": "com.amazonaws.ec2#DateTime", - "traits": { - "aws.protocols#ec2QueryName": "LaunchTime", - "smithy.api#documentation": "

The time the instance was launched.

", - "smithy.api#xmlName": "launchTime" - } - }, - "Monitoring": { - "target": "com.amazonaws.ec2#Monitoring", - "traits": { - "aws.protocols#ec2QueryName": "Monitoring", - "smithy.api#documentation": "

The monitoring for the instance.

", - "smithy.api#xmlName": "monitoring" - } - }, - "Placement": { - "target": "com.amazonaws.ec2#Placement", - "traits": { - "aws.protocols#ec2QueryName": "Placement", - "smithy.api#documentation": "

The location where the instance launched, if applicable.

", - "smithy.api#xmlName": "placement" - } - }, - "Platform": { - "target": "com.amazonaws.ec2#PlatformValues", - "traits": { - "aws.protocols#ec2QueryName": "Platform", - "smithy.api#documentation": "

The platform. This value is windows for Windows instances; otherwise, it is empty.

", - "smithy.api#xmlName": "platform" - } - }, - "PrivateDnsName": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "PrivateDnsName", - "smithy.api#documentation": "

[IPv4 only] The private DNS hostname assigned to the instance. This DNS hostname\n can only be used inside the Amazon EC2 network. This name is not available until the\n instance enters the running state.

\n

The Amazon-provided DNS server resolves Amazon-provided private DNS\n hostnames if you've enabled DNS resolution and DNS hostnames in your VPC. If you are not\n using the Amazon-provided DNS server in your VPC, your custom domain name servers must\n resolve the hostname as appropriate.

", - "smithy.api#xmlName": "privateDnsName" - } - }, - "PrivateIpAddress": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "PrivateIpAddress", - "smithy.api#documentation": "

The private IPv4 address assigned to the instance.

", - "smithy.api#xmlName": "privateIpAddress" - } - }, - "ProductCodes": { - "target": "com.amazonaws.ec2#ProductCodeList", - "traits": { - "aws.protocols#ec2QueryName": "ProductCodes", - "smithy.api#documentation": "

The product codes attached to this instance, if applicable.

", - "smithy.api#xmlName": "productCodes" - } - }, - "PublicDnsName": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "DnsName", - "smithy.api#documentation": "

[IPv4 only] The public DNS name assigned to the instance. This name is not available\n until the instance enters the running state. This name is only\n available if you've enabled DNS hostnames for your VPC.

", - "smithy.api#xmlName": "dnsName" - } - }, - "PublicIpAddress": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "IpAddress", - "smithy.api#documentation": "

The public IPv4 address, or the Carrier IP address assigned to the instance, if\n applicable.

\n

A Carrier IP address only applies to an instance launched in a subnet associated with\n a Wavelength Zone.

", - "smithy.api#xmlName": "ipAddress" - } - }, - "RamdiskId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "RamdiskId", - "smithy.api#documentation": "

The RAM disk associated with this instance, if applicable.

", - "smithy.api#xmlName": "ramdiskId" - } - }, - "State": { - "target": "com.amazonaws.ec2#InstanceState", - "traits": { - "aws.protocols#ec2QueryName": "InstanceState", - "smithy.api#documentation": "

The current state of the instance.

", - "smithy.api#xmlName": "instanceState" - } - }, - "StateTransitionReason": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Reason", - "smithy.api#documentation": "

The reason for the most recent state transition. This might be an empty string.

", - "smithy.api#xmlName": "reason" - } - }, - "SubnetId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "SubnetId", - "smithy.api#documentation": "

The ID of the subnet in which the instance is running.

", - "smithy.api#xmlName": "subnetId" - } - }, - "VpcId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VpcId", - "smithy.api#documentation": "

The ID of the VPC in which the instance is running.

", - "smithy.api#xmlName": "vpcId" - } - }, "Architecture": { "target": "com.amazonaws.ec2#ArchitectureValues", "traits": { @@ -58630,7 +58942,7 @@ "target": "com.amazonaws.ec2#ElasticGpuAssociationList", "traits": { "aws.protocols#ec2QueryName": "ElasticGpuAssociationSet", - "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
", + "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
", "smithy.api#xmlName": "elasticGpuAssociationSet" } }, @@ -58638,7 +58950,7 @@ "target": "com.amazonaws.ec2#ElasticInferenceAcceleratorAssociationList", "traits": { "aws.protocols#ec2QueryName": "ElasticInferenceAcceleratorAssociationSet", - "smithy.api#documentation": "

The elastic inference accelerator associated with the instance.

", + "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Inference is no longer available.

\n
", "smithy.api#xmlName": "elasticInferenceAcceleratorAssociationSet" } }, @@ -58857,6 +59169,166 @@ "smithy.api#documentation": "

The boot mode that is used to boot the instance at launch or start. For more information, see Boot modes in the\n Amazon EC2 User Guide.

", "smithy.api#xmlName": "currentInstanceBootMode" } + }, + "InstanceId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#documentation": "

The ID of the instance.

", + "smithy.api#xmlName": "instanceId" + } + }, + "ImageId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "ImageId", + "smithy.api#documentation": "

The ID of the AMI used to launch the instance.

", + "smithy.api#xmlName": "imageId" + } + }, + "State": { + "target": "com.amazonaws.ec2#InstanceState", + "traits": { + "aws.protocols#ec2QueryName": "InstanceState", + "smithy.api#documentation": "

The current state of the instance.

", + "smithy.api#xmlName": "instanceState" + } + }, + "PrivateDnsName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PrivateDnsName", + "smithy.api#documentation": "

[IPv4 only] The private DNS hostname assigned to the instance. This DNS hostname\n can only be used inside the Amazon EC2 network. This name is not available until the\n instance enters the running state.

\n

The Amazon-provided DNS server resolves Amazon-provided private DNS\n hostnames if you've enabled DNS resolution and DNS hostnames in your VPC. If you are not\n using the Amazon-provided DNS server in your VPC, your custom domain name servers must\n resolve the hostname as appropriate.

", + "smithy.api#xmlName": "privateDnsName" + } + }, + "PublicDnsName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DnsName", + "smithy.api#documentation": "

[IPv4 only] The public DNS name assigned to the instance. This name is not available\n until the instance enters the running state. This name is only\n available if you've enabled DNS hostnames for your VPC.

", + "smithy.api#xmlName": "dnsName" + } + }, + "StateTransitionReason": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Reason", + "smithy.api#documentation": "

The reason for the most recent state transition. This might be an empty string.

", + "smithy.api#xmlName": "reason" + } + }, + "KeyName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "KeyName", + "smithy.api#documentation": "

The name of the key pair, if this instance was launched with an associated key\n pair.

", + "smithy.api#xmlName": "keyName" + } + }, + "AmiLaunchIndex": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "AmiLaunchIndex", + "smithy.api#documentation": "

The AMI launch index, which can be used to find this instance in the launch\n group.

", + "smithy.api#xmlName": "amiLaunchIndex" + } + }, + "ProductCodes": { + "target": "com.amazonaws.ec2#ProductCodeList", + "traits": { + "aws.protocols#ec2QueryName": "ProductCodes", + "smithy.api#documentation": "

The product codes attached to this instance, if applicable.

", + "smithy.api#xmlName": "productCodes" + } + }, + "InstanceType": { + "target": "com.amazonaws.ec2#InstanceType", + "traits": { + "aws.protocols#ec2QueryName": "InstanceType", + "smithy.api#documentation": "

The instance type.

", + "smithy.api#xmlName": "instanceType" + } + }, + "LaunchTime": { + "target": "com.amazonaws.ec2#DateTime", + "traits": { + "aws.protocols#ec2QueryName": "LaunchTime", + "smithy.api#documentation": "

The time the instance was launched.

", + "smithy.api#xmlName": "launchTime" + } + }, + "Placement": { + "target": "com.amazonaws.ec2#Placement", + "traits": { + "aws.protocols#ec2QueryName": "Placement", + "smithy.api#documentation": "

The location where the instance launched, if applicable.

", + "smithy.api#xmlName": "placement" + } + }, + "KernelId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "KernelId", + "smithy.api#documentation": "

The kernel associated with this instance, if applicable.

", + "smithy.api#xmlName": "kernelId" + } + }, + "RamdiskId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "RamdiskId", + "smithy.api#documentation": "

The RAM disk associated with this instance, if applicable.

", + "smithy.api#xmlName": "ramdiskId" + } + }, + "Platform": { + "target": "com.amazonaws.ec2#PlatformValues", + "traits": { + "aws.protocols#ec2QueryName": "Platform", + "smithy.api#documentation": "

The platform. This value is windows for Windows instances; otherwise, it is empty.

", + "smithy.api#xmlName": "platform" + } + }, + "Monitoring": { + "target": "com.amazonaws.ec2#Monitoring", + "traits": { + "aws.protocols#ec2QueryName": "Monitoring", + "smithy.api#documentation": "

The monitoring for the instance.

", + "smithy.api#xmlName": "monitoring" + } + }, + "SubnetId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SubnetId", + "smithy.api#documentation": "

The ID of the subnet in which the instance is running.

", + "smithy.api#xmlName": "subnetId" + } + }, + "VpcId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#documentation": "

The ID of the VPC in which the instance is running.

", + "smithy.api#xmlName": "vpcId" + } + }, + "PrivateIpAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PrivateIpAddress", + "smithy.api#documentation": "

The private IPv4 address assigned to the instance.

", + "smithy.api#xmlName": "privateIpAddress" + } + }, + "PublicIpAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "IpAddress", + "smithy.api#documentation": "

The public IPv4 address, or the Carrier IP address assigned to the instance, if\n applicable.

\n

A Carrier IP address only applies to an instance launched in a subnet associated with\n a Wavelength Zone.

", + "smithy.api#xmlName": "ipAddress" + } } }, "traits": { @@ -58906,14 +59378,6 @@ "com.amazonaws.ec2#InstanceAttribute": { "type": "structure", "members": { - "Groups": { - "target": "com.amazonaws.ec2#GroupIdentifierList", - "traits": { - "aws.protocols#ec2QueryName": "GroupSet", - "smithy.api#documentation": "

The security groups associated with the instance.

", - "smithy.api#xmlName": "groupSet" - } - }, "BlockDeviceMappings": { "target": "com.amazonaws.ec2#InstanceBlockDeviceMappingList", "traits": { @@ -59041,6 +59505,14 @@ "smithy.api#documentation": "

To enable the instance for Amazon Web Services Stop Protection, set this parameter to\n true; otherwise, set it to false.

", "smithy.api#xmlName": "disableApiStop" } + }, + "Groups": { + "target": "com.amazonaws.ec2#GroupIdentifierList", + "traits": { + "aws.protocols#ec2QueryName": "GroupSet", + "smithy.api#documentation": "

The security groups associated with the instance.

", + "smithy.api#xmlName": "groupSet" + } } }, "traits": { @@ -59217,14 +59689,6 @@ "smithy.api#xmlName": "ebs" } }, - "NoDevice": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "NoDevice", - "smithy.api#documentation": "

Suppresses the specified device included in the block device mapping.

", - "smithy.api#xmlName": "noDevice" - } - }, "VirtualName": { "target": "com.amazonaws.ec2#String", "traits": { @@ -59232,6 +59696,14 @@ "smithy.api#documentation": "

The virtual device name.

", "smithy.api#xmlName": "virtualName" } + }, + "NoDevice": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NoDevice", + "smithy.api#documentation": "

Suppresses the specified device included in the block device mapping.

", + "smithy.api#xmlName": "noDevice" + } } }, "traits": { @@ -60990,7 +61462,7 @@ "target": "com.amazonaws.ec2#AcceleratorTypeSet", "traits": { "aws.protocols#ec2QueryName": "AcceleratorTypeSet", - "smithy.api#documentation": "

The accelerator types that must be on the instance type.

\n
    \n
  • \n

    For instance types with GPU accelerators, specify gpu.

    \n
  • \n
  • \n

    For instance types with FPGA accelerators, specify fpga.

    \n
  • \n
  • \n

    For instance types with inference accelerators, specify inference.

    \n
  • \n
\n

Default: Any accelerator type

", + "smithy.api#documentation": "

The accelerator types that must be on the instance type.

\n
    \n
  • \n

    For instance types with GPU accelerators, specify gpu.

    \n
  • \n
  • \n

    For instance types with FPGA accelerators, specify fpga.

    \n
  • \n
\n

Default: Any accelerator type

", "smithy.api#xmlName": "acceleratorTypeSet" } }, @@ -61165,7 +61637,7 @@ "AcceleratorTypes": { "target": "com.amazonaws.ec2#AcceleratorTypeSet", "traits": { - "smithy.api#documentation": "

The accelerator types that must be on the instance type.

\n
    \n
  • \n

    To include instance types with GPU hardware, specify gpu.

    \n
  • \n
  • \n

    To include instance types with FPGA hardware, specify fpga.

    \n
  • \n
  • \n

    To include instance types with inference hardware, specify inference.

    \n
  • \n
\n

Default: Any accelerator type

", + "smithy.api#documentation": "

The accelerator types that must be on the instance type.

\n
    \n
  • \n

    To include instance types with GPU hardware, specify gpu.

    \n
  • \n
  • \n

    To include instance types with FPGA hardware, specify fpga.

    \n
  • \n
\n

Default: Any accelerator type

", "smithy.api#xmlName": "AcceleratorType" } }, @@ -61312,14 +61784,6 @@ "com.amazonaws.ec2#InstanceStateChange": { "type": "structure", "members": { - "CurrentState": { - "target": "com.amazonaws.ec2#InstanceState", - "traits": { - "aws.protocols#ec2QueryName": "CurrentState", - "smithy.api#documentation": "

The current state of the instance.

", - "smithy.api#xmlName": "currentState" - } - }, "InstanceId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -61328,6 +61792,14 @@ "smithy.api#xmlName": "instanceId" } }, + "CurrentState": { + "target": "com.amazonaws.ec2#InstanceState", + "traits": { + "aws.protocols#ec2QueryName": "CurrentState", + "smithy.api#documentation": "

The current state of the instance.

", + "smithy.api#xmlName": "currentState" + } + }, "PreviousState": { "target": "com.amazonaws.ec2#InstanceState", "traits": { @@ -66673,6 +67145,54 @@ "traits": { "smithy.api#enumValue": "mac2-m1ultra.metal" } + }, + "g6e_xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "g6e.xlarge" + } + }, + "g6e_2xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "g6e.2xlarge" + } + }, + "g6e_4xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "g6e.4xlarge" + } + }, + "g6e_8xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "g6e.8xlarge" + } + }, + "g6e_12xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "g6e.12xlarge" + } + }, + "g6e_16xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "g6e.16xlarge" + } + }, + "g6e_24xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "g6e.24xlarge" + } + }, + "g6e_48xlarge": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "g6e.48xlarge" + } } } }, @@ -67284,6 +67804,14 @@ "com.amazonaws.ec2#IpPermission": { "type": "structure", "members": { + "IpProtocol": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "IpProtocol", + "smithy.api#documentation": "

The IP protocol name (tcp, udp, icmp, icmpv6) \n or number (see Protocol Numbers).

\n

Use -1 to specify all protocols. When authorizing\n security group rules, specifying -1 or a protocol number other than\n tcp, udp, icmp, or icmpv6 allows\n traffic on all ports, regardless of any port range you specify. For tcp,\n udp, and icmp, you must specify a port range. For icmpv6,\n the port range is optional; if you omit the port range, traffic for all types and codes is allowed.

", + "smithy.api#xmlName": "ipProtocol" + } + }, "FromPort": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -67292,12 +67820,20 @@ "smithy.api#xmlName": "fromPort" } }, - "IpProtocol": { - "target": "com.amazonaws.ec2#String", + "ToPort": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "IpProtocol", - "smithy.api#documentation": "

The IP protocol name (tcp, udp, icmp, icmpv6) \n or number (see Protocol Numbers).

\n

Use -1 to specify all protocols. When authorizing\n security group rules, specifying -1 or a protocol number other than\n tcp, udp, icmp, or icmpv6 allows\n traffic on all ports, regardless of any port range you specify. For tcp,\n udp, and icmp, you must specify a port range. For icmpv6,\n the port range is optional; if you omit the port range, traffic for all types and codes is allowed.

", - "smithy.api#xmlName": "ipProtocol" + "aws.protocols#ec2QueryName": "ToPort", + "smithy.api#documentation": "

If the protocol is TCP or UDP, this is the end of the port range.\n If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). \n If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes).

", + "smithy.api#xmlName": "toPort" + } + }, + "UserIdGroupPairs": { + "target": "com.amazonaws.ec2#UserIdGroupPairList", + "traits": { + "aws.protocols#ec2QueryName": "Groups", + "smithy.api#documentation": "

The security group and Amazon Web Services account ID pairs.

", + "smithy.api#xmlName": "groups" } }, "IpRanges": { @@ -67323,22 +67859,6 @@ "smithy.api#documentation": "

The prefix list IDs.

", "smithy.api#xmlName": "prefixListIds" } - }, - "ToPort": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "ToPort", - "smithy.api#documentation": "

If the protocol is TCP or UDP, this is the end of the port range.\n If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). \n If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes).

", - "smithy.api#xmlName": "toPort" - } - }, - "UserIdGroupPairs": { - "target": "com.amazonaws.ec2#UserIdGroupPairList", - "traits": { - "aws.protocols#ec2QueryName": "Groups", - "smithy.api#documentation": "

The security group and Amazon Web Services account ID pairs.

", - "smithy.api#xmlName": "groups" - } } }, "traits": { @@ -67366,14 +67886,6 @@ "com.amazonaws.ec2#IpRange": { "type": "structure", "members": { - "CidrIp": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "CidrIp", - "smithy.api#documentation": "

The IPv4 address range. You can either specify a CIDR block or a source security group,\n not both. To specify a single IPv4 address, use the /32 prefix length.

", - "smithy.api#xmlName": "cidrIp" - } - }, "Description": { "target": "com.amazonaws.ec2#String", "traits": { @@ -67381,6 +67893,14 @@ "smithy.api#documentation": "

A description for the security group rule that references this IPv4 address range.

\n

Constraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9,\n spaces, and ._-:/()#,@[]+=&;{}!$*

", "smithy.api#xmlName": "description" } + }, + "CidrIp": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CidrIp", + "smithy.api#documentation": "

The IPv4 address range. You can either specify a CIDR block or a source security group,\n not both. To specify a single IPv4 address, use the /32 prefix length.

", + "smithy.api#xmlName": "cidrIp" + } } }, "traits": { @@ -68096,6 +68616,14 @@ "smithy.api#xmlName": "vpcId" } }, + "SubnetId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SubnetId", + "smithy.api#documentation": "

The subnet ID.

", + "smithy.api#xmlName": "subnetId" + } + }, "NetworkInterfaceAttachmentStatus": { "target": "com.amazonaws.ec2#IpamNetworkInterfaceAttachmentStatus", "traits": { @@ -70525,14 +71053,6 @@ "com.amazonaws.ec2#Ipv6Range": { "type": "structure", "members": { - "CidrIpv6": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "CidrIpv6", - "smithy.api#documentation": "

The IPv6 address range. You can either specify a CIDR block or a source security group,\n not both. To specify a single IPv6 address, use the /128 prefix length.

", - "smithy.api#xmlName": "cidrIpv6" - } - }, "Description": { "target": "com.amazonaws.ec2#String", "traits": { @@ -70540,6 +71060,14 @@ "smithy.api#documentation": "

A description for the security group rule that references this IPv6 address range.

\n

Constraints: Up to 255 characters in length. Allowed characters are a-z, A-Z, 0-9,\n spaces, and ._-:/()#,@[]+=&;{}!$*

", "smithy.api#xmlName": "description" } + }, + "CidrIpv6": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CidrIpv6", + "smithy.api#documentation": "

The IPv6 address range. You can either specify a CIDR block or a source security group,\n not both. To specify a single IPv6 address, use the /128 prefix length.

", + "smithy.api#xmlName": "cidrIpv6" + } } }, "traits": { @@ -70604,20 +71132,20 @@ "com.amazonaws.ec2#KeyPair": { "type": "structure", "members": { - "KeyFingerprint": { + "KeyPairId": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "KeyFingerprint", - "smithy.api#documentation": "
  • For RSA key pairs, the key fingerprint is the SHA-1 digest of the DER encoded private key.
  • For ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256 digest, which is the default for OpenSSH, starting with OpenSSH 6.8.
", - "smithy.api#xmlName": "keyFingerprint" + "aws.protocols#ec2QueryName": "KeyPairId", + "smithy.api#documentation": "

The ID of the key pair.

", + "smithy.api#xmlName": "keyPairId" } }, - "KeyMaterial": { - "target": "com.amazonaws.ec2#SensitiveUserData", + "Tags": { + "target": "com.amazonaws.ec2#TagList", "traits": { - "aws.protocols#ec2QueryName": "KeyMaterial", - "smithy.api#documentation": "

An unencrypted PEM encoded RSA or ED25519 private key.

", - "smithy.api#xmlName": "keyMaterial" + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "

Any tags applied to the key pair.

", + "smithy.api#xmlName": "tagSet" } }, "KeyName": { @@ -70628,20 +71156,20 @@ "smithy.api#xmlName": "keyName" } }, - "KeyPairId": { + "KeyFingerprint": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "KeyPairId", - "smithy.api#documentation": "

The ID of the key pair.

", - "smithy.api#xmlName": "keyPairId" + "aws.protocols#ec2QueryName": "KeyFingerprint", + "smithy.api#documentation": "
  • For RSA key pairs, the key fingerprint is the SHA-1 digest of the DER encoded private key.
  • For ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256 digest, which is the default for OpenSSH, starting with OpenSSH 6.8.
", + "smithy.api#xmlName": "keyFingerprint" } }, - "Tags": { - "target": "com.amazonaws.ec2#TagList", + "KeyMaterial": { + "target": "com.amazonaws.ec2#SensitiveUserData", "traits": { - "aws.protocols#ec2QueryName": "TagSet", - "smithy.api#documentation": "

Any tags applied to the key pair.

", - "smithy.api#xmlName": "tagSet" + "aws.protocols#ec2QueryName": "KeyMaterial", + "smithy.api#documentation": "

An unencrypted PEM encoded RSA or ED25519 private key.
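The KeyPair shape above is what CreateKeyPair returns, and the private key material is only available in that response. A hedged Soto sketch (placeholder key name; initializer and enum case names assumed from Soto's code-generation conventions):

```swift
import SotoEC2

// Sketch: create an ED25519 key pair; keyMaterial (the PEM private key) is returned
// only at creation time and cannot be retrieved again later.
func makeKeyPair(ec2: EC2) async throws -> String? {
    let response = try await ec2.createKeyPair(.init(keyName: "example-key", keyType: .ed25519))
    print(response.keyFingerprint ?? "no fingerprint")
    return response.keyMaterial // store securely
}
```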

", + "smithy.api#xmlName": "keyMaterial" } } }, @@ -70672,22 +71200,6 @@ "smithy.api#xmlName": "keyPairId" } }, - "KeyFingerprint": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "KeyFingerprint", - "smithy.api#documentation": "

If you used CreateKeyPair to create the key pair:

  • For RSA key pairs, the key fingerprint is the SHA-1 digest of the DER encoded private key.
  • For ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256 digest, which is the default for OpenSSH, starting with OpenSSH 6.8.

If you used ImportKeyPair to provide Amazon Web Services the public key:

  • For RSA key pairs, the key fingerprint is the MD5 public key fingerprint as specified in section 4 of RFC4716.
  • For ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256 digest, which is the default for OpenSSH, starting with OpenSSH 6.8.
", - "smithy.api#xmlName": "keyFingerprint" - } - }, - "KeyName": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "KeyName", - "smithy.api#documentation": "

The name of the key pair.

", - "smithy.api#xmlName": "keyName" - } - }, "KeyType": { "target": "com.amazonaws.ec2#KeyType", "traits": { @@ -70719,6 +71231,22 @@ "smithy.api#documentation": "

If you used Amazon EC2 to create the key pair, this is the date and time when the key\n was created, in ISO\n 8601 date-time format, in the UTC time zone.

\n

If you imported an existing key pair to Amazon EC2, this is the date and time the key\n was imported, in ISO\n 8601 date-time format, in the UTC time zone.

", "smithy.api#xmlName": "createTime" } + }, + "KeyName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "KeyName", + "smithy.api#documentation": "

The name of the key pair.

", + "smithy.api#xmlName": "keyName" + } + }, + "KeyFingerprint": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "KeyFingerprint", + "smithy.api#documentation": "

If you used CreateKeyPair to create the key pair:

  • For RSA key pairs, the key fingerprint is the SHA-1 digest of the DER encoded private key.
  • For ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256 digest, which is the default for OpenSSH, starting with OpenSSH 6.8.

If you used ImportKeyPair to provide Amazon Web Services the public key:

  • For RSA key pairs, the key fingerprint is the MD5 public key fingerprint as specified in section 4 of RFC4716.
  • For ED25519 key pairs, the key fingerprint is the base64-encoded SHA-256 digest, which is the default for OpenSSH, starting with OpenSSH 6.8.
", + "smithy.api#xmlName": "keyFingerprint" + } } }, "traits": { @@ -70737,6 +71265,9 @@ "com.amazonaws.ec2#KeyPairName": { "type": "string" }, + "com.amazonaws.ec2#KeyPairNameWithResolver": { + "type": "string" + }, "com.amazonaws.ec2#KeyType": { "type": "enum", "members": { @@ -70787,22 +71318,6 @@ "com.amazonaws.ec2#LaunchPermission": { "type": "structure", "members": { - "Group": { - "target": "com.amazonaws.ec2#PermissionGroup", - "traits": { - "aws.protocols#ec2QueryName": "Group", - "smithy.api#documentation": "

The name of the group.

", - "smithy.api#xmlName": "group" - } - }, - "UserId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "UserId", - "smithy.api#documentation": "

The Amazon Web Services account ID.

\n

Constraints: Up to 10 000 account IDs can be specified in a single request.

", - "smithy.api#xmlName": "userId" - } - }, "OrganizationArn": { "target": "com.amazonaws.ec2#String", "traits": { @@ -70818,6 +71333,22 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an organizational unit (OU).

", "smithy.api#xmlName": "organizationalUnitArn" } + }, + "UserId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "UserId", + "smithy.api#documentation": "

The Amazon Web Services account ID.

\n

Constraints: Up to 10 000 account IDs can be specified in a single request.

", + "smithy.api#xmlName": "userId" + } + }, + "Group": { + "target": "com.amazonaws.ec2#PermissionGroup", + "traits": { + "aws.protocols#ec2QueryName": "Group", + "smithy.api#documentation": "

The name of the group.

", + "smithy.api#xmlName": "group" + } } }, "traits": { @@ -70864,14 +71395,6 @@ "smithy.api#xmlName": "userData" } }, - "SecurityGroups": { - "target": "com.amazonaws.ec2#GroupIdentifierList", - "traits": { - "aws.protocols#ec2QueryName": "GroupSet", - "smithy.api#documentation": "

The IDs of the security groups.

", - "smithy.api#xmlName": "groupSet" - } - }, "AddressingType": { "target": "com.amazonaws.ec2#String", "traits": { @@ -70968,6 +71491,14 @@ "smithy.api#xmlName": "subnetId" } }, + "SecurityGroups": { + "target": "com.amazonaws.ec2#GroupIdentifierList", + "traits": { + "aws.protocols#ec2QueryName": "GroupSet", + "smithy.api#documentation": "

The IDs of the security groups.

", + "smithy.api#xmlName": "groupSet" + } + }, "Monitoring": { "target": "com.amazonaws.ec2#RunInstancesMonitoringEnabled", "traits": { @@ -71466,7 +71997,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes an elastic inference accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

Describes an elastic inference accelerator.

" } }, "com.amazonaws.ec2#LaunchTemplateElasticInferenceAcceleratorCount": { @@ -71507,7 +72038,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes an elastic inference accelerator.

" + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

Describes an elastic inference accelerator.

" } }, "com.amazonaws.ec2#LaunchTemplateElasticInferenceAcceleratorResponseList": { @@ -75377,7 +75908,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } }, "InstanceFamily": { @@ -75658,24 +76189,6 @@ "com.amazonaws.ec2#ModifyHostsRequest": { "type": "structure", "members": { - "AutoPlacement": { - "target": "com.amazonaws.ec2#AutoPlacement", - "traits": { - "aws.protocols#ec2QueryName": "AutoPlacement", - "smithy.api#documentation": "

Specify whether to enable or disable auto-placement.

", - "smithy.api#xmlName": "autoPlacement" - } - }, - "HostIds": { - "target": "com.amazonaws.ec2#RequestHostIdList", - "traits": { - "aws.protocols#ec2QueryName": "HostId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The IDs of the Dedicated Hosts to modify.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "hostId" - } - }, "HostRecovery": { "target": "com.amazonaws.ec2#HostRecovery", "traits": { @@ -75699,6 +76212,24 @@ "traits": { "smithy.api#documentation": "

Indicates whether to enable or disable host maintenance for the Dedicated Host. For\n more information, see Host\n maintenance in the Amazon EC2 User Guide.

" } + }, + "HostIds": { + "target": "com.amazonaws.ec2#RequestHostIdList", + "traits": { + "aws.protocols#ec2QueryName": "HostId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The IDs of the Dedicated Hosts to modify.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "hostId" + } + }, + "AutoPlacement": { + "target": "com.amazonaws.ec2#AutoPlacement", + "traits": { + "aws.protocols#ec2QueryName": "AutoPlacement", + "smithy.api#documentation": "

Specify whether to enable or disable auto-placement.
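A hedged Soto sketch of the corresponding ModifyHosts call (placeholder host ID; enum case names assumed from the model's on/off values, and an already-configured `ec2: EC2` client):

```swift
import SotoEC2

// Sketch: enable auto-placement and host recovery for one Dedicated Host.
func enableAutoPlacement(ec2: EC2) async throws {
    let result = try await ec2.modifyHosts(
        .init(autoPlacement: .on, hostIds: ["h-0123456789abcdef0"], hostRecovery: .on)
    )
    print("modified:", result.successful ?? [])
}
```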

", + "smithy.api#xmlName": "autoPlacement" + } } }, "traits": { @@ -75780,16 +76311,6 @@ "com.amazonaws.ec2#ModifyIdentityIdFormatRequest": { "type": "structure", "members": { - "PrincipalArn": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "PrincipalArn", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ARN of the principal, which can be an IAM user, IAM role, or the root user. Specify\n all to modify the ID format for all IAM users, IAM roles, and the root user of\n the account.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "principalArn" - } - }, "Resource": { "target": "com.amazonaws.ec2#String", "traits": { @@ -75809,6 +76330,16 @@ "smithy.api#required": {}, "smithy.api#xmlName": "useLongIds" } + }, + "PrincipalArn": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PrincipalArn", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ARN of the principal, which can be an IAM user, IAM role, or the root user. Specify\n all to modify the ID format for all IAM users, IAM roles, and the root user of\n the account.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "principalArn" + } } }, "traits": { @@ -75921,14 +76452,6 @@ "smithy.api#documentation": "

The value of the attribute being modified. \n This parameter can be used only when the Attribute parameter is description or imdsSupport.

" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "OrganizationArns": { "target": "com.amazonaws.ec2#OrganizationArnStringList", "traits": { @@ -75948,6 +76471,14 @@ "traits": { "smithy.api#documentation": "

Set to v2.0 to indicate that IMDSv2 is specified in the AMI. Instances\n launched from this AMI will have HttpTokens automatically set to\n required so that, by default, the instance requires that IMDSv2 is used when\n requesting instance metadata. In addition, HttpPutResponseHopLimit is set to\n 2. For more information, see Configure\n the AMI in the Amazon EC2 User Guide.

\n \n

Do not use this parameter unless your AMI software supports IMDSv2. After you set the value to v2.0, \n you can't undo it. The only way to “reset” your AMI is to create a new AMI from the underlying snapshot.

\n
" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } } }, "traits": { @@ -76000,77 +76531,60 @@ "smithy.api#documentation": "

Enable or disable source/destination checks, which ensure that the instance is either\n the source or the destination of any traffic that it receives. If the value is\n true, source/destination checks are enabled; otherwise, they are\n disabled. The default value is true. You must disable source/destination\n checks if the instance runs services such as network address translation, routing, or\n firewalls.

" } }, - "Attribute": { - "target": "com.amazonaws.ec2#InstanceAttributeName", - "traits": { - "aws.protocols#ec2QueryName": "Attribute", - "smithy.api#documentation": "

The name of the attribute to modify.

\n \n

You can modify the following attributes only: disableApiTermination |\n instanceType | kernel | ramdisk |\n instanceInitiatedShutdownBehavior | blockDeviceMapping\n | userData | sourceDestCheck | groupSet |\n ebsOptimized | sriovNetSupport |\n enaSupport | nvmeSupport | disableApiStop\n | enclaveOptions\n

\n
", - "smithy.api#xmlName": "attribute" - } - }, - "BlockDeviceMappings": { - "target": "com.amazonaws.ec2#InstanceBlockDeviceMappingSpecificationList", - "traits": { - "aws.protocols#ec2QueryName": "BlockDeviceMapping", - "smithy.api#documentation": "

Modifies the DeleteOnTermination attribute for volumes that are currently\n attached. The volume must be owned by the caller. If no value is specified for\n DeleteOnTermination, the default is true and the volume is\n deleted when the instance is terminated. You can't modify the DeleteOnTermination \n attribute for volumes that are attached to Fargate tasks.

\n

To add instance store volumes to an Amazon EBS-backed instance, you must add them when\n you launch the instance. For more information, see Update the block device mapping when launching an instance in the\n Amazon EC2 User Guide.

", - "smithy.api#xmlName": "blockDeviceMapping" - } - }, - "DisableApiTermination": { + "DisableApiStop": { "target": "com.amazonaws.ec2#AttributeBooleanValue", "traits": { - "aws.protocols#ec2QueryName": "DisableApiTermination", - "smithy.api#documentation": "

If the value is true, you can't terminate the instance using the Amazon\n EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot\n Instances.

", - "smithy.api#xmlName": "disableApiTermination" + "smithy.api#documentation": "

Indicates whether an instance is enabled for stop protection. For more information,\n see Enable stop\n protection for your instance.

\n

" } }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, - "EbsOptimized": { - "target": "com.amazonaws.ec2#AttributeBooleanValue", + "InstanceId": { + "target": "com.amazonaws.ec2#InstanceId", "traits": { - "aws.protocols#ec2QueryName": "EbsOptimized", - "smithy.api#documentation": "

Specifies whether the instance is optimized for Amazon EBS I/O. This optimization\n provides dedicated throughput to Amazon EBS and an optimized configuration stack to\n provide optimal EBS I/O performance. This optimization isn't available with all instance\n types. Additional usage charges apply when using an EBS Optimized instance.

", - "smithy.api#xmlName": "ebsOptimized" + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the instance.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "instanceId" } }, - "EnaSupport": { - "target": "com.amazonaws.ec2#AttributeBooleanValue", + "Attribute": { + "target": "com.amazonaws.ec2#InstanceAttributeName", "traits": { - "aws.protocols#ec2QueryName": "EnaSupport", - "smithy.api#documentation": "

Set to true to enable enhanced networking with ENA for the\n instance.

\n

This option is supported only for HVM instances. Specifying this option with a PV\n instance can make it unreachable.

", - "smithy.api#xmlName": "enaSupport" + "aws.protocols#ec2QueryName": "Attribute", + "smithy.api#documentation": "

The name of the attribute to modify.

\n \n

You can modify the following attributes only: disableApiTermination |\n instanceType | kernel | ramdisk |\n instanceInitiatedShutdownBehavior | blockDeviceMapping\n | userData | sourceDestCheck | groupSet |\n ebsOptimized | sriovNetSupport |\n enaSupport | nvmeSupport | disableApiStop\n | enclaveOptions\n

\n
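A hedged Soto sketch of one such modification, termination protection, assuming an already-configured `ec2: EC2` client and a placeholder instance ID:

```swift
import SotoEC2

// Sketch: enable termination protection (disableApiTermination) on one instance.
// Attributes are modified one at a time.
func protectFromTermination(ec2: EC2) async throws {
    _ = try await ec2.modifyInstanceAttribute(
        .init(
            disableApiTermination: EC2.AttributeBooleanValue(value: true),
            instanceId: "i-0123456789abcdef0"
        )
    )
}
```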
", + "smithy.api#xmlName": "attribute" } }, - "Groups": { - "target": "com.amazonaws.ec2#GroupIdStringList", + "Value": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Replaces the security groups of the instance with the specified security groups.\n You must specify the ID of at least one security group, even if it's just the default\n security group for the VPC.

", - "smithy.api#xmlName": "GroupId" + "aws.protocols#ec2QueryName": "Value", + "smithy.api#documentation": "

A new value for the attribute. Use only with the kernel,\n ramdisk, userData, disableApiTermination, or\n instanceInitiatedShutdownBehavior attribute.

", + "smithy.api#xmlName": "value" } }, - "InstanceId": { - "target": "com.amazonaws.ec2#InstanceId", + "BlockDeviceMappings": { + "target": "com.amazonaws.ec2#InstanceBlockDeviceMappingSpecificationList", "traits": { - "aws.protocols#ec2QueryName": "InstanceId", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The ID of the instance.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "instanceId" + "aws.protocols#ec2QueryName": "BlockDeviceMapping", + "smithy.api#documentation": "

Modifies the DeleteOnTermination attribute for volumes that are currently\n attached. The volume must be owned by the caller. If no value is specified for\n DeleteOnTermination, the default is true and the volume is\n deleted when the instance is terminated. You can't modify the DeleteOnTermination \n attribute for volumes that are attached to Fargate tasks.

\n

To add instance store volumes to an Amazon EBS-backed instance, you must add them when\n you launch the instance. For more information, see Update the block device mapping when launching an instance in the\n Amazon EC2 User Guide.

", + "smithy.api#xmlName": "blockDeviceMapping" } }, - "InstanceInitiatedShutdownBehavior": { - "target": "com.amazonaws.ec2#AttributeValue", + "DisableApiTermination": { + "target": "com.amazonaws.ec2#AttributeBooleanValue", "traits": { - "aws.protocols#ec2QueryName": "InstanceInitiatedShutdownBehavior", - "smithy.api#documentation": "

Specifies whether an instance stops or terminates when you initiate shutdown from the\n instance (using the operating system command for system shutdown).

", - "smithy.api#xmlName": "instanceInitiatedShutdownBehavior" + "aws.protocols#ec2QueryName": "DisableApiTermination", + "smithy.api#documentation": "

If the value is true, you can't terminate the instance using the Amazon\n EC2 console, CLI, or API; otherwise, you can. You cannot use this parameter for Spot\n Instances.

", + "smithy.api#xmlName": "disableApiTermination" } }, "InstanceType": { @@ -76097,14 +76611,6 @@ "smithy.api#xmlName": "ramdisk" } }, - "SriovNetSupport": { - "target": "com.amazonaws.ec2#AttributeValue", - "traits": { - "aws.protocols#ec2QueryName": "SriovNetSupport", - "smithy.api#documentation": "

Set to simple to enable enhanced networking with the Intel 82599 Virtual\n Function interface for the instance.

\n

There is no way to disable enhanced networking with the Intel 82599 Virtual Function\n interface at this time.

\n

This option is supported only for HVM instances. Specifying this option with a PV\n instance can make it unreachable.

", - "smithy.api#xmlName": "sriovNetSupport" - } - }, "UserData": { "target": "com.amazonaws.ec2#BlobAttributeValue", "traits": { @@ -76113,18 +76619,43 @@ "smithy.api#xmlName": "userData" } }, - "Value": { - "target": "com.amazonaws.ec2#String", + "InstanceInitiatedShutdownBehavior": { + "target": "com.amazonaws.ec2#AttributeValue", "traits": { - "aws.protocols#ec2QueryName": "Value", - "smithy.api#documentation": "

A new value for the attribute. Use only with the kernel,\n ramdisk, userData, disableApiTermination, or\n instanceInitiatedShutdownBehavior attribute.

", - "smithy.api#xmlName": "value" + "aws.protocols#ec2QueryName": "InstanceInitiatedShutdownBehavior", + "smithy.api#documentation": "

Specifies whether an instance stops or terminates when you initiate shutdown from the\n instance (using the operating system command for system shutdown).

", + "smithy.api#xmlName": "instanceInitiatedShutdownBehavior" } }, - "DisableApiStop": { + "Groups": { + "target": "com.amazonaws.ec2#GroupIdStringList", + "traits": { + "smithy.api#documentation": "

Replaces the security groups of the instance with the specified security groups.\n You must specify the ID of at least one security group, even if it's just the default\n security group for the VPC.

", + "smithy.api#xmlName": "GroupId" + } + }, + "EbsOptimized": { "target": "com.amazonaws.ec2#AttributeBooleanValue", "traits": { - "smithy.api#documentation": "

Indicates whether an instance is enabled for stop protection. For more information,\n see Enable stop\n protection for your instance.

\n

" + "aws.protocols#ec2QueryName": "EbsOptimized", + "smithy.api#documentation": "

Specifies whether the instance is optimized for Amazon EBS I/O. This optimization\n provides dedicated throughput to Amazon EBS and an optimized configuration stack to\n provide optimal EBS I/O performance. This optimization isn't available with all instance\n types. Additional usage charges apply when using an EBS Optimized instance.

", + "smithy.api#xmlName": "ebsOptimized" + } + }, + "SriovNetSupport": { + "target": "com.amazonaws.ec2#AttributeValue", + "traits": { + "aws.protocols#ec2QueryName": "SriovNetSupport", + "smithy.api#documentation": "

Set to simple to enable enhanced networking with the Intel 82599 Virtual\n Function interface for the instance.

\n

There is no way to disable enhanced networking with the Intel 82599 Virtual Function\n interface at this time.

\n

This option is supported only for HVM instances. Specifying this option with a PV\n instance can make it unreachable.

", + "smithy.api#xmlName": "sriovNetSupport" + } + }, + "EnaSupport": { + "target": "com.amazonaws.ec2#AttributeBooleanValue", + "traits": { + "aws.protocols#ec2QueryName": "EnaSupport", + "smithy.api#documentation": "

Set to true to enable enhanced networking with ENA for the\n instance.

\n

This option is supported only for HVM instances. Specifying this option with a PV\n instance can make it unreachable.

", + "smithy.api#xmlName": "enaSupport" } } }, @@ -76190,6 +76721,88 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#ModifyInstanceCpuOptions": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#ModifyInstanceCpuOptionsRequest" + }, + "output": { + "target": "com.amazonaws.ec2#ModifyInstanceCpuOptionsResult" + }, + "traits": { + "smithy.api#documentation": "

By default, all vCPUs for the instance type are active when you launch an instance. When you \n\t\t\tconfigure the number of active vCPUs for the instance, it can help you save on licensing costs and \n\t\t\toptimize performance. The base cost of the instance remains unchanged.

\n

The number of active vCPUs equals the number of threads per CPU core multiplied by the number \n\t\t\tof cores. The instance must be in a Stopped state before you make changes.

\n \n

Some instance type options do not support this capability. For more information, see \n\t\t\t\tSupported CPU \n\t\t\t\t\toptions in the Amazon EC2 User Guide.

\n
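Once this new operation is regenerated into Soto, it should surface roughly as follows; a sketch with placeholder values, under the assumption that the usual request/result structs are generated (the instance must be stopped first):

```swift
import SotoEC2

// Sketch: run 2 cores with 1 thread per core on a stopped instance.
// Active vCPUs = cores x threads per core.
func trimActiveVCpus(ec2: EC2) async throws {
    let result = try await ec2.modifyInstanceCpuOptions(
        .init(coreCount: 2, instanceId: "i-0123456789abcdef0", threadsPerCore: 1)
    )
    print("active vCPUs:", (result.coreCount ?? 0) * (result.threadsPerCore ?? 0))
}
```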
" + } + }, + "com.amazonaws.ec2#ModifyInstanceCpuOptionsRequest": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.ec2#InstanceId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the instance to update.

", + "smithy.api#required": {} + } + }, + "CoreCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The number of CPU cores to activate for the specified instance.

", + "smithy.api#required": {} + } + }, + "ThreadsPerCore": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The number of threads to run for each CPU core.

", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#ModifyInstanceCpuOptionsResult": { + "type": "structure", + "members": { + "InstanceId": { + "target": "com.amazonaws.ec2#InstanceId", + "traits": { + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#documentation": "

The ID of the instance that was updated.

", + "smithy.api#xmlName": "instanceId" + } + }, + "CoreCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "CoreCount", + "smithy.api#documentation": "

The number of CPU cores that are running for the specified instance after the \n\t\t\tupdate.

", + "smithy.api#xmlName": "coreCount" + } + }, + "ThreadsPerCore": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "ThreadsPerCore", + "smithy.api#documentation": "

The number of threads that are running per CPU core for the specified \n\t\t\tinstance after the update.

", + "smithy.api#xmlName": "threadsPerCore" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#ModifyInstanceCreditSpecification": { "type": "operation", "input": { @@ -76208,7 +76821,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } }, "ClientToken": { @@ -76273,7 +76886,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } }, "InstanceId": { @@ -76496,7 +77109,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } } }, @@ -76623,26 +77236,28 @@ "com.amazonaws.ec2#ModifyInstancePlacementRequest": { "type": "structure", "members": { - "Affinity": { - "target": "com.amazonaws.ec2#Affinity", - "traits": { - "aws.protocols#ec2QueryName": "Affinity", - "smithy.api#documentation": "

The affinity setting for the instance. For more information, see Host affinity in the Amazon EC2 User Guide.

", - "smithy.api#xmlName": "affinity" - } - }, "GroupName": { "target": "com.amazonaws.ec2#PlacementGroupName", "traits": { "smithy.api#documentation": "

The name of the placement group in which to place the instance. For spread placement\n groups, the instance must have a tenancy of default. For cluster and\n partition placement groups, the instance must have a tenancy of default or\n dedicated.

\n

To remove an instance from a placement group, specify an empty string (\"\").

" } }, - "HostId": { - "target": "com.amazonaws.ec2#DedicatedHostId", + "PartitionNumber": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "HostId", - "smithy.api#documentation": "

The ID of the Dedicated Host with which to associate the instance.

", - "smithy.api#xmlName": "hostId" + "smithy.api#documentation": "

The number of the partition in which to place the instance. Valid only if the\n placement group strategy is set to partition.

" + } + }, + "HostResourceGroupArn": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

The ARN of the host resource group in which to place the instance. The instance must\n have a tenancy of host to specify this parameter.

" + } + }, + "GroupId": { + "target": "com.amazonaws.ec2#PlacementGroupId", + "traits": { + "smithy.api#documentation": "

The ID of the placement group. You must specify the placement group ID to launch an instance in a shared placement group.

" } }, "InstanceId": { @@ -76663,22 +77278,20 @@ "smithy.api#xmlName": "tenancy" } }, - "PartitionNumber": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "smithy.api#documentation": "

The number of the partition in which to place the instance. Valid only if the\n placement group strategy is set to partition.

" - } - }, - "HostResourceGroupArn": { - "target": "com.amazonaws.ec2#String", + "Affinity": { + "target": "com.amazonaws.ec2#Affinity", "traits": { - "smithy.api#documentation": "

The ARN of the host resource group in which to place the instance. The instance must\n have a tenancy of host to specify this parameter.

" + "aws.protocols#ec2QueryName": "Affinity", + "smithy.api#documentation": "

The affinity setting for the instance. For more information, see Host affinity in the Amazon EC2 User Guide.

", + "smithy.api#xmlName": "affinity" } }, - "GroupId": { - "target": "com.amazonaws.ec2#PlacementGroupId", + "HostId": { + "target": "com.amazonaws.ec2#DedicatedHostId", "traits": { - "smithy.api#documentation": "

The Group Id of a placement group. You must specify the Placement Group Group Id to launch an instance in a shared placement\n group.

" + "aws.protocols#ec2QueryName": "HostId", + "smithy.api#documentation": "

The ID of the Dedicated Host with which to associate the instance.
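A hedged Soto sketch of ModifyInstancePlacement (placeholder names; the instance must be stopped before its placement can be changed):

```swift
import SotoEC2

// Sketch: move a stopped instance into a named cluster placement group.
func moveToPlacementGroup(ec2: EC2) async throws {
    _ = try await ec2.modifyInstancePlacement(
        .init(groupName: "example-cluster-pg", instanceId: "i-0123456789abcdef0")
    )
}
```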

", + "smithy.api#xmlName": "hostId" } } }, @@ -77400,20 +78013,28 @@ "com.amazonaws.ec2#ModifyNetworkInterfaceAttributeRequest": { "type": "structure", "members": { - "Attachment": { - "target": "com.amazonaws.ec2#NetworkInterfaceAttachmentChanges", + "EnaSrdSpecification": { + "target": "com.amazonaws.ec2#EnaSrdSpecification", "traits": { - "aws.protocols#ec2QueryName": "Attachment", - "smithy.api#documentation": "

Information about the interface attachment. If modifying the delete on\n\t\t\t\ttermination attribute, you must specify the ID of the interface\n\t\t\tattachment.

", - "smithy.api#xmlName": "attachment" + "smithy.api#documentation": "

Updates the ENA Express configuration for the network interface that’s attached to the\n\t\t\tinstance.

" } }, - "Description": { - "target": "com.amazonaws.ec2#AttributeValue", + "EnablePrimaryIpv6": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

A description for the network interface.

", - "smithy.api#xmlName": "description" + "smithy.api#documentation": "

If you’re modifying a network interface in a dual-stack or IPv6-only subnet, you have\n the option to assign a primary IPv6 IP address. A primary IPv6 address is an IPv6 GUA\n address associated with an ENI that you have enabled to use a primary IPv6 address. Use\n this option if the instance that this ENI will be attached to relies on its IPv6 address\n not changing. Amazon Web Services will automatically assign an IPv6 address associated\n with the ENI attached to your instance to be the primary IPv6 address. Once you enable\n an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6\n GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6\n address until the instance is terminated or the network interface is detached. If you\n have multiple IPv6 addresses associated with an ENI attached to your instance and you\n enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI\n becomes the primary IPv6 address.

" + } + }, + "ConnectionTrackingSpecification": { + "target": "com.amazonaws.ec2#ConnectionTrackingSpecificationRequest", + "traits": { + "smithy.api#documentation": "

A connection tracking specification.

" + } + }, + "AssociatePublicIpAddress": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether to assign a public IPv4 address to a network interface. \n This option can be enabled for any network interface but will only apply to the primary network interface (eth0).

" } }, "DryRun": { @@ -77424,13 +78045,6 @@ "smithy.api#xmlName": "dryRun" } }, - "Groups": { - "target": "com.amazonaws.ec2#SecurityGroupIdStringList", - "traits": { - "smithy.api#documentation": "

Changes the security groups for the network interface. The new set of groups you specify replaces the current set. You must specify at least one group, even if it's just the default security group in the VPC. You must specify the ID of the security group, not the name.

", - "smithy.api#xmlName": "SecurityGroupId" - } - }, "NetworkInterfaceId": { "target": "com.amazonaws.ec2#NetworkInterfaceId", "traits": { @@ -77441,6 +78055,14 @@ "smithy.api#xmlName": "networkInterfaceId" } }, + "Description": { + "target": "com.amazonaws.ec2#AttributeValue", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

A description for the network interface.

", + "smithy.api#xmlName": "description" + } + }, "SourceDestCheck": { "target": "com.amazonaws.ec2#AttributeBooleanValue", "traits": { @@ -77449,28 +78071,19 @@ "smithy.api#xmlName": "sourceDestCheck" } }, - "EnaSrdSpecification": { - "target": "com.amazonaws.ec2#EnaSrdSpecification", - "traits": { - "smithy.api#documentation": "

Updates the ENA Express configuration for the network interface that’s attached to the\n\t\t\tinstance.

" - } - }, - "EnablePrimaryIpv6": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "smithy.api#documentation": "

If you’re modifying a network interface in a dual-stack or IPv6-only subnet, you have\n the option to assign a primary IPv6 IP address. A primary IPv6 address is an IPv6 GUA\n address associated with an ENI that you have enabled to use a primary IPv6 address. Use\n this option if the instance that this ENI will be attached to relies on its IPv6 address\n not changing. Amazon Web Services will automatically assign an IPv6 address associated\n with the ENI attached to your instance to be the primary IPv6 address. Once you enable\n an IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6\n GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6\n address until the instance is terminated or the network interface is detached. If you\n have multiple IPv6 addresses associated with an ENI attached to your instance and you\n enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI\n becomes the primary IPv6 address.

" - } - }, - "ConnectionTrackingSpecification": { - "target": "com.amazonaws.ec2#ConnectionTrackingSpecificationRequest", + "Groups": { + "target": "com.amazonaws.ec2#SecurityGroupIdStringList", "traits": { - "smithy.api#documentation": "

A connection tracking specification.

" + "smithy.api#documentation": "

Changes the security groups for the network interface. The new set of groups you specify replaces the current set. You must specify at least one group, even if it's just the default security group in the VPC. You must specify the ID of the security group, not the name.
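A hedged Soto sketch of replacing an ENI's security groups (placeholder IDs; ModifyNetworkInterfaceAttribute changes one attribute per call):

```swift
import SotoEC2

// Sketch: replace the security groups attached to a network interface.
func replaceEniGroups(ec2: EC2) async throws {
    _ = try await ec2.modifyNetworkInterfaceAttribute(
        .init(groups: ["sg-0123456789abcdef0"], networkInterfaceId: "eni-0123456789abcdef0")
    )
}
```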

", + "smithy.api#xmlName": "SecurityGroupId" } }, - "AssociatePublicIpAddress": { - "target": "com.amazonaws.ec2#Boolean", + "Attachment": { + "target": "com.amazonaws.ec2#NetworkInterfaceAttachmentChanges", "traits": { - "smithy.api#documentation": "

Indicates whether to assign a public IPv4 address to a network interface. \n This option can be enabled for any network interface but will only apply to the primary network interface (eth0).

" + "aws.protocols#ec2QueryName": "Attachment", + "smithy.api#documentation": "

Information about the interface attachment. If modifying the delete on\n\t\t\t\ttermination attribute, you must specify the ID of the interface\n\t\t\tattachment.

", + "smithy.api#xmlName": "attachment" } } }, @@ -77858,14 +78471,6 @@ "com.amazonaws.ec2#ModifySpotFleetRequestRequest": { "type": "structure", "members": { - "ExcessCapacityTerminationPolicy": { - "target": "com.amazonaws.ec2#ExcessCapacityTerminationPolicy", - "traits": { - "aws.protocols#ec2QueryName": "ExcessCapacityTerminationPolicy", - "smithy.api#documentation": "

Indicates whether running instances should be terminated if the target capacity\n of the Spot Fleet request is decreased below the current size of the Spot Fleet.

\n

Supported only for fleets of type maintain.

", - "smithy.api#xmlName": "excessCapacityTerminationPolicy" - } - }, "LaunchTemplateConfigs": { "target": "com.amazonaws.ec2#LaunchTemplateConfigList", "traits": { @@ -77873,6 +78478,18 @@ "smithy.api#xmlName": "LaunchTemplateConfig" } }, + "OnDemandTargetCapacity": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#documentation": "

The number of On-Demand Instances in the fleet.

" + } + }, + "Context": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

Reserved.

" + } + }, "SpotFleetRequestId": { "target": "com.amazonaws.ec2#SpotFleetRequestId", "traits": { @@ -77891,16 +78508,12 @@ "smithy.api#xmlName": "targetCapacity" } }, - "OnDemandTargetCapacity": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "smithy.api#documentation": "

The number of On-Demand Instances in the fleet.

" - } - }, - "Context": { - "target": "com.amazonaws.ec2#String", + "ExcessCapacityTerminationPolicy": { + "target": "com.amazonaws.ec2#ExcessCapacityTerminationPolicy", "traits": { - "smithy.api#documentation": "

Reserved.

" + "aws.protocols#ec2QueryName": "ExcessCapacityTerminationPolicy", + "smithy.api#documentation": "

Indicates whether running instances should be terminated if the target capacity\n of the Spot Fleet request is decreased below the current size of the Spot Fleet.

\n

Supported only for fleets of type maintain.
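A hedged Soto sketch of resizing a Spot Fleet (placeholder request ID; enum case name assumed from the model's default/noTermination values):

```swift
import SotoEC2

// Sketch: scale a maintain-type Spot Fleet to 10 instances and let excess capacity
// be terminated.
func scaleSpotFleet(ec2: EC2) async throws {
    _ = try await ec2.modifySpotFleetRequest(
        .init(
            excessCapacityTerminationPolicy: .default,
            spotFleetRequestId: "sfr-01234567-89ab-cdef-0123-456789abcdef",
            targetCapacity: 10
        )
    )
}
```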

", + "smithy.api#xmlName": "excessCapacityTerminationPolicy" } } }, @@ -78338,7 +78951,7 @@ "SecurityGroupReferencingSupport": { "target": "com.amazonaws.ec2#SecurityGroupReferencingSupportValue", "traits": { - "smithy.api#documentation": "\n

This parameter is in preview and may not be available for your account.

\n
\n

Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.

" + "smithy.api#documentation": "

Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management.\n\n

\n

This option is disabled by default.

\n

For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide.
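A hedged Soto sketch of turning this option on for an existing transit gateway (placeholder ID; enum case assumed from the model's enable/disable values):

```swift
import SotoEC2

// Sketch: enable security group referencing on a transit gateway.
func enableSgReferencing(ec2: EC2) async throws {
    _ = try await ec2.modifyTransitGateway(
        .init(
            options: EC2.ModifyTransitGatewayOptions(securityGroupReferencingSupport: .enable),
            transitGatewayId: "tgw-0123456789abcdef0"
        )
    )
}
```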

" } }, "AutoAcceptSharedAttachments": { @@ -78566,7 +79179,7 @@ "SecurityGroupReferencingSupport": { "target": "com.amazonaws.ec2#SecurityGroupReferencingSupportValue", "traits": { - "smithy.api#documentation": "\n

This parameter is in preview and may not be available for your account.

\n
\n

Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.

" + "smithy.api#documentation": "

Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management.\n\n

\n

This option is disabled by default.

\n

For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide.

" } }, "Ipv6Support": { @@ -80462,7 +81075,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } } @@ -84492,14 +85105,6 @@ "com.amazonaws.ec2#Placement": { "type": "structure", "members": { - "AvailabilityZone": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZone", - "smithy.api#documentation": "

The Availability Zone of the instance.

\n

If not specified, an Availability Zone will be automatically chosen for you based on\n the load balancing criteria for the Region.

\n

This parameter is not supported for CreateFleet.

", - "smithy.api#xmlName": "availabilityZone" - } - }, "Affinity": { "target": "com.amazonaws.ec2#String", "traits": { @@ -84563,6 +85168,14 @@ "smithy.api#documentation": "

The ID of the placement group that the instance is in. If you specify\n GroupId, you can't specify GroupName.

", "smithy.api#xmlName": "groupId" } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

The Availability Zone of the instance.

\n

If not specified, an Availability Zone will be automatically chosen for you based on\n the load balancing criteria for the Region.

\n

This parameter is not supported for CreateFleet.

", + "smithy.api#xmlName": "availabilityZone" + } } }, "traits": { @@ -85179,12 +85792,12 @@ "com.amazonaws.ec2#PriceScheduleSpecification": { "type": "structure", "members": { - "CurrencyCode": { - "target": "com.amazonaws.ec2#CurrencyCodeValues", + "Term": { + "target": "com.amazonaws.ec2#Long", "traits": { - "aws.protocols#ec2QueryName": "CurrencyCode", - "smithy.api#documentation": "

The currency for transacting the Reserved Instance resale.\n\t\t\t\tAt this time, the only supported currency is USD.

", - "smithy.api#xmlName": "currencyCode" + "aws.protocols#ec2QueryName": "Term", + "smithy.api#documentation": "

The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

", + "smithy.api#xmlName": "term" } }, "Price": { @@ -85195,12 +85808,12 @@ "smithy.api#xmlName": "price" } }, - "Term": { - "target": "com.amazonaws.ec2#Long", + "CurrencyCode": { + "target": "com.amazonaws.ec2#CurrencyCodeValues", "traits": { - "aws.protocols#ec2QueryName": "Term", - "smithy.api#documentation": "

The number of months remaining in the reservation. For example, 2 is the second to the last month before the capacity reservation expires.

", - "smithy.api#xmlName": "term" + "aws.protocols#ec2QueryName": "CurrencyCode", + "smithy.api#documentation": "

The currency for transacting the Reserved Instance resale.\n\t\t\t\tAt this time, the only supported currency is USD.

", + "smithy.api#xmlName": "currencyCode" } } }, @@ -86594,6 +87207,12 @@ "smithy.api#required": {} } }, + "PurchaseTime": { + "target": "com.amazonaws.ec2#DateTime", + "traits": { + "smithy.api#documentation": "

The time at which to purchase the Reserved Instance, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -86609,12 +87228,6 @@ "smithy.api#documentation": "

Specified for Reserved Instance Marketplace offerings to limit the total order and ensure that the Reserved Instances are not purchased at unexpected prices.

", "smithy.api#xmlName": "limitPrice" } - }, - "PurchaseTime": { - "target": "com.amazonaws.ec2#DateTime", - "traits": { - "smithy.api#documentation": "

The time at which to purchase the Reserved Instance, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).

" - } } }, "traits": { @@ -86798,7 +87411,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } } @@ -86902,12 +87515,12 @@ "com.amazonaws.ec2#Region": { "type": "structure", "members": { - "Endpoint": { + "OptInStatus": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "RegionEndpoint", - "smithy.api#documentation": "

The Region service endpoint.

", - "smithy.api#xmlName": "regionEndpoint" + "aws.protocols#ec2QueryName": "OptInStatus", + "smithy.api#documentation": "

The Region opt-in status. The possible values are opt-in-not-required, opted-in, and \n not-opted-in.

", + "smithy.api#xmlName": "optInStatus" } }, "RegionName": { @@ -86918,12 +87531,12 @@ "smithy.api#xmlName": "regionName" } }, - "OptInStatus": { + "Endpoint": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "OptInStatus", - "smithy.api#documentation": "

The Region opt-in status. The possible values are opt-in-not-required, opted-in, and \n not-opted-in.

", - "smithy.api#xmlName": "optInStatus" + "aws.protocols#ec2QueryName": "RegionEndpoint", + "smithy.api#documentation": "

The Region service endpoint.

", + "smithy.api#xmlName": "regionEndpoint" } } }, @@ -86982,51 +87595,50 @@ "smithy.api#documentation": "

The full path to your AMI manifest in Amazon S3 storage. The specified bucket must have the \n \t\taws-exec-read canned access control list (ACL) to ensure that it can be accessed \n \t\tby Amazon EC2. For more information, see Canned ACLs in the \n \t\tAmazon S3 Service Developer Guide.

" } }, - "Architecture": { - "target": "com.amazonaws.ec2#ArchitectureValues", + "BillingProducts": { + "target": "com.amazonaws.ec2#BillingProductList", "traits": { - "aws.protocols#ec2QueryName": "Architecture", - "smithy.api#documentation": "

The architecture of the AMI.

\n

Default: For Amazon EBS-backed AMIs, i386.\n For instance store-backed AMIs, the architecture specified in the manifest file.

", - "smithy.api#xmlName": "architecture" + "smithy.api#documentation": "

The billing product codes. Your account must be authorized to specify billing product codes.

\n

If your account is not authorized to specify billing product codes, you can publish AMIs\n that include billable software and list them on the Amazon Web Services Marketplace. You must first register as a seller\n on the Amazon Web Services Marketplace. For more information, see Getting started as a\n seller and AMI-based\n products in the Amazon Web Services Marketplace Seller Guide.

", + "smithy.api#xmlName": "BillingProduct" } }, - "BlockDeviceMappings": { - "target": "com.amazonaws.ec2#BlockDeviceMappingRequestList", + "BootMode": { + "target": "com.amazonaws.ec2#BootModeValues", "traits": { - "smithy.api#documentation": "

The block device mapping entries.

\n

If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.

\n

If you create an AMI on an Outpost, then all backing snapshots must be on the same\n Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can\n be used to launch instances on the same Outpost only. For more information, Amazon EBS local\n snapshots on Outposts in the Amazon EBS User Guide.

", - "smithy.api#xmlName": "BlockDeviceMapping" + "smithy.api#documentation": "

The boot mode of the AMI. A value of uefi-preferred indicates that the AMI supports both UEFI and Legacy BIOS.

\n \n

The operating system contained in the AMI must be configured to support the specified boot mode.

\n
\n

For more information, see Boot modes in the\n Amazon EC2 User Guide.

" } }, - "Description": { - "target": "com.amazonaws.ec2#String", + "TpmSupport": { + "target": "com.amazonaws.ec2#TpmSupportValues", "traits": { - "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

A description for your AMI.

", - "smithy.api#xmlName": "description" + "smithy.api#documentation": "

Set to v2.0 to enable Trusted Platform Module (TPM) support. For more\n information, see NitroTPM in the Amazon EC2 User Guide.

" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", + "UefiData": { + "target": "com.amazonaws.ec2#StringType", "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" + "smithy.api#documentation": "

Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data,\n use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the\n python-uefivars tool on\n GitHub. For more information, see UEFI Secure Boot in the\n Amazon EC2 User Guide.

" } }, - "EnaSupport": { - "target": "com.amazonaws.ec2#Boolean", + "ImdsSupport": { + "target": "com.amazonaws.ec2#ImdsSupportValues", "traits": { - "aws.protocols#ec2QueryName": "EnaSupport", - "smithy.api#documentation": "

Set to true to enable enhanced networking with ENA for the AMI and any instances that you launch from the AMI.

\n

This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

", - "smithy.api#xmlName": "enaSupport" + "smithy.api#documentation": "

Set to v2.0 to indicate that IMDSv2 is specified in the AMI. Instances\n launched from this AMI will have HttpTokens automatically set to\n required so that, by default, the instance requires that IMDSv2 is used when\n requesting instance metadata. In addition, HttpPutResponseHopLimit is set to\n 2. For more information, see Configure\n the AMI in the Amazon EC2 User Guide.

\n \n

If you set the value to v2.0, make sure that your AMI software can support IMDSv2.

\n
" } }, - "KernelId": { - "target": "com.amazonaws.ec2#KernelId", + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { - "aws.protocols#ec2QueryName": "KernelId", - "smithy.api#documentation": "

The ID of the kernel.

", - "smithy.api#xmlName": "kernelId" + "smithy.api#documentation": "

The tags to apply to the AMI.

\n

To tag the AMI, the value for ResourceType must be image. If you\n specify another value for ResourceType, the request fails.

\n

To tag an AMI after it has been registered, see CreateTags.

", + "smithy.api#xmlName": "TagSpecification" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" } }, "Name": { @@ -87039,11 +87651,28 @@ "smithy.api#xmlName": "name" } }, - "BillingProducts": { - "target": "com.amazonaws.ec2#BillingProductList", + "Description": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The billing product codes. Your account must be authorized to specify billing product codes.

\n

If your account is not authorized to specify billing product codes, you can publish AMIs\n that include billable software and list them on the Amazon Web Services Marketplace. You must first register as a seller\n on the Amazon Web Services Marketplace. For more information, see Getting started as a\n seller and AMI-based\n products in the Amazon Web Services Marketplace Seller Guide.

", - "smithy.api#xmlName": "BillingProduct" + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

A description for your AMI.

", + "smithy.api#xmlName": "description" + } + }, + "Architecture": { + "target": "com.amazonaws.ec2#ArchitectureValues", + "traits": { + "aws.protocols#ec2QueryName": "Architecture", + "smithy.api#documentation": "

The architecture of the AMI.

\n

Default: For Amazon EBS-backed AMIs, i386.\n For instance store-backed AMIs, the architecture specified in the manifest file.

", + "smithy.api#xmlName": "architecture" + } + }, + "KernelId": { + "target": "com.amazonaws.ec2#KernelId", + "traits": { + "aws.protocols#ec2QueryName": "KernelId", + "smithy.api#documentation": "

The ID of the kernel.

", + "smithy.api#xmlName": "kernelId" } }, "RamdiskId": { @@ -87062,12 +87691,11 @@ "smithy.api#xmlName": "rootDeviceName" } }, - "SriovNetSupport": { - "target": "com.amazonaws.ec2#String", + "BlockDeviceMappings": { + "target": "com.amazonaws.ec2#BlockDeviceMappingRequestList", "traits": { - "aws.protocols#ec2QueryName": "SriovNetSupport", - "smithy.api#documentation": "

Set to simple to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI.

\n

There is no way to disable sriovNetSupport at this time.

\n

This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

", - "smithy.api#xmlName": "sriovNetSupport" + "smithy.api#documentation": "

The block device mapping entries.

\n

If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.

\n

If you create an AMI on an Outpost, then all backing snapshots must be on the same\n Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can\n be used to launch instances on the same Outpost only. For more information, see Amazon EBS local\n snapshots on Outposts in the Amazon EBS User Guide.

", + "smithy.api#xmlName": "BlockDeviceMapping" } }, "VirtualizationType": { @@ -87078,35 +87706,20 @@ "smithy.api#xmlName": "virtualizationType" } }, - "BootMode": { - "target": "com.amazonaws.ec2#BootModeValues", - "traits": { - "smithy.api#documentation": "

The boot mode of the AMI. A value of uefi-preferred indicates that the AMI supports both UEFI and Legacy BIOS.

\n \n

The operating system contained in the AMI must be configured to support the specified boot mode.

\n
\n

For more information, see Boot modes in the\n Amazon EC2 User Guide.

" - } - }, - "TpmSupport": { - "target": "com.amazonaws.ec2#TpmSupportValues", - "traits": { - "smithy.api#documentation": "

Set to v2.0 to enable Trusted Platform Module (TPM) support. For more\n information, see NitroTPM in the Amazon EC2 User Guide.

" - } - }, - "UefiData": { - "target": "com.amazonaws.ec2#StringType", - "traits": { - "smithy.api#documentation": "

Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data,\n use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the\n python-uefivars tool on\n GitHub. For more information, see UEFI Secure Boot in the\n Amazon EC2 User Guide.

" - } - }, - "ImdsSupport": { - "target": "com.amazonaws.ec2#ImdsSupportValues", + "SriovNetSupport": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

Set to v2.0 to indicate that IMDSv2 is specified in the AMI. Instances\n launched from this AMI will have HttpTokens automatically set to\n required so that, by default, the instance requires that IMDSv2 is used when\n requesting instance metadata. In addition, HttpPutResponseHopLimit is set to\n 2. For more information, see Configure\n the AMI in the Amazon EC2 User Guide.

\n \n

If you set the value to v2.0, make sure that your AMI software can support IMDSv2.

\n
" + "aws.protocols#ec2QueryName": "SriovNetSupport", + "smithy.api#documentation": "

Set to simple to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI.

\n

There is no way to disable sriovNetSupport at this time.

\n

This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

", + "smithy.api#xmlName": "sriovNetSupport" } }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", + "EnaSupport": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

The tags to apply to the AMI.

\n

To tag the AMI, the value for ResourceType must be image. If you\n specify another value for ResourceType, the request fails.

\n

To tag an AMI after it has been registered, see CreateTags.

", - "smithy.api#xmlName": "TagSpecification" + "aws.protocols#ec2QueryName": "EnaSupport", + "smithy.api#documentation": "

Set to true to enable enhanced networking with ENA for the AMI and any instances that you launch from the AMI.

\n

This option is supported only for HVM AMIs. Specifying this option with a PV AMI can make instances launched from the AMI unreachable.

", + "smithy.api#xmlName": "enaSupport" } } }, @@ -87331,6 +87944,56 @@ "smithy.api#output": {} } }, + "com.amazonaws.ec2#RejectCapacityReservationBillingOwnership": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#RejectCapacityReservationBillingOwnershipRequest" + }, + "output": { + "target": "com.amazonaws.ec2#RejectCapacityReservationBillingOwnershipResult" + }, + "traits": { + "smithy.api#documentation": "

Rejects a request to assign billing of the available capacity of a shared Capacity Reservation \n\t\t\tto your account. For more information, see \n\t\t\t\tBilling assignment for shared Amazon EC2 Capacity Reservations.

" + } + }, + "com.amazonaws.ec2#RejectCapacityReservationBillingOwnershipRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

" + } + }, + "CapacityReservationId": { + "target": "com.amazonaws.ec2#CapacityReservationId", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ID of the Capacity Reservation for which to reject the request.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.ec2#RejectCapacityReservationBillingOwnershipResult": { + "type": "structure", + "members": { + "Return": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Return", + "smithy.api#documentation": "

Returns true if the request succeeds; otherwise, it returns an error.

", + "smithy.api#xmlName": "return" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.ec2#RejectTransitGatewayMulticastDomainAssociations": { "type": "operation", "input": { @@ -87913,6 +88576,14 @@ "com.amazonaws.ec2#ReplaceNetworkAclAssociationRequest": { "type": "structure", "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, "AssociationId": { "target": "com.amazonaws.ec2#NetworkAclAssociationId", "traits": { @@ -87923,14 +88594,6 @@ "smithy.api#xmlName": "associationId" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "NetworkAclId": { "target": "com.amazonaws.ec2#NetworkAclId", "traits": { @@ -87995,14 +88658,6 @@ "com.amazonaws.ec2#ReplaceNetworkAclEntryRequest": { "type": "structure", "members": { - "CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "CidrBlock", - "smithy.api#documentation": "

The IPv4 network range to allow or deny, in CIDR notation (for example\n 172.16.0.0/24).

", - "smithy.api#xmlName": "cidrBlock" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -88011,31 +88666,6 @@ "smithy.api#xmlName": "dryRun" } }, - "Egress": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "Egress", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Indicates whether to replace the egress rule.

\n

Default: If no value is specified, we replace the ingress rule.

", - "smithy.api#required": {}, - "smithy.api#xmlName": "egress" - } - }, - "IcmpTypeCode": { - "target": "com.amazonaws.ec2#IcmpTypeCode", - "traits": { - "smithy.api#documentation": "

ICMP protocol: The ICMP or ICMPv6 type and code. Required if specifying protocol\n\t\t 1 (ICMP) or protocol 58 (ICMPv6) with an IPv6 CIDR block.

", - "smithy.api#xmlName": "Icmp" - } - }, - "Ipv6CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Ipv6CidrBlock", - "smithy.api#documentation": "

The IPv6 network range to allow or deny, in CIDR notation (for example\n 2001:bd8:1234:1a00::/64).

", - "smithy.api#xmlName": "ipv6CidrBlock" - } - }, "NetworkAclId": { "target": "com.amazonaws.ec2#NetworkAclId", "traits": { @@ -88046,12 +88676,14 @@ "smithy.api#xmlName": "networkAclId" } }, - "PortRange": { - "target": "com.amazonaws.ec2#PortRange", + "RuleNumber": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "PortRange", - "smithy.api#documentation": "

TCP or UDP protocols: The range of ports the rule applies to. \n\t\t Required if specifying protocol 6 (TCP) or 17 (UDP).

", - "smithy.api#xmlName": "portRange" + "aws.protocols#ec2QueryName": "RuleNumber", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The rule number of the entry to replace.

", + "smithy.api#required": {}, + "smithy.api#xmlName": "ruleNumber" } }, "Protocol": { @@ -88074,14 +88706,45 @@ "smithy.api#xmlName": "ruleAction" } }, - "RuleNumber": { - "target": "com.amazonaws.ec2#Integer", + "Egress": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "aws.protocols#ec2QueryName": "RuleNumber", + "aws.protocols#ec2QueryName": "Egress", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The rule number of the entry to replace.

", + "smithy.api#documentation": "

Indicates whether to replace the egress rule.

\n

Default: If no value is specified, we replace the ingress rule.

", "smithy.api#required": {}, - "smithy.api#xmlName": "ruleNumber" + "smithy.api#xmlName": "egress" + } + }, + "CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CidrBlock", + "smithy.api#documentation": "

The IPv4 network range to allow or deny, in CIDR notation (for example\n 172.16.0.0/24).

", + "smithy.api#xmlName": "cidrBlock" + } + }, + "Ipv6CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6CidrBlock", + "smithy.api#documentation": "

The IPv6 network range to allow or deny, in CIDR notation (for example\n 2001:bd8:1234:1a00::/64).

", + "smithy.api#xmlName": "ipv6CidrBlock" + } + }, + "IcmpTypeCode": { + "target": "com.amazonaws.ec2#IcmpTypeCode", + "traits": { + "smithy.api#documentation": "

ICMP protocol: The ICMP or ICMPv6 type and code. Required if specifying protocol\n\t\t 1 (ICMP) or protocol 58 (ICMPv6) with an IPv6 CIDR block.

", + "smithy.api#xmlName": "Icmp" + } + }, + "PortRange": { + "target": "com.amazonaws.ec2#PortRange", + "traits": { + "aws.protocols#ec2QueryName": "PortRange", + "smithy.api#documentation": "

TCP or UDP protocols: The range of ports the rule applies to. \n\t\t Required if specifying protocol 6 (TCP) or 17 (UDP).

", + "smithy.api#xmlName": "portRange" } } }, @@ -88257,80 +88920,24 @@ "com.amazonaws.ec2#ReplaceRouteRequest": { "type": "structure", "members": { - "DestinationCidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "DestinationCidrBlock", - "smithy.api#documentation": "

The IPv4 CIDR address block used for the destination match. The value that you\n\t\t\tprovide must match the CIDR of an existing route in the table.

", - "smithy.api#xmlName": "destinationCidrBlock" - } - }, - "DestinationIpv6CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "DestinationIpv6CidrBlock", - "smithy.api#documentation": "

The IPv6 CIDR address block used for the destination match. The value that you\n\t\t\tprovide must match the CIDR of an existing route in the table.

", - "smithy.api#xmlName": "destinationIpv6CidrBlock" - } - }, "DestinationPrefixListId": { "target": "com.amazonaws.ec2#PrefixListResourceId", "traits": { "smithy.api#documentation": "

The ID of the prefix list for the route.

" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "VpcEndpointId": { "target": "com.amazonaws.ec2#VpcEndpointId", "traits": { "smithy.api#documentation": "

The ID of a VPC endpoint. Supported for Gateway Load Balancer endpoints only.

" } }, - "EgressOnlyInternetGatewayId": { - "target": "com.amazonaws.ec2#EgressOnlyInternetGatewayId", - "traits": { - "aws.protocols#ec2QueryName": "EgressOnlyInternetGatewayId", - "smithy.api#documentation": "

[IPv6 traffic only] The ID of an egress-only internet gateway.

", - "smithy.api#xmlName": "egressOnlyInternetGatewayId" - } - }, - "GatewayId": { - "target": "com.amazonaws.ec2#RouteGatewayId", - "traits": { - "aws.protocols#ec2QueryName": "GatewayId", - "smithy.api#documentation": "

The ID of an internet gateway or virtual private gateway.

", - "smithy.api#xmlName": "gatewayId" - } - }, - "InstanceId": { - "target": "com.amazonaws.ec2#InstanceId", - "traits": { - "aws.protocols#ec2QueryName": "InstanceId", - "smithy.api#documentation": "

The ID of a NAT instance in your VPC.

", - "smithy.api#xmlName": "instanceId" - } - }, "LocalTarget": { "target": "com.amazonaws.ec2#Boolean", "traits": { "smithy.api#documentation": "

Specifies whether to reset the local route to its default target (local).

" } }, - "NatGatewayId": { - "target": "com.amazonaws.ec2#NatGatewayId", - "traits": { - "aws.protocols#ec2QueryName": "NatGatewayId", - "smithy.api#documentation": "

[IPv4 traffic only] The ID of a NAT gateway.

", - "smithy.api#xmlName": "natGatewayId" - } - }, "TransitGatewayId": { "target": "com.amazonaws.ec2#TransitGatewayId", "traits": { @@ -88349,12 +88956,18 @@ "smithy.api#documentation": "

[IPv4 traffic only] The ID of a carrier gateway.

" } }, - "NetworkInterfaceId": { - "target": "com.amazonaws.ec2#NetworkInterfaceId", + "CoreNetworkArn": { + "target": "com.amazonaws.ec2#CoreNetworkArn", "traits": { - "aws.protocols#ec2QueryName": "NetworkInterfaceId", - "smithy.api#documentation": "

The ID of a network interface.

", - "smithy.api#xmlName": "networkInterfaceId" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the core network.

" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" } }, "RouteTableId": { @@ -88367,6 +88980,54 @@ "smithy.api#xmlName": "routeTableId" } }, + "DestinationCidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DestinationCidrBlock", + "smithy.api#documentation": "

The IPv4 CIDR address block used for the destination match. The value that you\n\t\t\tprovide must match the CIDR of an existing route in the table.

", + "smithy.api#xmlName": "destinationCidrBlock" + } + }, + "GatewayId": { + "target": "com.amazonaws.ec2#RouteGatewayId", + "traits": { + "aws.protocols#ec2QueryName": "GatewayId", + "smithy.api#documentation": "

The ID of an internet gateway or virtual private gateway.

", + "smithy.api#xmlName": "gatewayId" + } + }, + "DestinationIpv6CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DestinationIpv6CidrBlock", + "smithy.api#documentation": "

The IPv6 CIDR address block used for the destination match. The value that you\n\t\t\tprovide must match the CIDR of an existing route in the table.

", + "smithy.api#xmlName": "destinationIpv6CidrBlock" + } + }, + "EgressOnlyInternetGatewayId": { + "target": "com.amazonaws.ec2#EgressOnlyInternetGatewayId", + "traits": { + "aws.protocols#ec2QueryName": "EgressOnlyInternetGatewayId", + "smithy.api#documentation": "

[IPv6 traffic only] The ID of an egress-only internet gateway.

", + "smithy.api#xmlName": "egressOnlyInternetGatewayId" + } + }, + "InstanceId": { + "target": "com.amazonaws.ec2#InstanceId", + "traits": { + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#documentation": "

The ID of a NAT instance in your VPC.

", + "smithy.api#xmlName": "instanceId" + } + }, + "NetworkInterfaceId": { + "target": "com.amazonaws.ec2#NetworkInterfaceId", + "traits": { + "aws.protocols#ec2QueryName": "NetworkInterfaceId", + "smithy.api#documentation": "

The ID of a network interface.

", + "smithy.api#xmlName": "networkInterfaceId" + } + }, "VpcPeeringConnectionId": { "target": "com.amazonaws.ec2#VpcPeeringConnectionId", "traits": { @@ -88375,10 +89036,12 @@ "smithy.api#xmlName": "vpcPeeringConnectionId" } }, - "CoreNetworkArn": { - "target": "com.amazonaws.ec2#CoreNetworkArn", + "NatGatewayId": { + "target": "com.amazonaws.ec2#NatGatewayId", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the core network.

" + "aws.protocols#ec2QueryName": "NatGatewayId", + "smithy.api#documentation": "

[IPv4 traffic only] The ID of a NAT gateway.

", + "smithy.api#xmlName": "natGatewayId" } } }, @@ -88414,6 +89077,14 @@ "com.amazonaws.ec2#ReplaceRouteTableAssociationRequest": { "type": "structure", "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, "AssociationId": { "target": "com.amazonaws.ec2#RouteTableAssociationId", "traits": { @@ -88424,14 +89095,6 @@ "smithy.api#xmlName": "associationId" } }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, "RouteTableId": { "target": "com.amazonaws.ec2#RouteTableId", "traits": { @@ -88696,30 +89359,14 @@ "com.amazonaws.ec2#ReportInstanceStatusRequest": { "type": "structure", "members": { - "Description": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

Descriptive text about the health state of your instance.

", - "smithy.api#xmlName": "description" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, - "EndTime": { - "target": "com.amazonaws.ec2#DateTime", - "traits": { - "aws.protocols#ec2QueryName": "EndTime", - "smithy.api#documentation": "

The time at which the reported instance health state ended.

", - "smithy.api#xmlName": "endTime" - } - }, "Instances": { "target": "com.amazonaws.ec2#InstanceIdStringList", "traits": { @@ -88730,14 +89377,14 @@ "smithy.api#xmlName": "instanceId" } }, - "ReasonCodes": { - "target": "com.amazonaws.ec2#ReasonCodesList", + "Status": { + "target": "com.amazonaws.ec2#ReportStatusType", "traits": { - "aws.protocols#ec2QueryName": "ReasonCode", + "aws.protocols#ec2QueryName": "Status", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The reason codes that describe the health state of your instance:
instance-stuck-in-state: My instance is stuck in a state.
unresponsive: My instance is unresponsive.
not-accepting-credentials: My instance is not accepting my credentials.
password-not-available: A password is not available for my instance.
performance-network: My instance is experiencing performance problems that I believe are network related.
performance-instance-store: My instance is experiencing performance problems that I believe are related to the instance stores.
performance-ebs-volume: My instance is experiencing performance problems that I believe are related to an EBS volume.
performance-other: My instance is experiencing performance problems.
other: [explain using the description parameter]
", + "smithy.api#documentation": "

The status of all instances listed.

", "smithy.api#required": {}, - "smithy.api#xmlName": "reasonCode" + "smithy.api#xmlName": "status" } }, "StartTime": { @@ -88748,14 +89395,33 @@ "smithy.api#xmlName": "startTime" } }, - "Status": { - "target": "com.amazonaws.ec2#ReportStatusType", + "EndTime": { + "target": "com.amazonaws.ec2#DateTime", "traits": { - "aws.protocols#ec2QueryName": "Status", + "aws.protocols#ec2QueryName": "EndTime", + "smithy.api#documentation": "

The time at which the reported instance health state ended.

", + "smithy.api#xmlName": "endTime" + } + }, + "ReasonCodes": { + "target": "com.amazonaws.ec2#ReasonCodesList", + "traits": { + "aws.protocols#ec2QueryName": "ReasonCode", "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The status of all instances listed.

", + "smithy.api#documentation": "

The reason codes that describe the health state of your instance:
instance-stuck-in-state: My instance is stuck in a state.
unresponsive: My instance is unresponsive.
not-accepting-credentials: My instance is not accepting my credentials.
password-not-available: A password is not available for my instance.
performance-network: My instance is experiencing performance problems that I believe are network related.
performance-instance-store: My instance is experiencing performance problems that I believe are related to the instance stores.
performance-ebs-volume: My instance is experiencing performance problems that I believe are related to an EBS volume.
performance-other: My instance is experiencing performance problems.
other: [explain using the description parameter]
", "smithy.api#required": {}, - "smithy.api#xmlName": "status" + "smithy.api#xmlName": "reasonCode" + } + }, + "Description": { + "target": "com.amazonaws.ec2#ReportInstanceStatusRequestDescription", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#deprecated": { + "message": "This member has been deprecated" + }, + "smithy.api#documentation": "

Descriptive text about the health state of your instance.

", + "smithy.api#xmlName": "description" } } }, @@ -88763,6 +89429,12 @@ "smithy.api#input": {} } }, + "com.amazonaws.ec2#ReportInstanceStatusRequestDescription": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.ec2#ReportStatusType": { "type": "enum", "members": { @@ -88958,14 +89630,14 @@ "ElasticGpuSpecifications": { "target": "com.amazonaws.ec2#ElasticGpuSpecificationList", "traits": { - "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
", + "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
", "smithy.api#xmlName": "ElasticGpuSpecification" } }, "ElasticInferenceAccelerators": { "target": "com.amazonaws.ec2#LaunchTemplateElasticInferenceAcceleratorList", "traits": { - "smithy.api#documentation": "

An elastic inference accelerator to associate with the instance. Elastic inference\n accelerators are a resource you can attach to your Amazon EC2 instances to accelerate\n your Deep Learning (DL) inference workloads.

\n

You cannot specify accelerators from different generations in the same request.

\n \n

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon\n Elastic Inference (EI), and will help current customers migrate their workloads to\n options that offer better price and performance. After April 15, 2023, new customers\n will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker,\n Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during\n the past 30-day period are considered current customers and will be able to continue\n using the service.

\n
", + "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Inference is no longer available.

\n
", "smithy.api#xmlName": "ElasticInferenceAccelerator" } }, @@ -89307,28 +89979,23 @@ "com.amazonaws.ec2#RequestSpotInstancesRequest": { "type": "structure", "members": { - "AvailabilityZoneGroup": { - "target": "com.amazonaws.ec2#String", + "LaunchSpecification": { + "target": "com.amazonaws.ec2#RequestSpotLaunchSpecification", "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZoneGroup", - "smithy.api#documentation": "

The user-specified name for a logical grouping of requests.

\n

When you specify an Availability Zone group in a Spot Instance request, all Spot\n Instances in the request are launched in the same Availability Zone. Instance proximity\n is maintained with this parameter, but the choice of Availability Zone is not. The group\n applies only to requests for Spot Instances of the same instance type. Any additional\n Spot Instance requests that are specified with the same Availability Zone group name are\n launched in that same Availability Zone, as long as at least one instance from the group\n is still active.

\n

If there is no active instance running in the Availability Zone group that you specify\n for a new Spot Instance request (all instances are terminated, the request is expired,\n or the maximum price you specified falls below current Spot price), then Amazon EC2 launches\n the instance in any Availability Zone where the constraint can be met. Consequently, the\n subsequent set of Spot Instances could be placed in a different zone from the original\n request, even if you specified the same Availability Zone group.

\n

Default: Instances are launched in any available Availability Zone.

", - "smithy.api#xmlName": "availabilityZoneGroup" + "smithy.api#documentation": "

The launch specification.

" } }, - "BlockDurationMinutes": { - "target": "com.amazonaws.ec2#Integer", + "TagSpecifications": { + "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { - "aws.protocols#ec2QueryName": "BlockDurationMinutes", - "smithy.api#documentation": "

Deprecated.

", - "smithy.api#xmlName": "blockDurationMinutes" + "smithy.api#documentation": "

The key-value pair for tagging the Spot Instance request on creation. The value for\n ResourceType must be spot-instances-request, otherwise the\n Spot Instance request fails. To tag the Spot Instance request after it has been created,\n see CreateTags.

", + "smithy.api#xmlName": "TagSpecification" } }, - "ClientToken": { - "target": "com.amazonaws.ec2#String", + "InstanceInterruptionBehavior": { + "target": "com.amazonaws.ec2#InstanceInterruptionBehavior", "traits": { - "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency in\n Amazon EC2 API requests in the Amazon EC2 User Guide.

", - "smithy.api#xmlName": "clientToken" + "smithy.api#documentation": "

The behavior when a Spot Instance is interrupted. The default is terminate.

" } }, "DryRun": { @@ -89339,34 +90006,28 @@ "smithy.api#xmlName": "dryRun" } }, - "InstanceCount": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "InstanceCount", - "smithy.api#documentation": "

The maximum number of Spot Instances to launch.

\n

Default: 1

", - "smithy.api#xmlName": "instanceCount" - } - }, - "LaunchGroup": { + "SpotPrice": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "LaunchGroup", - "smithy.api#documentation": "

The instance launch group. Launch groups are Spot Instances that launch together and\n terminate together.

\n

Default: Instances are launched and terminated individually

", - "smithy.api#xmlName": "launchGroup" + "aws.protocols#ec2QueryName": "SpotPrice", + "smithy.api#documentation": "

The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

\n \n

If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.

\n
", + "smithy.api#xmlName": "spotPrice" } }, - "LaunchSpecification": { - "target": "com.amazonaws.ec2#RequestSpotLaunchSpecification", + "ClientToken": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The launch specification.

" + "aws.protocols#ec2QueryName": "ClientToken", + "smithy.api#documentation": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. For more information, see Ensuring idempotency in\n Amazon EC2 API requests in the Amazon EC2 User Guide.

", + "smithy.api#xmlName": "clientToken" } }, - "SpotPrice": { - "target": "com.amazonaws.ec2#String", + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "SpotPrice", - "smithy.api#documentation": "

The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend \n using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.

\n \n

If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.

\n
", - "smithy.api#xmlName": "spotPrice" + "aws.protocols#ec2QueryName": "InstanceCount", + "smithy.api#documentation": "

The maximum number of Spot Instances to launch.

\n

Default: 1

", + "smithy.api#xmlName": "instanceCount" } }, "Type": { @@ -89393,17 +90054,28 @@ "smithy.api#xmlName": "validUntil" } }, - "TagSpecifications": { - "target": "com.amazonaws.ec2#TagSpecificationList", + "LaunchGroup": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The key-value pair for tagging the Spot Instance request on creation. The value for\n ResourceType must be spot-instances-request, otherwise the\n Spot Instance request fails. To tag the Spot Instance request after it has been created,\n see CreateTags.

", - "smithy.api#xmlName": "TagSpecification" + "aws.protocols#ec2QueryName": "LaunchGroup", + "smithy.api#documentation": "

The instance launch group. Launch groups are Spot Instances that launch together and\n terminate together.

\n

Default: Instances are launched and terminated individually

", + "smithy.api#xmlName": "launchGroup" } }, - "InstanceInterruptionBehavior": { - "target": "com.amazonaws.ec2#InstanceInterruptionBehavior", + "AvailabilityZoneGroup": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The behavior when a Spot Instance is interrupted. The default is terminate.

" + "aws.protocols#ec2QueryName": "AvailabilityZoneGroup", + "smithy.api#documentation": "

The user-specified name for a logical grouping of requests.

\n

When you specify an Availability Zone group in a Spot Instance request, all Spot\n Instances in the request are launched in the same Availability Zone. Instance proximity\n is maintained with this parameter, but the choice of Availability Zone is not. The group\n applies only to requests for Spot Instances of the same instance type. Any additional\n Spot Instance requests that are specified with the same Availability Zone group name are\n launched in that same Availability Zone, as long as at least one instance from the group\n is still active.

\n

If there is no active instance running in the Availability Zone group that you specify\n for a new Spot Instance request (all instances are terminated, the request is expired,\n or the maximum price you specified falls below current Spot price), then Amazon EC2 launches\n the instance in any Availability Zone where the constraint can be met. Consequently, the\n subsequent set of Spot Instances could be placed in a different zone from the original\n request, even if you specified the same Availability Zone group.

\n

Default: Instances are launched in any available Availability Zone.

", + "smithy.api#xmlName": "availabilityZoneGroup" + } + }, + "BlockDurationMinutes": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "BlockDurationMinutes", + "smithy.api#documentation": "

Deprecated.

", + "smithy.api#xmlName": "blockDurationMinutes" } } }, @@ -89583,20 +90255,12 @@ "com.amazonaws.ec2#Reservation": { "type": "structure", "members": { - "Groups": { - "target": "com.amazonaws.ec2#GroupIdentifierList", - "traits": { - "aws.protocols#ec2QueryName": "GroupSet", - "smithy.api#documentation": "

Not supported.

", - "smithy.api#xmlName": "groupSet" - } - }, - "Instances": { - "target": "com.amazonaws.ec2#InstanceList", + "ReservationId": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "InstancesSet", - "smithy.api#documentation": "

The instances.

", - "smithy.api#xmlName": "instancesSet" + "aws.protocols#ec2QueryName": "ReservationId", + "smithy.api#documentation": "

The ID of the reservation.

", + "smithy.api#xmlName": "reservationId" } }, "OwnerId": { @@ -89615,12 +90279,20 @@ "smithy.api#xmlName": "requesterId" } }, - "ReservationId": { - "target": "com.amazonaws.ec2#String", + "Groups": { + "target": "com.amazonaws.ec2#GroupIdentifierList", "traits": { - "aws.protocols#ec2QueryName": "ReservationId", - "smithy.api#documentation": "

The ID of the reservation.

", - "smithy.api#xmlName": "reservationId" + "aws.protocols#ec2QueryName": "GroupSet", + "smithy.api#documentation": "

Not supported.

", + "smithy.api#xmlName": "groupSet" + } + }, + "Instances": { + "target": "com.amazonaws.ec2#InstanceList", + "traits": { + "aws.protocols#ec2QueryName": "InstancesSet", + "smithy.api#documentation": "

The instances.

", + "smithy.api#xmlName": "instancesSet" } } }, @@ -89867,60 +90539,60 @@ "com.amazonaws.ec2#ReservedInstances": { "type": "structure", "members": { - "AvailabilityZone": { - "target": "com.amazonaws.ec2#String", + "CurrencyCode": { + "target": "com.amazonaws.ec2#CurrencyCodeValues", "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZone", - "smithy.api#documentation": "

The Availability Zone in which the Reserved Instance can be used.

", - "smithy.api#xmlName": "availabilityZone" + "aws.protocols#ec2QueryName": "CurrencyCode", + "smithy.api#documentation": "

The currency of the Reserved Instance. It's specified using ISO 4217 standard currency codes.\n\t\t\t\tAt this time, the only supported currency is USD.

", + "smithy.api#xmlName": "currencyCode" } }, - "Duration": { - "target": "com.amazonaws.ec2#Long", + "InstanceTenancy": { + "target": "com.amazonaws.ec2#Tenancy", "traits": { - "aws.protocols#ec2QueryName": "Duration", - "smithy.api#documentation": "

The duration of the Reserved Instance, in seconds.

", - "smithy.api#xmlName": "duration" + "aws.protocols#ec2QueryName": "InstanceTenancy", + "smithy.api#documentation": "

The tenancy of the instance.

", + "smithy.api#xmlName": "instanceTenancy" } }, - "End": { - "target": "com.amazonaws.ec2#DateTime", + "OfferingClass": { + "target": "com.amazonaws.ec2#OfferingClassType", "traits": { - "aws.protocols#ec2QueryName": "End", - "smithy.api#documentation": "

The time when the Reserved Instance expires.

", - "smithy.api#xmlName": "end" + "aws.protocols#ec2QueryName": "OfferingClass", + "smithy.api#documentation": "

The offering class of the Reserved Instance.

", + "smithy.api#xmlName": "offeringClass" } }, - "FixedPrice": { - "target": "com.amazonaws.ec2#Float", + "OfferingType": { + "target": "com.amazonaws.ec2#OfferingTypeValues", "traits": { - "aws.protocols#ec2QueryName": "FixedPrice", - "smithy.api#documentation": "

The purchase price of the Reserved Instance.

", - "smithy.api#xmlName": "fixedPrice" + "aws.protocols#ec2QueryName": "OfferingType", + "smithy.api#documentation": "

The Reserved Instance offering type.

", + "smithy.api#xmlName": "offeringType" } }, - "InstanceCount": { - "target": "com.amazonaws.ec2#Integer", + "RecurringCharges": { + "target": "com.amazonaws.ec2#RecurringChargesList", "traits": { - "aws.protocols#ec2QueryName": "InstanceCount", - "smithy.api#documentation": "

The number of reservations purchased.

", - "smithy.api#xmlName": "instanceCount" + "aws.protocols#ec2QueryName": "RecurringCharges", + "smithy.api#documentation": "

The recurring charge tag assigned to the resource.

", + "smithy.api#xmlName": "recurringCharges" } }, - "InstanceType": { - "target": "com.amazonaws.ec2#InstanceType", + "Scope": { + "target": "com.amazonaws.ec2#scope", "traits": { - "aws.protocols#ec2QueryName": "InstanceType", - "smithy.api#documentation": "

The instance type on which the Reserved Instance can be used.

", - "smithy.api#xmlName": "instanceType" + "aws.protocols#ec2QueryName": "Scope", + "smithy.api#documentation": "

The scope of the Reserved Instance.

", + "smithy.api#xmlName": "scope" } }, - "ProductDescription": { - "target": "com.amazonaws.ec2#RIProductDescription", + "Tags": { + "target": "com.amazonaws.ec2#TagList", "traits": { - "aws.protocols#ec2QueryName": "ProductDescription", - "smithy.api#documentation": "

The Reserved Instance product platform description.

", - "smithy.api#xmlName": "productDescription" + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "

Any tags assigned to the resource.

", + "smithy.api#xmlName": "tagSet" } }, "ReservedInstancesId": { @@ -89931,6 +90603,22 @@ "smithy.api#xmlName": "reservedInstancesId" } }, + "InstanceType": { + "target": "com.amazonaws.ec2#InstanceType", + "traits": { + "aws.protocols#ec2QueryName": "InstanceType", + "smithy.api#documentation": "

The instance type on which the Reserved Instance can be used.

", + "smithy.api#xmlName": "instanceType" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

The Availability Zone in which the Reserved Instance can be used.

", + "smithy.api#xmlName": "availabilityZone" + } + }, "Start": { "target": "com.amazonaws.ec2#DateTime", "traits": { @@ -89939,12 +90627,20 @@ "smithy.api#xmlName": "start" } }, - "State": { - "target": "com.amazonaws.ec2#ReservedInstanceState", + "End": { + "target": "com.amazonaws.ec2#DateTime", "traits": { - "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "

The state of the Reserved Instance purchase.

", - "smithy.api#xmlName": "state" + "aws.protocols#ec2QueryName": "End", + "smithy.api#documentation": "

The time when the Reserved Instance expires.

", + "smithy.api#xmlName": "end" + } + }, + "Duration": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "aws.protocols#ec2QueryName": "Duration", + "smithy.api#documentation": "

The duration of the Reserved Instance, in seconds.

", + "smithy.api#xmlName": "duration" } }, "UsagePrice": { @@ -89955,60 +90651,36 @@ "smithy.api#xmlName": "usagePrice" } }, - "CurrencyCode": { - "target": "com.amazonaws.ec2#CurrencyCodeValues", - "traits": { - "aws.protocols#ec2QueryName": "CurrencyCode", - "smithy.api#documentation": "

The currency of the Reserved Instance. It's specified using ISO 4217 standard currency codes.\n\t\t\t\tAt this time, the only supported currency is USD.

", - "smithy.api#xmlName": "currencyCode" - } - }, - "InstanceTenancy": { - "target": "com.amazonaws.ec2#Tenancy", - "traits": { - "aws.protocols#ec2QueryName": "InstanceTenancy", - "smithy.api#documentation": "

The tenancy of the instance.

", - "smithy.api#xmlName": "instanceTenancy" - } - }, - "OfferingClass": { - "target": "com.amazonaws.ec2#OfferingClassType", - "traits": { - "aws.protocols#ec2QueryName": "OfferingClass", - "smithy.api#documentation": "

The offering class of the Reserved Instance.

", - "smithy.api#xmlName": "offeringClass" - } - }, - "OfferingType": { - "target": "com.amazonaws.ec2#OfferingTypeValues", + "FixedPrice": { + "target": "com.amazonaws.ec2#Float", "traits": { - "aws.protocols#ec2QueryName": "OfferingType", - "smithy.api#documentation": "

The Reserved Instance offering type.

", - "smithy.api#xmlName": "offeringType" + "aws.protocols#ec2QueryName": "FixedPrice", + "smithy.api#documentation": "

The purchase price of the Reserved Instance.

", + "smithy.api#xmlName": "fixedPrice" } }, - "RecurringCharges": { - "target": "com.amazonaws.ec2#RecurringChargesList", + "InstanceCount": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "RecurringCharges", - "smithy.api#documentation": "

The recurring charge tag assigned to the resource.

", - "smithy.api#xmlName": "recurringCharges" + "aws.protocols#ec2QueryName": "InstanceCount", + "smithy.api#documentation": "

The number of reservations purchased.

", + "smithy.api#xmlName": "instanceCount" } }, - "Scope": { - "target": "com.amazonaws.ec2#scope", + "ProductDescription": { + "target": "com.amazonaws.ec2#RIProductDescription", "traits": { - "aws.protocols#ec2QueryName": "Scope", - "smithy.api#documentation": "

The scope of the Reserved Instance.

", - "smithy.api#xmlName": "scope" + "aws.protocols#ec2QueryName": "ProductDescription", + "smithy.api#documentation": "

The Reserved Instance product platform description.

", + "smithy.api#xmlName": "productDescription" } }, - "Tags": { - "target": "com.amazonaws.ec2#TagList", + "State": { + "target": "com.amazonaws.ec2#ReservedInstanceState", "traits": { - "aws.protocols#ec2QueryName": "TagSet", - "smithy.api#documentation": "

Any tags assigned to the resource.

", - "smithy.api#xmlName": "tagSet" + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "

The state of the Reserved Instance purchase.

", + "smithy.api#xmlName": "state" } } }, @@ -90344,62 +91016,6 @@ "com.amazonaws.ec2#ReservedInstancesOffering": { "type": "structure", "members": { - "AvailabilityZone": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZone", - "smithy.api#documentation": "

The Availability Zone in which the Reserved Instance can be used.

", - "smithy.api#xmlName": "availabilityZone" - } - }, - "Duration": { - "target": "com.amazonaws.ec2#Long", - "traits": { - "aws.protocols#ec2QueryName": "Duration", - "smithy.api#documentation": "

The duration of the Reserved Instance, in seconds.

", - "smithy.api#xmlName": "duration" - } - }, - "FixedPrice": { - "target": "com.amazonaws.ec2#Float", - "traits": { - "aws.protocols#ec2QueryName": "FixedPrice", - "smithy.api#documentation": "

The purchase price of the Reserved Instance.

", - "smithy.api#xmlName": "fixedPrice" - } - }, - "InstanceType": { - "target": "com.amazonaws.ec2#InstanceType", - "traits": { - "aws.protocols#ec2QueryName": "InstanceType", - "smithy.api#documentation": "

The instance type on which the Reserved Instance can be used.

", - "smithy.api#xmlName": "instanceType" - } - }, - "ProductDescription": { - "target": "com.amazonaws.ec2#RIProductDescription", - "traits": { - "aws.protocols#ec2QueryName": "ProductDescription", - "smithy.api#documentation": "

The Reserved Instance product platform description.

", - "smithy.api#xmlName": "productDescription" - } - }, - "ReservedInstancesOfferingId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "ReservedInstancesOfferingId", - "smithy.api#documentation": "

The ID of the Reserved Instance offering. This is the offering ID used in GetReservedInstancesExchangeQuote \n to confirm that an exchange can be made.

", - "smithy.api#xmlName": "reservedInstancesOfferingId" - } - }, - "UsagePrice": { - "target": "com.amazonaws.ec2#Float", - "traits": { - "aws.protocols#ec2QueryName": "UsagePrice", - "smithy.api#documentation": "

The usage price of the Reserved Instance, per hour.

", - "smithy.api#xmlName": "usagePrice" - } - }, "CurrencyCode": { "target": "com.amazonaws.ec2#CurrencyCodeValues", "traits": { @@ -90463,6 +91079,62 @@ "smithy.api#documentation": "

Whether the Reserved Instance is applied to instances in a Region or an Availability Zone.

", "smithy.api#xmlName": "scope" } + }, + "ReservedInstancesOfferingId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "ReservedInstancesOfferingId", + "smithy.api#documentation": "

The ID of the Reserved Instance offering. This is the offering ID used in GetReservedInstancesExchangeQuote \n to confirm that an exchange can be made.

", + "smithy.api#xmlName": "reservedInstancesOfferingId" + } + }, + "InstanceType": { + "target": "com.amazonaws.ec2#InstanceType", + "traits": { + "aws.protocols#ec2QueryName": "InstanceType", + "smithy.api#documentation": "

The instance type on which the Reserved Instance can be used.

", + "smithy.api#xmlName": "instanceType" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

The Availability Zone in which the Reserved Instance can be used.

", + "smithy.api#xmlName": "availabilityZone" + } + }, + "Duration": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "aws.protocols#ec2QueryName": "Duration", + "smithy.api#documentation": "

The duration of the Reserved Instance, in seconds.

", + "smithy.api#xmlName": "duration" + } + }, + "UsagePrice": { + "target": "com.amazonaws.ec2#Float", + "traits": { + "aws.protocols#ec2QueryName": "UsagePrice", + "smithy.api#documentation": "

The usage price of the Reserved Instance, per hour.

", + "smithy.api#xmlName": "usagePrice" + } + }, + "FixedPrice": { + "target": "com.amazonaws.ec2#Float", + "traits": { + "aws.protocols#ec2QueryName": "FixedPrice", + "smithy.api#documentation": "

The purchase price of the Reserved Instance.

", + "smithy.api#xmlName": "fixedPrice" + } + }, + "ProductDescription": { + "target": "com.amazonaws.ec2#RIProductDescription", + "traits": { + "aws.protocols#ec2QueryName": "ProductDescription", + "smithy.api#documentation": "

The Reserved Instance product platform description.

", + "smithy.api#xmlName": "productDescription" + } } }, "traits": { @@ -90756,21 +91428,11 @@ "com.amazonaws.ec2#ResetInstanceAttributeRequest": { "type": "structure", "members": { - "Attribute": { - "target": "com.amazonaws.ec2#InstanceAttributeName", - "traits": { - "aws.protocols#ec2QueryName": "Attribute", - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The attribute to reset.

\n \n

You can only reset the following attributes: kernel |\n ramdisk | sourceDestCheck.

\n
", - "smithy.api#required": {}, - "smithy.api#xmlName": "attribute" - } - }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, @@ -90783,6 +91445,16 @@ "smithy.api#required": {}, "smithy.api#xmlName": "instanceId" } + }, + "Attribute": { + "target": "com.amazonaws.ec2#InstanceAttributeName", + "traits": { + "aws.protocols#ec2QueryName": "Attribute", + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The attribute to reset.

\n \n

You can only reset the following attributes: kernel |\n ramdisk | sourceDestCheck.

\n
", + "smithy.api#required": {}, + "smithy.api#xmlName": "attribute" + } } }, "traits": { @@ -91657,7 +92329,7 @@ "target": "com.amazonaws.ec2#ElasticGpuSpecificationResponseList", "traits": { "aws.protocols#ec2QueryName": "ElasticGpuSpecificationSet", - "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Graphics reached end of life on January 8, 2024. For \n workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, \n G4dn, or G5 instances.

\n
", + "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Graphics reached end of life on January 8, 2024.

\n
", "smithy.api#xmlName": "elasticGpuSpecificationSet" } }, @@ -91665,7 +92337,7 @@ "target": "com.amazonaws.ec2#LaunchTemplateElasticInferenceAcceleratorResponseList", "traits": { "aws.protocols#ec2QueryName": "ElasticInferenceAcceleratorSet", - "smithy.api#documentation": "

An elastic inference accelerator to associate with the instance. Elastic inference\n accelerators are a resource you can attach to your Amazon EC2 instances to accelerate\n your Deep Learning (DL) inference workloads.

\n

You cannot specify accelerators from different generations in the same request.

\n \n

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon\n Elastic Inference (EI), and will help current customers migrate their workloads to\n options that offer better price and performance. After April 15, 2023, new customers\n will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker,\n Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during\n the past 30-day period are considered current customers and will be able to continue\n using the service.

\n
", + "smithy.api#documentation": "

Deprecated.

\n \n

Amazon Elastic Inference is no longer available.

\n
", "smithy.api#xmlName": "elasticInferenceAcceleratorSet" } }, @@ -92295,6 +92967,13 @@ "com.amazonaws.ec2#RevokeSecurityGroupEgressRequest": { "type": "structure", "members": { + "SecurityGroupRuleIds": { + "target": "com.amazonaws.ec2#SecurityGroupRuleIdList", + "traits": { + "smithy.api#documentation": "

The IDs of the security group rules.

", + "smithy.api#xmlName": "SecurityGroupRuleId" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -92313,27 +92992,28 @@ "smithy.api#xmlName": "groupId" } }, - "IpPermissions": { - "target": "com.amazonaws.ec2#IpPermissionList", + "SourceSecurityGroupName": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "IpPermissions", - "smithy.api#documentation": "

The sets of IP permissions. You can't specify a destination security group and a CIDR IP address range in the same set of permissions.

", - "smithy.api#xmlName": "ipPermissions" + "aws.protocols#ec2QueryName": "SourceSecurityGroupName", + "smithy.api#documentation": "

Not supported. Use a set of IP permissions to specify a\n destination security group.

", + "smithy.api#xmlName": "sourceSecurityGroupName" } }, - "SecurityGroupRuleIds": { - "target": "com.amazonaws.ec2#SecurityGroupRuleIdList", + "SourceSecurityGroupOwnerId": { + "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The IDs of the security group rules.

", - "smithy.api#xmlName": "SecurityGroupRuleId" + "aws.protocols#ec2QueryName": "SourceSecurityGroupOwnerId", + "smithy.api#documentation": "

Not supported. Use a set of IP permissions to specify a destination security\n group.

", + "smithy.api#xmlName": "sourceSecurityGroupOwnerId" } }, - "CidrIp": { + "IpProtocol": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "CidrIp", - "smithy.api#documentation": "

Not supported. Use a set of IP permissions to specify the CIDR.

", - "smithy.api#xmlName": "cidrIp" + "aws.protocols#ec2QueryName": "IpProtocol", + "smithy.api#documentation": "

Not supported. Use a set of IP permissions to specify the protocol name or\n number.

", + "smithy.api#xmlName": "ipProtocol" } }, "FromPort": { @@ -92344,14 +93024,6 @@ "smithy.api#xmlName": "fromPort" } }, - "IpProtocol": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "IpProtocol", - "smithy.api#documentation": "

Not supported. Use a set of IP permissions to specify the protocol name or\n number.

", - "smithy.api#xmlName": "ipProtocol" - } - }, "ToPort": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -92360,20 +93032,20 @@ "smithy.api#xmlName": "toPort" } }, - "SourceSecurityGroupName": { + "CidrIp": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "SourceSecurityGroupName", - "smithy.api#documentation": "

Not supported. Use a set of IP permissions to specify a\n destination security group.

", - "smithy.api#xmlName": "sourceSecurityGroupName" + "aws.protocols#ec2QueryName": "CidrIp", + "smithy.api#documentation": "

Not supported. Use a set of IP permissions to specify the CIDR.

", + "smithy.api#xmlName": "cidrIp" } }, - "SourceSecurityGroupOwnerId": { - "target": "com.amazonaws.ec2#String", + "IpPermissions": { + "target": "com.amazonaws.ec2#IpPermissionList", "traits": { - "aws.protocols#ec2QueryName": "SourceSecurityGroupOwnerId", - "smithy.api#documentation": "

Not supported. Use a set of IP permissions to specify a destination security\n group.

", - "smithy.api#xmlName": "sourceSecurityGroupOwnerId" + "aws.protocols#ec2QueryName": "IpPermissions", + "smithy.api#documentation": "

The sets of IP permissions. You can't specify a destination security group and a CIDR IP address range in the same set of permissions.
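A sketch of the two ways this request can be phrased with the Soto shapes above: by the newly listed security group rule IDs, or by an equivalent IP-permission set. The group and rule IDs are placeholders, and the Soto async client API is assumed.

```swift
import SotoEC2

func revokeOutboundRule(ec2: EC2) async throws {
    // Reference the rule directly by its security group rule ID.
    _ = try await ec2.revokeSecurityGroupEgress(.init(
        groupId: "sg-0123456789abcdef0",
        securityGroupRuleIds: ["sgr-0123456789abcdef0"]
    ))

    // Or describe the rule as an IP permission. Per the documentation above, a destination
    // security group and a CIDR range cannot appear in the same permission set.
    let permission = EC2.IpPermission(
        fromPort: 443,
        ipProtocol: "tcp",
        ipRanges: [EC2.IpRange(cidrIp: "0.0.0.0/0")],
        toPort: 443
    )
    _ = try await ec2.revokeSecurityGroupEgress(.init(
        groupId: "sg-0123456789abcdef0",
        ipPermissions: [permission]
    ))
}
```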

", + "smithy.api#xmlName": "ipPermissions" } } }, @@ -92474,6 +93146,13 @@ "smithy.api#documentation": "

If the protocol is TCP or UDP, this is the end of the port range.\n If the protocol is ICMP, this is the ICMP code or -1 (all ICMP codes).

" } }, + "SecurityGroupRuleIds": { + "target": "com.amazonaws.ec2#SecurityGroupRuleIdList", + "traits": { + "smithy.api#documentation": "

The IDs of the security group rules.

", + "smithy.api#xmlName": "SecurityGroupRuleId" + } + }, "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -92481,13 +93160,6 @@ "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } - }, - "SecurityGroupRuleIds": { - "target": "com.amazonaws.ec2#SecurityGroupRuleIdList", - "traits": { - "smithy.api#documentation": "

The IDs of the security group rules.

", - "smithy.api#xmlName": "SecurityGroupRuleId" - } } }, "traits": { @@ -93237,79 +93909,6 @@ "smithy.api#documentation": "

The user data to make available to the instance. User data must be base64-encoded.\n Depending on the tool or SDK that you're using, the base64-encoding might be performed for you.\n For more information, see Work with instance user data.
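A sketch of the base64 requirement called out above, using the generated RunInstancesRequest shape. Soto does not appear to perform the encoding for you, so the example encodes the script explicitly; the AMI ID is a placeholder.

```swift
import Foundation
import SotoEC2

func launchWithUserData(ec2: EC2) async throws {
    let script = """
    #!/bin/bash
    dnf install -y nginx
    """
    let request = EC2.RunInstancesRequest(
        imageId: "ami-0123456789abcdef0",
        maxCount: 1,
        minCount: 1,
        userData: Data(script.utf8).base64EncodedString()   // UserData must be base64-encoded
    )
    let reservation = try await ec2.runInstances(request)
    print(reservation.instances?.first?.instanceId ?? "no instance launched")
}
```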

" } }, - "AdditionalInfo": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "AdditionalInfo", - "smithy.api#documentation": "

Reserved.

", - "smithy.api#xmlName": "additionalInfo" - } - }, - "ClientToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "ClientToken", - "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure the idempotency of the\n request. If you do not specify a client token, a randomly generated token is used for\n the request to ensure idempotency.

\n

For more information, see Ensuring\n Idempotency.

\n

Constraints: Maximum 64 ASCII characters

", - "smithy.api#idempotencyToken": {}, - "smithy.api#xmlName": "clientToken" - } - }, - "DisableApiTermination": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DisableApiTermination", - "smithy.api#documentation": "

If you set this parameter to true, you can't terminate the instance using\n the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after\n launch, use ModifyInstanceAttribute. Alternatively, if you set\n InstanceInitiatedShutdownBehavior to terminate, you can\n terminate the instance by running the shutdown command from the instance.

\n

Default: false\n

", - "smithy.api#xmlName": "disableApiTermination" - } - }, - "DryRun": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", - "smithy.api#xmlName": "dryRun" - } - }, - "EbsOptimized": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "EbsOptimized", - "smithy.api#documentation": "

Indicates whether the instance is optimized for Amazon EBS I/O. This optimization\n provides dedicated throughput to Amazon EBS and an optimized configuration stack to\n provide optimal Amazon EBS I/O performance. This optimization isn't available with all\n instance types. Additional usage charges apply when using an EBS-optimized\n instance.

\n

Default: false\n

", - "smithy.api#xmlName": "ebsOptimized" - } - }, - "IamInstanceProfile": { - "target": "com.amazonaws.ec2#IamInstanceProfileSpecification", - "traits": { - "aws.protocols#ec2QueryName": "IamInstanceProfile", - "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of an IAM instance\n profile.

", - "smithy.api#xmlName": "iamInstanceProfile" - } - }, - "InstanceInitiatedShutdownBehavior": { - "target": "com.amazonaws.ec2#ShutdownBehavior", - "traits": { - "aws.protocols#ec2QueryName": "InstanceInitiatedShutdownBehavior", - "smithy.api#documentation": "

Indicates whether an instance stops or terminates when you initiate shutdown from the\n instance (using the operating system command for system shutdown).

\n

Default: stop\n

", - "smithy.api#xmlName": "instanceInitiatedShutdownBehavior" - } - }, - "NetworkInterfaces": { - "target": "com.amazonaws.ec2#InstanceNetworkInterfaceSpecificationList", - "traits": { - "aws.protocols#ec2QueryName": "NetworkInterface", - "smithy.api#documentation": "

The network interfaces to associate with the instance.

", - "smithy.api#xmlName": "networkInterface" - } - }, - "PrivateIpAddress": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "PrivateIpAddress", - "smithy.api#documentation": "

The primary IPv4 address. You must specify a value from the IPv4 address\n range of the subnet.

\n

Only one private IP address can be designated as primary. You can't specify this\n option if you've specified the option to designate a private IP address as the primary\n IP address in a network interface specification. You cannot specify this option if\n you're launching more than one instance in the request.

\n

You cannot specify this option and the network interfaces option in the same\n request.

", - "smithy.api#xmlName": "privateIpAddress" - } - }, "ElasticGpuSpecification": { "target": "com.amazonaws.ec2#ElasticGpuSpecifications", "traits": { @@ -93319,7 +93918,7 @@ "ElasticInferenceAccelerators": { "target": "com.amazonaws.ec2#ElasticInferenceAccelerators", "traits": { - "smithy.api#documentation": "

An elastic inference accelerator to associate with the instance.

\n \n

Amazon Elastic Inference (EI) is no longer available to new customers. For more\n information, see Amazon Elastic Inference FAQs.

\n
", + "smithy.api#documentation": "

An elastic inference accelerator to associate with the instance.

\n \n

Amazon Elastic Inference is no longer available.

\n
", "smithy.api#xmlName": "ElasticInferenceAccelerator" } }, @@ -93408,6 +94007,79 @@ "traits": { "smithy.api#documentation": "

If you’re launching an instance into a dual-stack or IPv6-only subnet, you can enable\n assigning a primary IPv6 address. A primary IPv6 address is an IPv6 GUA address\n associated with an ENI that you have enabled to use a primary IPv6 address. Use this\n option if an instance relies on its IPv6 address not changing. When you launch the\n instance, Amazon Web Services will automatically assign an IPv6 address associated with\n the ENI attached to your instance to be the primary IPv6 address. Once you enable an\n IPv6 GUA address to be a primary IPv6, you cannot disable it. When you enable an IPv6\n GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6\n address until the instance is terminated or the network interface is detached. If you\n have multiple IPv6 addresses associated with an ENI attached to your instance and you\n enable a primary IPv6 address, the first IPv6 GUA address associated with the ENI\n becomes the primary IPv6 address.
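A sketch of enabling this behaviour at launch. The member name is not visible in this hunk; it is assumed here to be the enablePrimaryIpv6 flag on RunInstancesRequest, and the subnet and AMI IDs are placeholders for a dual-stack or IPv6-only subnet.

```swift
import SotoEC2

func launchWithPrimaryIpv6(ec2: EC2) async throws {
    let request = EC2.RunInstancesRequest(
        enablePrimaryIpv6: true,   // first IPv6 GUA on the primary ENI becomes the primary IPv6 address
        imageId: "ami-0123456789abcdef0",
        maxCount: 1,
        minCount: 1,
        subnetId: "subnet-0123456789abcdef0"
    )
    _ = try await ec2.runInstances(request)
}
```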

" } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DryRun", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "smithy.api#xmlName": "dryRun" + } + }, + "DisableApiTermination": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DisableApiTermination", + "smithy.api#documentation": "

If you set this parameter to true, you can't terminate the instance using\n the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after\n launch, use ModifyInstanceAttribute. Alternatively, if you set\n InstanceInitiatedShutdownBehavior to terminate, you can\n terminate the instance by running the shutdown command from the instance.

\n

Default: false\n

", + "smithy.api#xmlName": "disableApiTermination" + } + }, + "InstanceInitiatedShutdownBehavior": { + "target": "com.amazonaws.ec2#ShutdownBehavior", + "traits": { + "aws.protocols#ec2QueryName": "InstanceInitiatedShutdownBehavior", + "smithy.api#documentation": "

Indicates whether an instance stops or terminates when you initiate shutdown from the\n instance (using the operating system command for system shutdown).

\n

Default: stop\n

", + "smithy.api#xmlName": "instanceInitiatedShutdownBehavior" + } + }, + "PrivateIpAddress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PrivateIpAddress", + "smithy.api#documentation": "

The primary IPv4 address. You must specify a value from the IPv4 address\n range of the subnet.

\n

Only one private IP address can be designated as primary. You can't specify this\n option if you've specified the option to designate a private IP address as the primary\n IP address in a network interface specification. You cannot specify this option if\n you're launching more than one instance in the request.

\n

You cannot specify this option and the network interfaces option in the same\n request.

", + "smithy.api#xmlName": "privateIpAddress" + } + }, + "ClientToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "ClientToken", + "smithy.api#documentation": "

Unique, case-sensitive identifier you provide to ensure the idempotency of the\n request. If you do not specify a client token, a randomly generated token is used for\n the request to ensure idempotency.

\n

For more information, see Ensuring\n Idempotency.

\n

Constraints: Maximum 64 ASCII characters
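A sketch of the idempotency behaviour described above: repeating RunInstances with the same client token (at most 64 ASCII characters) should not launch a second instance. The token and AMI ID are placeholders; whether a given Soto release fills the token for you is not assumed here.

```swift
import Foundation
import SotoEC2

func launchIdempotently(ec2: EC2) async throws {
    let token = UUID().uuidString   // 36 ASCII characters, well under the 64-character limit
    let request = EC2.RunInstancesRequest(
        clientToken: token,
        imageId: "ami-0123456789abcdef0",
        maxCount: 1,
        minCount: 1
    )
    let first = try await ec2.runInstances(request)
    // Retrying with the same token is expected to describe the original launch
    // rather than create additional instances.
    let retry = try await ec2.runInstances(request)
    print(first.reservationId ?? "?", retry.reservationId ?? "?")
}
```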

", + "smithy.api#idempotencyToken": {}, + "smithy.api#xmlName": "clientToken" + } + }, + "AdditionalInfo": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AdditionalInfo", + "smithy.api#documentation": "

Reserved.

", + "smithy.api#xmlName": "additionalInfo" + } + }, + "NetworkInterfaces": { + "target": "com.amazonaws.ec2#InstanceNetworkInterfaceSpecificationList", + "traits": { + "aws.protocols#ec2QueryName": "NetworkInterface", + "smithy.api#documentation": "

The network interfaces to associate with the instance.

", + "smithy.api#xmlName": "networkInterface" + } + }, + "IamInstanceProfile": { + "target": "com.amazonaws.ec2#IamInstanceProfileSpecification", + "traits": { + "aws.protocols#ec2QueryName": "IamInstanceProfile", + "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of an IAM instance\n profile.

", + "smithy.api#xmlName": "iamInstanceProfile" + } + }, + "EbsOptimized": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "EbsOptimized", + "smithy.api#documentation": "

Indicates whether the instance is optimized for Amazon EBS I/O. This optimization\n provides dedicated throughput to Amazon EBS and an optimized configuration stack to\n provide optimal Amazon EBS I/O performance. This optimization isn't available with all\n instance types. Additional usage charges apply when using an EBS-optimized\n instance.

\n

Default: false\n

", + "smithy.api#xmlName": "ebsOptimized" + } } }, "traits": { @@ -94577,38 +95249,6 @@ "com.amazonaws.ec2#SecurityGroup": { "type": "structure", "members": { - "Description": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "GroupDescription", - "smithy.api#documentation": "

A description of the security group.

", - "smithy.api#xmlName": "groupDescription" - } - }, - "GroupName": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "GroupName", - "smithy.api#documentation": "

The name of the security group.

", - "smithy.api#xmlName": "groupName" - } - }, - "IpPermissions": { - "target": "com.amazonaws.ec2#IpPermissionList", - "traits": { - "aws.protocols#ec2QueryName": "IpPermissions", - "smithy.api#documentation": "

The inbound rules associated with the security group.

", - "smithy.api#xmlName": "ipPermissions" - } - }, - "OwnerId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "OwnerId", - "smithy.api#documentation": "

The Amazon Web Services account ID of the owner of the security group.

", - "smithy.api#xmlName": "ownerId" - } - }, "GroupId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -94640,6 +95280,38 @@ "smithy.api#documentation": "

The ID of the VPC for the security group.

", "smithy.api#xmlName": "vpcId" } + }, + "OwnerId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "OwnerId", + "smithy.api#documentation": "

The Amazon Web Services account ID of the owner of the security group.

", + "smithy.api#xmlName": "ownerId" + } + }, + "GroupName": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "GroupName", + "smithy.api#documentation": "

The name of the security group.

", + "smithy.api#xmlName": "groupName" + } + }, + "Description": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "GroupDescription", + "smithy.api#documentation": "

A description of the security group.

", + "smithy.api#xmlName": "groupDescription" + } + }, + "IpPermissions": { + "target": "com.amazonaws.ec2#IpPermissionList", + "traits": { + "aws.protocols#ec2QueryName": "IpPermissions", + "smithy.api#documentation": "

The inbound rules associated with the security group.

", + "smithy.api#xmlName": "ipPermissions" + } } }, "traits": { @@ -95159,7 +95831,7 @@ "DryRun": { "target": "com.amazonaws.ec2#Boolean", "traits": { - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

" + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

" } } }, @@ -95617,52 +96289,52 @@ "com.amazonaws.ec2#Snapshot": { "type": "structure", "members": { - "DataEncryptionKeyId": { + "OwnerAlias": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "DataEncryptionKeyId", - "smithy.api#documentation": "

The data encryption key identifier for the snapshot. This value is a unique identifier\n that corresponds to the data encryption key that was used to encrypt the original volume or\n snapshot copy. Because data encryption keys are inherited by volumes created from snapshots,\n and vice versa, if snapshots share the same data encryption key identifier, then they belong\n to the same volume/snapshot lineage. This parameter is only returned by DescribeSnapshots.

", - "smithy.api#xmlName": "dataEncryptionKeyId" + "aws.protocols#ec2QueryName": "OwnerAlias", + "smithy.api#documentation": "

The Amazon Web Services owner alias, from an Amazon-maintained list (amazon). This is not \n the user-configured Amazon Web Services account alias set using the IAM console.

", + "smithy.api#xmlName": "ownerAlias" } }, - "Description": { + "OutpostArn": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

The description for the snapshot.

", - "smithy.api#xmlName": "description" + "aws.protocols#ec2QueryName": "OutpostArn", + "smithy.api#documentation": "

The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the \n \t\tAmazon EBS User Guide.

", + "smithy.api#xmlName": "outpostArn" } }, - "Encrypted": { - "target": "com.amazonaws.ec2#Boolean", + "Tags": { + "target": "com.amazonaws.ec2#TagList", "traits": { - "aws.protocols#ec2QueryName": "Encrypted", - "smithy.api#documentation": "

Indicates whether the snapshot is encrypted.

", - "smithy.api#xmlName": "encrypted" + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "

Any tags assigned to the snapshot.

", + "smithy.api#xmlName": "tagSet" } }, - "KmsKeyId": { - "target": "com.amazonaws.ec2#String", + "StorageTier": { + "target": "com.amazonaws.ec2#StorageTier", "traits": { - "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that was used to protect the\n volume encryption key for the parent volume.

", - "smithy.api#xmlName": "kmsKeyId" + "aws.protocols#ec2QueryName": "StorageTier", + "smithy.api#documentation": "

The storage tier in which the snapshot is stored. standard indicates \n that the snapshot is stored in the standard snapshot storage tier and that it is ready \n for use. archive indicates that the snapshot is currently archived and that \n it must be restored before it can be used.
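A sketch that lists owned snapshots sitting in the archive tier and reports whether a temporary restore is active, using the Snapshot fields above. The "storage-tier" filter name is an assumption taken from the DescribeSnapshots filter list rather than from this hunk.

```swift
import Foundation
import SotoEC2

func listArchivedSnapshots(ec2: EC2) async throws {
    let request = EC2.DescribeSnapshotsRequest(
        filters: [EC2.Filter(name: "storage-tier", values: ["archive"])],
        ownerIds: ["self"]
    )
    let response = try await ec2.describeSnapshots(request)
    for snapshot in response.snapshots ?? [] {
        // restoreExpiryTime is only present while an archived snapshot is temporarily restored.
        let restore = snapshot.restoreExpiryTime.map { "restored until \($0)" } ?? "not restored"
        print(snapshot.snapshotId ?? "?", restore)
    }
}
```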

", + "smithy.api#xmlName": "storageTier" } }, - "OwnerId": { - "target": "com.amazonaws.ec2#String", + "RestoreExpiryTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", "traits": { - "aws.protocols#ec2QueryName": "OwnerId", - "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the EBS snapshot.

", - "smithy.api#xmlName": "ownerId" + "aws.protocols#ec2QueryName": "RestoreExpiryTime", + "smithy.api#documentation": "

Only for archived snapshots that are temporarily restored. Indicates the date and \n time when a temporarily restored snapshot will be automatically re-archived.

", + "smithy.api#xmlName": "restoreExpiryTime" } }, - "Progress": { - "target": "com.amazonaws.ec2#String", + "SseType": { + "target": "com.amazonaws.ec2#SSEType", "traits": { - "aws.protocols#ec2QueryName": "Progress", - "smithy.api#documentation": "

The progress of the snapshot, as a percentage.

", - "smithy.api#xmlName": "progress" + "aws.protocols#ec2QueryName": "SseType", + "smithy.api#documentation": "

Reserved for future use.

", + "smithy.api#xmlName": "sseType" } }, "SnapshotId": { @@ -95673,12 +96345,12 @@ "smithy.api#xmlName": "snapshotId" } }, - "StartTime": { - "target": "com.amazonaws.ec2#DateTime", + "VolumeId": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "StartTime", - "smithy.api#documentation": "

The time stamp when the snapshot was initiated.

", - "smithy.api#xmlName": "startTime" + "aws.protocols#ec2QueryName": "VolumeId", + "smithy.api#documentation": "

The ID of the volume that was used to create the snapshot. Snapshots created by the CopySnapshot action have an arbitrary volume ID that should not be used for any\n purpose.

", + "smithy.api#xmlName": "volumeId" } }, "State": { @@ -95697,68 +96369,68 @@ "smithy.api#xmlName": "statusMessage" } }, - "VolumeId": { - "target": "com.amazonaws.ec2#String", + "StartTime": { + "target": "com.amazonaws.ec2#DateTime", "traits": { - "aws.protocols#ec2QueryName": "VolumeId", - "smithy.api#documentation": "

The ID of the volume that was used to create the snapshot. Snapshots created by the CopySnapshot action have an arbitrary volume ID that should not be used for any\n purpose.

", - "smithy.api#xmlName": "volumeId" + "aws.protocols#ec2QueryName": "StartTime", + "smithy.api#documentation": "

The time stamp when the snapshot was initiated.

", + "smithy.api#xmlName": "startTime" } }, - "VolumeSize": { - "target": "com.amazonaws.ec2#Integer", + "Progress": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "VolumeSize", - "smithy.api#documentation": "

The size of the volume, in GiB.

", - "smithy.api#xmlName": "volumeSize" + "aws.protocols#ec2QueryName": "Progress", + "smithy.api#documentation": "

The progress of the snapshot, as a percentage.

", + "smithy.api#xmlName": "progress" } }, - "OwnerAlias": { + "OwnerId": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "OwnerAlias", - "smithy.api#documentation": "

The Amazon Web Services owner alias, from an Amazon-maintained list (amazon). This is not \n the user-configured Amazon Web Services account alias set using the IAM console.

", - "smithy.api#xmlName": "ownerAlias" + "aws.protocols#ec2QueryName": "OwnerId", + "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the EBS snapshot.

", + "smithy.api#xmlName": "ownerId" } }, - "OutpostArn": { + "Description": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "OutpostArn", - "smithy.api#documentation": "

The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the \n \t\tAmazon EBS User Guide.

", - "smithy.api#xmlName": "outpostArn" + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

The description for the snapshot.

", + "smithy.api#xmlName": "description" } }, - "Tags": { - "target": "com.amazonaws.ec2#TagList", + "VolumeSize": { + "target": "com.amazonaws.ec2#Integer", "traits": { - "aws.protocols#ec2QueryName": "TagSet", - "smithy.api#documentation": "

Any tags assigned to the snapshot.

", - "smithy.api#xmlName": "tagSet" + "aws.protocols#ec2QueryName": "VolumeSize", + "smithy.api#documentation": "

The size of the volume, in GiB.

", + "smithy.api#xmlName": "volumeSize" } }, - "StorageTier": { - "target": "com.amazonaws.ec2#StorageTier", + "Encrypted": { + "target": "com.amazonaws.ec2#Boolean", "traits": { - "aws.protocols#ec2QueryName": "StorageTier", - "smithy.api#documentation": "

The storage tier in which the snapshot is stored. standard indicates \n that the snapshot is stored in the standard snapshot storage tier and that it is ready \n for use. archive indicates that the snapshot is currently archived and that \n it must be restored before it can be used.

", - "smithy.api#xmlName": "storageTier" + "aws.protocols#ec2QueryName": "Encrypted", + "smithy.api#documentation": "

Indicates whether the snapshot is encrypted.

", + "smithy.api#xmlName": "encrypted" } }, - "RestoreExpiryTime": { - "target": "com.amazonaws.ec2#MillisecondDateTime", + "KmsKeyId": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "RestoreExpiryTime", - "smithy.api#documentation": "

Only for archived snapshots that are temporarily restored. Indicates the date and \n time when a temporarily restored snapshot will be automatically re-archived.

", - "smithy.api#xmlName": "restoreExpiryTime" + "aws.protocols#ec2QueryName": "KmsKeyId", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that was used to protect the\n volume encryption key for the parent volume.

", + "smithy.api#xmlName": "kmsKeyId" } }, - "SseType": { - "target": "com.amazonaws.ec2#SSEType", + "DataEncryptionKeyId": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "SseType", - "smithy.api#documentation": "

Reserved for future use.

", - "smithy.api#xmlName": "sseType" + "aws.protocols#ec2QueryName": "DataEncryptionKeyId", + "smithy.api#documentation": "

The data encryption key identifier for the snapshot. This value is a unique identifier\n that corresponds to the data encryption key that was used to encrypt the original volume or\n snapshot copy. Because data encryption keys are inherited by volumes created from snapshots,\n and vice versa, if snapshots share the same data encryption key identifier, then they belong\n to the same volume/snapshot lineage. This parameter is only returned by DescribeSnapshots.

", + "smithy.api#xmlName": "dataEncryptionKeyId" } } }, @@ -96168,7 +96840,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Description", - "smithy.api#documentation": "

The description of the snapshot.

", + "smithy.api#documentation": "

The description of the disk image being imported.

", "smithy.api#xmlName": "description" } }, @@ -96471,14 +97143,6 @@ "com.amazonaws.ec2#SpotFleetLaunchSpecification": { "type": "structure", "members": { - "SecurityGroups": { - "target": "com.amazonaws.ec2#GroupIdentifierList", - "traits": { - "aws.protocols#ec2QueryName": "GroupSet", - "smithy.api#documentation": "

The security groups.

\n

If you specify a network interface, you must specify any security groups as part of\n the network interface instead of using this parameter.

", - "smithy.api#xmlName": "groupSet" - } - }, "AddressingType": { "target": "com.amazonaws.ec2#String", "traits": { @@ -96622,6 +97286,14 @@ "smithy.api#documentation": "

The attributes for the instance types. When you specify instance attributes, Amazon EC2 will\n identify instance types with those attributes.

\n \n

If you specify InstanceRequirements, you can't specify\n InstanceType.

\n
", "smithy.api#xmlName": "instanceRequirements" } + }, + "SecurityGroups": { + "target": "com.amazonaws.ec2#GroupIdentifierList", + "traits": { + "aws.protocols#ec2QueryName": "GroupSet", + "smithy.api#documentation": "

The security groups.

\n

If you specify a network interface, you must specify any security groups as part of\n the network interface instead of using this parameter.

", + "smithy.api#xmlName": "groupSet" + } } }, "traits": { @@ -97830,7 +98502,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } } @@ -98279,7 +98951,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } }, @@ -98461,14 +99133,6 @@ "com.amazonaws.ec2#Subnet": { "type": "structure", "members": { - "AvailabilityZone": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZone", - "smithy.api#documentation": "

The Availability Zone of the subnet.

", - "smithy.api#xmlName": "availabilityZone" - } - }, "AvailabilityZoneId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -98477,30 +99141,6 @@ "smithy.api#xmlName": "availabilityZoneId" } }, - "AvailableIpAddressCount": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "AvailableIpAddressCount", - "smithy.api#documentation": "

The number of unused private IPv4 addresses in the subnet. The IPv4 addresses for any\n\t\t\tstopped instances are considered unavailable.

", - "smithy.api#xmlName": "availableIpAddressCount" - } - }, - "CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "CidrBlock", - "smithy.api#documentation": "

The IPv4 CIDR block assigned to the subnet.

", - "smithy.api#xmlName": "cidrBlock" - } - }, - "DefaultForAz": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "DefaultForAz", - "smithy.api#documentation": "

Indicates whether this is the default subnet for the Availability Zone.

", - "smithy.api#xmlName": "defaultForAz" - } - }, "EnableLniAtDeviceIndex": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -98509,14 +99149,6 @@ "smithy.api#xmlName": "enableLniAtDeviceIndex" } }, - "MapPublicIpOnLaunch": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "MapPublicIpOnLaunch", - "smithy.api#documentation": "

Indicates whether instances launched in this subnet receive a public IPv4 address.

\n

Amazon Web Services charges for all public IPv4 addresses, including public IPv4 addresses \nassociated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page.

", - "smithy.api#xmlName": "mapPublicIpOnLaunch" - } - }, "MapCustomerOwnedIpOnLaunch": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -98533,30 +99165,6 @@ "smithy.api#xmlName": "customerOwnedIpv4Pool" } }, - "State": { - "target": "com.amazonaws.ec2#SubnetState", - "traits": { - "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "

The current state of the subnet.

", - "smithy.api#xmlName": "state" - } - }, - "SubnetId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "SubnetId", - "smithy.api#documentation": "

The ID of the subnet.

", - "smithy.api#xmlName": "subnetId" - } - }, - "VpcId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VpcId", - "smithy.api#documentation": "

The ID of the VPC the subnet is in.

", - "smithy.api#xmlName": "vpcId" - } - }, "OwnerId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -98628,6 +99236,70 @@ "smithy.api#documentation": "

The type of hostnames to assign to instances in the subnet at launch. An instance hostname\n is based on the IPv4 address or ID of the instance.

", "smithy.api#xmlName": "privateDnsNameOptionsOnLaunch" } + }, + "SubnetId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SubnetId", + "smithy.api#documentation": "

The ID of the subnet.

", + "smithy.api#xmlName": "subnetId" + } + }, + "State": { + "target": "com.amazonaws.ec2#SubnetState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "

The current state of the subnet.

", + "smithy.api#xmlName": "state" + } + }, + "VpcId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#documentation": "

The ID of the VPC the subnet is in.

", + "smithy.api#xmlName": "vpcId" + } + }, + "CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CidrBlock", + "smithy.api#documentation": "

The IPv4 CIDR block assigned to the subnet.

", + "smithy.api#xmlName": "cidrBlock" + } + }, + "AvailableIpAddressCount": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "AvailableIpAddressCount", + "smithy.api#documentation": "

The number of unused private IPv4 addresses in the subnet. The IPv4 addresses for any\n\t\t\tstopped instances are considered unavailable.

", + "smithy.api#xmlName": "availableIpAddressCount" + } + }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

The Availability Zone of the subnet.

", + "smithy.api#xmlName": "availabilityZone" + } + }, + "DefaultForAz": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "DefaultForAz", + "smithy.api#documentation": "

Indicates whether this is the default subnet for the Availability Zone.

", + "smithy.api#xmlName": "defaultForAz" + } + }, + "MapPublicIpOnLaunch": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "MapPublicIpOnLaunch", + "smithy.api#documentation": "

Indicates whether instances launched in this subnet receive a public IPv4 address.

\n

Amazon Web Services charges for all public IPv4 addresses, including public IPv4 addresses \nassociated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page.

", + "smithy.api#xmlName": "mapPublicIpOnLaunch" + } } }, "traits": { @@ -99793,7 +100465,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } } @@ -102072,7 +102744,7 @@ "target": "com.amazonaws.ec2#SecurityGroupReferencingSupportValue", "traits": { "aws.protocols#ec2QueryName": "SecurityGroupReferencingSupport", - "smithy.api#documentation": "\n

This parameter is in preview and may not be available for your account.

\n
\n

Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.

", + "smithy.api#documentation": "

Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management.\n\n

\n

This option is disabled by default.

", "smithy.api#xmlName": "securityGroupReferencingSupport" } }, @@ -102723,7 +103395,7 @@ "SecurityGroupReferencingSupport": { "target": "com.amazonaws.ec2#SecurityGroupReferencingSupportValue", "traits": { - "smithy.api#documentation": "\n

This parameter is in preview and may not be available for your account.

\n
\n

Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.

" + "smithy.api#documentation": "

Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management.\n\n

\n

This option is disabled by default.

\n

For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide.
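A sketch of creating a transit gateway with security group referencing turned on, built from the request-options shape above. The .enable case is assumed from the SecurityGroupReferencingSupportValue enum this member targets, and the description text is a placeholder.

```swift
import SotoEC2

func createTransitGatewayWithSGReferencing(ec2: EC2) async throws {
    let options = EC2.TransitGatewayRequestOptions(
        securityGroupReferencingSupport: .enable   // disabled by default per the documentation above
    )
    let response = try await ec2.createTransitGateway(.init(
        description: "tgw with security group referencing",
        options: options
    ))
    print(response.transitGateway?.transitGatewayId ?? "pending")
}
```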

" } }, "MulticastSupport": { @@ -103508,7 +104180,7 @@ "target": "com.amazonaws.ec2#SecurityGroupReferencingSupportValue", "traits": { "aws.protocols#ec2QueryName": "SecurityGroupReferencingSupport", - "smithy.api#documentation": "\n

This parameter is in preview and may not be available for your account.

\n
\n

Enables you to reference a security group across VPCs attached to a transit gateway. Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.

", + "smithy.api#documentation": "

Enables you to reference a security group across VPCs attached to a transit gateway to simplify security group management.

\n

This option is enabled by default.

\n

For more information about security group referencing, see Security group referencing in the Amazon Web Services Transit Gateways Guide.

", "smithy.api#xmlName": "securityGroupReferencingSupport" } }, @@ -103869,14 +104541,6 @@ "com.amazonaws.ec2#UnassignIpv6AddressesRequest": { "type": "structure", "members": { - "Ipv6Addresses": { - "target": "com.amazonaws.ec2#Ipv6AddressList", - "traits": { - "aws.protocols#ec2QueryName": "Ipv6Addresses", - "smithy.api#documentation": "

The IPv6 addresses to unassign from the network interface.

", - "smithy.api#xmlName": "ipv6Addresses" - } - }, "Ipv6Prefixes": { "target": "com.amazonaws.ec2#IpPrefixList", "traits": { @@ -103893,6 +104557,14 @@ "smithy.api#required": {}, "smithy.api#xmlName": "networkInterfaceId" } + }, + "Ipv6Addresses": { + "target": "com.amazonaws.ec2#Ipv6AddressList", + "traits": { + "aws.protocols#ec2QueryName": "Ipv6Addresses", + "smithy.api#documentation": "

The IPv6 addresses to unassign from the network interface.

", + "smithy.api#xmlName": "ipv6Addresses" + } } }, "traits": { @@ -103958,6 +104630,13 @@ "com.amazonaws.ec2#UnassignPrivateIpAddressesRequest": { "type": "structure", "members": { + "Ipv4Prefixes": { + "target": "com.amazonaws.ec2#IpPrefixList", + "traits": { + "smithy.api#documentation": "

The IPv4 prefixes to unassign from the network interface.

", + "smithy.api#xmlName": "Ipv4Prefix" + } + }, "NetworkInterfaceId": { "target": "com.amazonaws.ec2#NetworkInterfaceId", "traits": { @@ -103975,13 +104654,6 @@ "smithy.api#documentation": "

The secondary private IP addresses to unassign from the network interface. You can specify this \n \toption multiple times to unassign more than one IP address.
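A sketch of the two forms this request can take per the member documentation above: individual secondary addresses, or the newly added IPv4 prefixes. The ENI ID, addresses, and prefix are placeholders.

```swift
import SotoEC2

func unassignSecondaryAddresses(ec2: EC2) async throws {
    // Unassign two specific secondary private IPv4 addresses.
    _ = try await ec2.unassignPrivateIpAddresses(.init(
        networkInterfaceId: "eni-0123456789abcdef0",
        privateIpAddresses: ["10.0.1.25", "10.0.1.26"]
    ))

    // Or unassign a whole delegated /28 prefix in one call.
    _ = try await ec2.unassignPrivateIpAddresses(.init(
        ipv4Prefixes: ["10.0.2.0/28"],
        networkInterfaceId: "eni-0123456789abcdef0"
    ))
}
```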

", "smithy.api#xmlName": "privateIpAddress" } - }, - "Ipv4Prefixes": { - "target": "com.amazonaws.ec2#IpPrefixList", - "traits": { - "smithy.api#documentation": "

The IPv4 prefixes to unassign from the network interface.

", - "smithy.api#xmlName": "Ipv4Prefix" - } } }, "traits": { @@ -104169,7 +104841,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "DryRun", - "smithy.api#documentation": "

Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

", + "smithy.api#documentation": "

Checks whether you have the required permissions for the operation, without actually making the \n request, and provides an error response. If you have the required permissions, the error response is \n DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "smithy.api#xmlName": "dryRun" } } @@ -104639,12 +105311,12 @@ "smithy.api#xmlName": "description" } }, - "GroupId": { + "UserId": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "GroupId", - "smithy.api#documentation": "

The ID of the security group.

", - "smithy.api#xmlName": "groupId" + "aws.protocols#ec2QueryName": "UserId", + "smithy.api#documentation": "

The ID of an Amazon Web Services account.

\n

For a referenced security group in another VPC, the account ID of the referenced\n security group is returned in the response. If the referenced security group is deleted,\n this value is not returned.

", + "smithy.api#xmlName": "userId" } }, "GroupName": { @@ -104655,20 +105327,12 @@ "smithy.api#xmlName": "groupName" } }, - "PeeringStatus": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "PeeringStatus", - "smithy.api#documentation": "

The status of a VPC peering connection, if applicable.

", - "smithy.api#xmlName": "peeringStatus" - } - }, - "UserId": { + "GroupId": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "UserId", - "smithy.api#documentation": "

The ID of an Amazon Web Services account.

\n

For a referenced security group in another VPC, the account ID of the referenced\n security group is returned in the response. If the referenced security group is deleted,\n this value is not returned.

", - "smithy.api#xmlName": "userId" + "aws.protocols#ec2QueryName": "GroupId", + "smithy.api#documentation": "

The ID of the security group.

", + "smithy.api#xmlName": "groupId" } }, "VpcId": { @@ -104686,6 +105350,14 @@ "smithy.api#documentation": "

The ID of the VPC peering connection, if applicable.

", "smithy.api#xmlName": "vpcPeeringConnectionId" } + }, + "PeeringStatus": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "PeeringStatus", + "smithy.api#documentation": "

The status of a VPC peering connection, if applicable.

", + "smithy.api#xmlName": "peeringStatus" + } } }, "traits": { @@ -106171,46 +106843,6 @@ "com.amazonaws.ec2#Volume": { "type": "structure", "members": { - "Attachments": { - "target": "com.amazonaws.ec2#VolumeAttachmentList", - "traits": { - "aws.protocols#ec2QueryName": "AttachmentSet", - "smithy.api#documentation": "\n

This parameter is not returned by CreateVolume.

\n
\n

Information about the volume attachments.

", - "smithy.api#xmlName": "attachmentSet" - } - }, - "AvailabilityZone": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZone", - "smithy.api#documentation": "

The Availability Zone for the volume.

", - "smithy.api#xmlName": "availabilityZone" - } - }, - "CreateTime": { - "target": "com.amazonaws.ec2#DateTime", - "traits": { - "aws.protocols#ec2QueryName": "CreateTime", - "smithy.api#documentation": "

The time stamp when volume creation was initiated.

", - "smithy.api#xmlName": "createTime" - } - }, - "Encrypted": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "Encrypted", - "smithy.api#documentation": "

Indicates whether the volume is encrypted.

", - "smithy.api#xmlName": "encrypted" - } - }, - "KmsKeyId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that was used to protect the\n volume encryption key for the volume.

", - "smithy.api#xmlName": "kmsKeyId" - } - }, "OutpostArn": { "target": "com.amazonaws.ec2#String", "traits": { @@ -106219,38 +106851,6 @@ "smithy.api#xmlName": "outpostArn" } }, - "Size": { - "target": "com.amazonaws.ec2#Integer", - "traits": { - "aws.protocols#ec2QueryName": "Size", - "smithy.api#documentation": "

The size of the volume, in GiBs.

", - "smithy.api#xmlName": "size" - } - }, - "SnapshotId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "SnapshotId", - "smithy.api#documentation": "

The snapshot from which the volume was created, if applicable.

", - "smithy.api#xmlName": "snapshotId" - } - }, - "State": { - "target": "com.amazonaws.ec2#VolumeState", - "traits": { - "aws.protocols#ec2QueryName": "Status", - "smithy.api#documentation": "

The volume state.

", - "smithy.api#xmlName": "status" - } - }, - "VolumeId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VolumeId", - "smithy.api#documentation": "

The ID of the volume.

", - "smithy.api#xmlName": "volumeId" - } - }, "Iops": { "target": "com.amazonaws.ec2#Integer", "traits": { @@ -106306,55 +106906,87 @@ "smithy.api#documentation": "\n

This parameter is not returned by CreateVolume.

\n
\n

Reserved for future use.

", "smithy.api#xmlName": "sseType" } - } - }, - "traits": { - "smithy.api#documentation": "

Describes a volume.

" - } - }, - "com.amazonaws.ec2#VolumeAttachment": { - "type": "structure", - "members": { - "AttachTime": { - "target": "com.amazonaws.ec2#DateTime", + }, + "VolumeId": { + "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "AttachTime", - "smithy.api#documentation": "

The time stamp when the attachment initiated.

", - "smithy.api#xmlName": "attachTime" + "aws.protocols#ec2QueryName": "VolumeId", + "smithy.api#documentation": "

The ID of the volume.

", + "smithy.api#xmlName": "volumeId" } }, - "Device": { + "Size": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "Size", + "smithy.api#documentation": "

The size of the volume, in GiBs.

", + "smithy.api#xmlName": "size" + } + }, + "SnapshotId": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "Device", - "smithy.api#documentation": "

The device name.

\n

If the volume is attached to a Fargate task, this parameter \n returns null.

", - "smithy.api#xmlName": "device" + "aws.protocols#ec2QueryName": "SnapshotId", + "smithy.api#documentation": "

The snapshot from which the volume was created, if applicable.

", + "smithy.api#xmlName": "snapshotId" } }, - "InstanceId": { + "AvailabilityZone": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "InstanceId", - "smithy.api#documentation": "

The ID of the instance.

\n

If the volume is attached to a Fargate task, this parameter \n returns null.

", - "smithy.api#xmlName": "instanceId" + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

The Availability Zone for the volume.

", + "smithy.api#xmlName": "availabilityZone" } }, "State": { - "target": "com.amazonaws.ec2#VolumeAttachmentState", + "target": "com.amazonaws.ec2#VolumeState", "traits": { "aws.protocols#ec2QueryName": "Status", - "smithy.api#documentation": "

The attachment state of the volume.

", + "smithy.api#documentation": "

The volume state.

", "smithy.api#xmlName": "status" } }, - "VolumeId": { - "target": "com.amazonaws.ec2#String", + "CreateTime": { + "target": "com.amazonaws.ec2#DateTime", "traits": { - "aws.protocols#ec2QueryName": "VolumeId", - "smithy.api#documentation": "

The ID of the volume.

", - "smithy.api#xmlName": "volumeId" + "aws.protocols#ec2QueryName": "CreateTime", + "smithy.api#documentation": "

The time stamp when volume creation was initiated.

", + "smithy.api#xmlName": "createTime" + } + }, + "Attachments": { + "target": "com.amazonaws.ec2#VolumeAttachmentList", + "traits": { + "aws.protocols#ec2QueryName": "AttachmentSet", + "smithy.api#documentation": "\n

This parameter is not returned by CreateVolume.

\n
\n

Information about the volume attachments.
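A sketch of the note above: because CreateVolume does not return the attachment set, reading attachments means describing the volume afterwards. The volume ID is supplied by the caller, and the Soto async client API is assumed.

```swift
import SotoEC2

func showAttachments(ec2: EC2, volumeId: String) async throws {
    let response = try await ec2.describeVolumes(.init(volumeIds: [volumeId]))
    for attachment in response.volumes?.first?.attachments ?? [] {
        // Per the VolumeAttachment documentation, instanceId and device come back nil
        // when the volume is attached to a Fargate task.
        print(attachment.instanceId ?? "attached to a Fargate task", attachment.device ?? "-")
    }
}
```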

", + "smithy.api#xmlName": "attachmentSet" + } + }, + "Encrypted": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Encrypted", + "smithy.api#documentation": "

Indicates whether the volume is encrypted.

", + "smithy.api#xmlName": "encrypted" } }, + "KmsKeyId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "KmsKeyId", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the KMS key that was used to protect the\n volume encryption key for the volume.

", + "smithy.api#xmlName": "kmsKeyId" + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes a volume.

" + } + }, + "com.amazonaws.ec2#VolumeAttachment": { + "type": "structure", + "members": { "DeleteOnTermination": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -106378,6 +107010,46 @@ "smithy.api#documentation": "

The service principal of Amazon Web Services service that owns the underlying \n instance to which the volume is attached.

\n

This parameter is returned only for volumes that are attached to \n Fargate tasks.

", "smithy.api#xmlName": "instanceOwningService" } + }, + "VolumeId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VolumeId", + "smithy.api#documentation": "

The ID of the volume.

", + "smithy.api#xmlName": "volumeId" + } + }, + "InstanceId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "InstanceId", + "smithy.api#documentation": "

The ID of the instance.

\n

If the volume is attached to a Fargate task, this parameter \n returns null.

", + "smithy.api#xmlName": "instanceId" + } + }, + "Device": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Device", + "smithy.api#documentation": "

The device name.

\n

If the volume is attached to a Fargate task, this parameter \n returns null.

", + "smithy.api#xmlName": "device" + } + }, + "State": { + "target": "com.amazonaws.ec2#VolumeAttachmentState", + "traits": { + "aws.protocols#ec2QueryName": "Status", + "smithy.api#documentation": "

The attachment state of the volume.

", + "smithy.api#xmlName": "status" + } + }, + "AttachTime": { + "target": "com.amazonaws.ec2#DateTime", + "traits": { + "aws.protocols#ec2QueryName": "AttachTime", + "smithy.api#documentation": "

The time stamp when the attachment initiated.

", + "smithy.api#xmlName": "attachTime" + } } }, "traits": { @@ -107069,38 +107741,6 @@ "com.amazonaws.ec2#Vpc": { "type": "structure", "members": { - "CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "CidrBlock", - "smithy.api#documentation": "

The primary IPv4 CIDR block for the VPC.

", - "smithy.api#xmlName": "cidrBlock" - } - }, - "DhcpOptionsId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "DhcpOptionsId", - "smithy.api#documentation": "

The ID of the set of DHCP options you've associated with the VPC.

", - "smithy.api#xmlName": "dhcpOptionsId" - } - }, - "State": { - "target": "com.amazonaws.ec2#VpcState", - "traits": { - "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "

The current state of the VPC.

", - "smithy.api#xmlName": "state" - } - }, - "VpcId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VpcId", - "smithy.api#documentation": "

The ID of the VPC.

", - "smithy.api#xmlName": "vpcId" - } - }, "OwnerId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -107148,6 +107788,38 @@ "smithy.api#documentation": "

Any tags assigned to the VPC.

", "smithy.api#xmlName": "tagSet" } + }, + "VpcId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VpcId", + "smithy.api#documentation": "

The ID of the VPC.

", + "smithy.api#xmlName": "vpcId" + } + }, + "State": { + "target": "com.amazonaws.ec2#VpcState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "

The current state of the VPC.

", + "smithy.api#xmlName": "state" + } + }, + "CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CidrBlock", + "smithy.api#documentation": "

The primary IPv4 CIDR block for the VPC.

", + "smithy.api#xmlName": "cidrBlock" + } + }, + "DhcpOptionsId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "DhcpOptionsId", + "smithy.api#documentation": "

The ID of the set of DHCP options you've associated with the VPC.

", + "smithy.api#xmlName": "dhcpOptionsId" + } } }, "traits": { @@ -107157,14 +107829,6 @@ "com.amazonaws.ec2#VpcAttachment": { "type": "structure", "members": { - "State": { - "target": "com.amazonaws.ec2#AttachmentStatus", - "traits": { - "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "

The current state of the attachment.

", - "smithy.api#xmlName": "state" - } - }, "VpcId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -107172,6 +107836,14 @@ "smithy.api#documentation": "

The ID of the VPC.

", "smithy.api#xmlName": "vpcId" } + }, + "State": { + "target": "com.amazonaws.ec2#AttachmentStatus", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "

The current state of the attachment.

", + "smithy.api#xmlName": "state" + } } }, "traits": { @@ -108104,22 +108776,6 @@ "com.amazonaws.ec2#VpnConnection": { "type": "structure", "members": { - "CustomerGatewayConfiguration": { - "target": "com.amazonaws.ec2#customerGatewayConfiguration", - "traits": { - "aws.protocols#ec2QueryName": "CustomerGatewayConfiguration", - "smithy.api#documentation": "

The configuration information for the VPN connection's customer gateway (in the native\n XML format). This element is always present in the CreateVpnConnection\n response; however, it's present in the DescribeVpnConnections response\n only if the VPN connection is in the pending or available\n state.

", - "smithy.api#xmlName": "customerGatewayConfiguration" - } - }, - "CustomerGatewayId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "CustomerGatewayId", - "smithy.api#documentation": "

The ID of the customer gateway at your end of the VPN connection.

", - "smithy.api#xmlName": "customerGatewayId" - } - }, "Category": { "target": "com.amazonaws.ec2#String", "traits": { @@ -108128,38 +108784,6 @@ "smithy.api#xmlName": "category" } }, - "State": { - "target": "com.amazonaws.ec2#VpnState", - "traits": { - "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "

The current state of the VPN connection.

", - "smithy.api#xmlName": "state" - } - }, - "Type": { - "target": "com.amazonaws.ec2#GatewayType", - "traits": { - "aws.protocols#ec2QueryName": "Type", - "smithy.api#documentation": "

The type of VPN connection.

", - "smithy.api#xmlName": "type" - } - }, - "VpnConnectionId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VpnConnectionId", - "smithy.api#documentation": "

The ID of the VPN connection.

", - "smithy.api#xmlName": "vpnConnectionId" - } - }, - "VpnGatewayId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VpnGatewayId", - "smithy.api#documentation": "

The ID of the virtual private gateway at the Amazon Web Services side of the VPN\n connection.

", - "smithy.api#xmlName": "vpnGatewayId" - } - }, "TransitGatewayId": { "target": "com.amazonaws.ec2#String", "traits": { @@ -108223,6 +108847,54 @@ "smithy.api#documentation": "

Information about the VPN tunnel.

", "smithy.api#xmlName": "vgwTelemetry" } + }, + "VpnConnectionId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VpnConnectionId", + "smithy.api#documentation": "

The ID of the VPN connection.

", + "smithy.api#xmlName": "vpnConnectionId" + } + }, + "State": { + "target": "com.amazonaws.ec2#VpnState", + "traits": { + "aws.protocols#ec2QueryName": "State", + "smithy.api#documentation": "

The current state of the VPN connection.

", + "smithy.api#xmlName": "state" + } + }, + "CustomerGatewayConfiguration": { + "target": "com.amazonaws.ec2#customerGatewayConfiguration", + "traits": { + "aws.protocols#ec2QueryName": "CustomerGatewayConfiguration", + "smithy.api#documentation": "

The configuration information for the VPN connection's customer gateway (in the native\n XML format). This element is always present in the CreateVpnConnection\n response; however, it's present in the DescribeVpnConnections response\n only if the VPN connection is in the pending or available\n state.

", + "smithy.api#xmlName": "customerGatewayConfiguration" + } + }, + "Type": { + "target": "com.amazonaws.ec2#GatewayType", + "traits": { + "aws.protocols#ec2QueryName": "Type", + "smithy.api#documentation": "

The type of VPN connection.

", + "smithy.api#xmlName": "type" + } + }, + "CustomerGatewayId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "CustomerGatewayId", + "smithy.api#documentation": "

The ID of the customer gateway at your end of the VPN connection.

", + "smithy.api#xmlName": "customerGatewayId" + } + }, + "VpnGatewayId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VpnGatewayId", + "smithy.api#documentation": "

The ID of the virtual private gateway at the Amazon Web Services side of the VPN\n connection.
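A minimal sketch of reading these VpnConnection members with SotoEC2, assuming a client configured as in the VPC sketch above; it follows the model's caveat that customerGatewayConfiguration is only returned while the connection is pending or available:

    import SotoEC2

    let ec2 = EC2(client: AWSClient(), region: .useast1)   // client setup as in the earlier sketch
    let result = try await ec2.describeVpnConnections(.init())
    for vpn in result.vpnConnections ?? [] where vpn.state == .pending || vpn.state == .available {
        print(vpn.vpnConnectionId ?? "-", vpn.customerGatewayId ?? "-", vpn.vpnGatewayId ?? "-")
    }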

", + "smithy.api#xmlName": "vpnGatewayId" + } } }, "traits": { @@ -108405,14 +109077,6 @@ "smithy.api#documentation": "

Indicate whether to enable acceleration for the VPN connection.

\n

Default: false\n

" } }, - "StaticRoutesOnly": { - "target": "com.amazonaws.ec2#Boolean", - "traits": { - "aws.protocols#ec2QueryName": "StaticRoutesOnly", - "smithy.api#documentation": "

Indicate whether the VPN connection uses static routes only. If you are creating a VPN\n connection for a device that does not support BGP, you must specify true.\n Use CreateVpnConnectionRoute to create a static route.

\n

Default: false\n

", - "smithy.api#xmlName": "staticRoutesOnly" - } - }, "TunnelInsideIpVersion": { "target": "com.amazonaws.ec2#TunnelInsideIpVersion", "traits": { @@ -108460,6 +109124,14 @@ "traits": { "smithy.api#documentation": "

The transit gateway attachment ID to use for the VPN tunnel.

\n

Required if OutsideIpAddressType is set to PrivateIpv4.

" } + }, + "StaticRoutesOnly": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "StaticRoutesOnly", + "smithy.api#documentation": "

Indicate whether the VPN connection uses static routes only. If you are creating a VPN\n connection for a device that does not support BGP, you must specify true.\n Use CreateVpnConnectionRoute to create a static route.

\n

Default: false\n
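A hedged sketch of filling in these options from Swift; the member names follow the shape above, while the chosen values and the surrounding request are illustrative only:

    import SotoEC2

    // Static routing for a customer gateway device that does not support BGP
    let options = EC2.VpnConnectionOptionsSpecification(
        enableAcceleration: false,   // "Default: false" per the documentation above
        staticRoutesOnly: true       // must be true when the device cannot speak BGP
    )
    // `options` would then be passed as the `options` member of a CreateVpnConnection request.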

", + "smithy.api#xmlName": "staticRoutesOnly" + } } }, "traits": { @@ -108486,12 +109158,28 @@ "com.amazonaws.ec2#VpnGateway": { "type": "structure", "members": { - "AvailabilityZone": { + "AmazonSideAsn": { + "target": "com.amazonaws.ec2#Long", + "traits": { + "aws.protocols#ec2QueryName": "AmazonSideAsn", + "smithy.api#documentation": "

The private Autonomous System Number (ASN) for the Amazon side of a BGP\n session.

", + "smithy.api#xmlName": "amazonSideAsn" + } + }, + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "

Any tags assigned to the virtual private gateway.

", + "smithy.api#xmlName": "tagSet" + } + }, + "VpnGatewayId": { "target": "com.amazonaws.ec2#String", "traits": { - "aws.protocols#ec2QueryName": "AvailabilityZone", - "smithy.api#documentation": "

The Availability Zone where the virtual private gateway was created, if applicable.\n This field may be empty or not returned.

", - "smithy.api#xmlName": "availabilityZone" + "aws.protocols#ec2QueryName": "VpnGatewayId", + "smithy.api#documentation": "

The ID of the virtual private gateway.

", + "smithy.api#xmlName": "vpnGatewayId" } }, "State": { @@ -108510,6 +109198,14 @@ "smithy.api#xmlName": "type" } }, + "AvailabilityZone": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "AvailabilityZone", + "smithy.api#documentation": "

The Availability Zone where the virtual private gateway was created, if applicable.\n This field may be empty or not returned.

", + "smithy.api#xmlName": "availabilityZone" + } + }, "VpcAttachments": { "target": "com.amazonaws.ec2#VpcAttachmentList", "traits": { @@ -108517,30 +109213,6 @@ "smithy.api#documentation": "

Any VPCs attached to the virtual private gateway.

", "smithy.api#xmlName": "attachments" } - }, - "VpnGatewayId": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "VpnGatewayId", - "smithy.api#documentation": "

The ID of the virtual private gateway.

", - "smithy.api#xmlName": "vpnGatewayId" - } - }, - "AmazonSideAsn": { - "target": "com.amazonaws.ec2#Long", - "traits": { - "aws.protocols#ec2QueryName": "AmazonSideAsn", - "smithy.api#documentation": "

The private Autonomous System Number (ASN) for the Amazon side of a BGP\n session.

", - "smithy.api#xmlName": "amazonSideAsn" - } - }, - "Tags": { - "target": "com.amazonaws.ec2#TagList", - "traits": { - "aws.protocols#ec2QueryName": "TagSet", - "smithy.api#documentation": "

Any tags assigned to the virtual private gateway.

", - "smithy.api#xmlName": "tagSet" - } } }, "traits": { diff --git a/models/ecr.json b/models/ecr.json index 01682344f8..fe5e547ed2 100644 --- a/models/ecr.json +++ b/models/ecr.json @@ -3499,7 +3499,7 @@ "encryptionType": { "target": "com.amazonaws.ecr#EncryptionType", "traits": { - "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created.

\n

If you use the KMS_DSSE encryption type, the contents of the repository\n will be encrypted with two layers of encryption using server-side encryption with the\n KMS Management Service key stored in KMS. Similar to the KMS encryption type, you\n can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS\n key, which you've already created.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.

", + "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created.

\n

If you use the KMS_DSSE encryption type, the contents of the repository\n will be encrypted with two layers of encryption using server-side encryption with the\n KMS Management Service key stored in KMS. Similar to the KMS encryption type, you\n can either use the default Amazon Web Services managed KMS key for Amazon ECR, or specify your own KMS\n key, which you've already created.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES256 encryption algorithm.

\n

For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.
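As an illustrative SotoECR sketch (not part of the generated model): selecting one of the encryption types described above when creating a repository. The repository name and key ARN are placeholders, and the client construction is an assumption that may vary by Soto version:

    import SotoECR

    let client = AWSClient()
    defer { try? client.syncShutdown() }
    let ecr = ECR(client: client, region: .useast1)

    // .aes256 and .kmsDsse are the other documented encryption types
    let repo = try await ecr.createRepository(.init(
        encryptionConfiguration: .init(
            encryptionType: .kms,
            kmsKey: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"   // omit to use the AWS managed key
        ),
        repositoryName: "demo/app"
    ))
    print(repo.repository?.repositoryUri ?? "-")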

", "smithy.api#required": {} } }, @@ -3651,6 +3651,18 @@ "traits": { "smithy.api#documentation": "

The date and time the finding was last updated at.

" } + }, + "fixAvailable": { + "target": "com.amazonaws.ecr#FixAvailable", + "traits": { + "smithy.api#documentation": "

Details on whether a fix is available through a version update. This value can be\n YES, NO, or PARTIAL. A PARTIAL\n fix means that some, but not all, of the packages identified in the finding have fixes\n available through updated versions.

" + } + }, + "exploitAvailable": { + "target": "com.amazonaws.ecr#ExploitAvailable", + "traits": { + "smithy.api#documentation": "

If a finding discovered in your environment has an exploit available.

" + } } }, "traits": { @@ -3675,6 +3687,9 @@ "com.amazonaws.ecr#ExpirationTimestamp": { "type": "timestamp" }, + "com.amazonaws.ecr#ExploitAvailable": { + "type": "string" + }, "com.amazonaws.ecr#FilePath": { "type": "string" }, @@ -3737,6 +3752,12 @@ "target": "com.amazonaws.ecr#SeverityCount" } }, + "com.amazonaws.ecr#FixAvailable": { + "type": "string" + }, + "com.amazonaws.ecr#FixedInVersion": { + "type": "string" + }, "com.amazonaws.ecr#ForceFlag": { "type": "boolean", "traits": { @@ -8639,6 +8660,12 @@ "traits": { "smithy.api#documentation": "

The version of the vulnerable package.

" } + }, + "fixedInVersion": { + "target": "com.amazonaws.ecr#FixedInVersion", + "traits": { + "smithy.api#documentation": "

The version of the package that contains the vulnerability fix.
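A rough sketch of how the fixAvailable, exploitAvailable and fixedInVersion members added here might be consumed from SotoECR; the repository and tag are placeholders and the response traversal assumes the existing enhanced-scanning shapes:

    import SotoECR

    let ecr = ECR(client: AWSClient(), region: .useast1)   // client setup as in the earlier sketch
    let scan = try await ecr.describeImageScanFindings(.init(
        imageId: .init(imageTag: "latest"),
        repositoryName: "demo/app"
    ))
    for finding in scan.imageScanFindings?.enhancedFindings ?? [] where finding.fixAvailable == "YES" {
        // fixedInVersion reports the package version that contains the fix
        let fixedVersions = finding.packageVulnerabilityDetails?.vulnerablePackages?
            .compactMap(\.fixedInVersion) ?? []
        print(finding.title ?? "untitled finding", fixedVersions, finding.exploitAvailable ?? "UNKNOWN")
    }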

" + } } }, "traits": { diff --git a/models/ecs.json b/models/ecs.json index c65b3d1beb..817557e4f8 100644 --- a/models/ecs.json +++ b/models/ecs.json @@ -1681,7 +1681,7 @@ } }, "traits": { - "smithy.api#documentation": "

The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTaskor CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.

\n

Only capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE or UPDATING status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateClusterCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.

\n

With FARGATE_SPOT, you can run interruption tolerant tasks at a rate\n\t\t\tthat's discounted compared to the FARGATE price. FARGATE_SPOT\n\t\t\truns tasks on spare compute capacity. When Amazon Web Services needs the capacity back, your tasks are\n\t\t\tinterrupted with a two-minute warning. FARGATE_SPOT only supports Linux\n\t\t\ttasks with the X86_64 architecture on platform version 1.3.0 or later.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.

" + "smithy.api#documentation": "

The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.

\n

Only capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE or UPDATING status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.

\n

If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateClusterCapacityProvider API operation.

\n

To use a Fargate capacity provider, specify either the FARGATE or\n\t\t\t\tFARGATE_SPOT capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.

\n

With FARGATE_SPOT, you can run interruption tolerant tasks at a rate that's\n\t\t\tdiscounted compared to the FARGATE price. FARGATE_SPOT runs\n\t\t\ttasks on spare compute capacity. When Amazon Web Services needs the capacity back, your tasks are\n\t\t\tinterrupted with a two-minute warning. FARGATE_SPOT supports Linux tasks\n\t\t\twith the X86_64 architecture on platform version 1.3.0 or later.\n\t\t\t\tFARGATE_SPOT supports Linux tasks with the ARM64 architecture on\n\t\t\tplatform version 1.4.0 or later.

\n

A capacity provider strategy may contain a maximum of 6 capacity providers.
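A hedged SotoECS sketch of the strategy described above, mixing FARGATE and FARGATE_SPOT; the cluster and task definition names are placeholders:

    import SotoECS

    // At most 6 items; only providers already associated with the cluster are valid
    let strategy: [ECS.CapacityProviderStrategyItem] = [
        .init(base: 1, capacityProvider: "FARGATE", weight: 1),    // guaranteed baseline
        .init(capacityProvider: "FARGATE_SPOT", weight: 4)         // interruption-tolerant overflow
    ]

    let ecs = ECS(client: AWSClient(), region: .useast1)
    _ = try await ecs.runTask(.init(
        capacityProviderStrategy: strategy,
        cluster: "demo-cluster",
        count: 1,
        taskDefinition: "web:1"
    ))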

" } }, "com.amazonaws.ecs#CapacityProviderStrategyItemBase": { @@ -2279,13 +2279,13 @@ "name": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The name of a container. If you're linking multiple containers together in a task\n\t\t\tdefinition, the name of one container can be entered in the\n\t\t\t\tlinks of another container to connect the containers.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in tthe docker conainer create command and the\n\t\t\t\t--name option to docker\n\t\t\trun.

" + "smithy.api#documentation": "

The name of a container. If you're linking multiple containers together in a task\n\t\t\tdefinition, the name of one container can be entered in the\n\t\t\t\tlinks of another container to connect the containers.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to name in the docker container create command and the\n\t\t\t\t--name option to docker\n\t\t\trun.

" } }, "image": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n repository-url/image:tag\n or \n repository-url/image@digest\n . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker conainer create command and the\n\t\t\t\tIMAGE parameter of docker\n\t\t\t\trun.

\n
    \n
  • \n

    When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.

    \n
  • \n
  • \n

    Images in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag or\n\t\t\t\t\t\tregistry/repository@digest. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/:latest\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.\n\t\t\t\t

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu).

    \n
  • \n
" + "smithy.api#documentation": "

The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n repository-url/image:tag\n or \n repository-url/image@digest\n . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the docker container create command and the\n\t\t\t\tIMAGE parameter of docker\n\t\t\t\trun.

\n
    \n
  • \n

    When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.

    \n
  • \n
  • \n

    Images in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag or\n\t\t\t\t\t\tregistry/repository@digest. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/:latest\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE.\n\t\t\t\t

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu).

    \n
  • \n
" } }, "repositoryCredentials": { @@ -2298,31 +2298,31 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The number of cpu units reserved for the container. This parameter maps\n\t\t\tto CpuShares in the docker conainer create commandand the --cpu-shares option to docker run.

\n

This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu value.

\n \n

You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.

\n
\n

Linux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.

\n

On Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value\n\t\t\tthat the Linux kernel allows is 2, and the\n\t\t\tmaximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you\n\t\t\tcan use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2\n\t\t\t(including null) or above 262144, the behavior varies based on your Amazon ECS container agent\n\t\t\tversion:

\n
    \n
  • \n

    \n Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to\n\t\t\t\t\t\t1.84.0: CPU values greater than 256 vCPU are passed to Docker as\n\t\t\t\t\t256, which is equivalent to 262144 CPU shares.

    \n
  • \n
\n

On Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0, which Windows interprets as 1% of one CPU.

" + "smithy.api#documentation": "

The number of cpu units reserved for the container. This parameter maps\n\t\t\tto CpuShares in the docker container create command and the --cpu-shares option to docker run.

\n

This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu value.

\n \n

You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.

\n
\n

Linux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.

\n

On Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value\n\t\t\tthat the Linux kernel allows is 2, and the\n\t\t\tmaximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you\n\t\t\tcan use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2\n\t\t\t(including null) or above 262144, the behavior varies based on your Amazon ECS container agent\n\t\t\tversion:

\n
    \n
  • \n

    \n Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.

    \n
  • \n
  • \n

    \n Agent versions greater than or equal to\n\t\t\t\t\t\t1.84.0: CPU values greater than 256 vCPU are passed to Docker as\n\t\t\t\t\t256, which is equivalent to 262144 CPU shares.

    \n
  • \n
\n

On Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0, which Windows interprets as 1% of one CPU.

" } }, "memory": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory value, if one is specified. This parameter maps to\n\t\t\tMemory in thethe docker conainer create command and the --memory option to docker run.

\n

If using the Fargate launch type, this parameter is optional.

\n

If using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory and memoryReservation value, memory\n\t\t\tmust be greater than memoryReservation. If you specify\n\t\t\t\tmemoryReservation, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory is used.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" + "smithy.api#documentation": "

The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory value, if one is specified. This parameter maps to\n\t\t\tMemory in the docker container create command and the --memory option to docker run.

\n

If using the Fargate launch type, this parameter is optional.

\n

If using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory and memoryReservation value, memory\n\t\t\tmust be greater than memoryReservation. If you specify\n\t\t\t\tmemoryReservation, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory is used.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" } }, "memoryReservation": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation in the the docker conainer create command and the --memory-reservation option to docker run.

\n

If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory or memoryReservation in a container\n\t\t\tdefinition. If you specify both, memory must be greater than\n\t\t\t\tmemoryReservation. If you specify memoryReservation, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory is\n\t\t\tused.

\n

For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation of 128 MiB, and a memory hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.

" + "smithy.api#documentation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation in the docker container create command and the --memory-reservation option to docker run.

\n

If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory or memoryReservation in a container\n\t\t\tdefinition. If you specify both, memory must be greater than\n\t\t\t\tmemoryReservation. If you specify memoryReservation, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory is\n\t\t\tused.

\n

For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation of 128 MiB, and a memory hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.

\n

The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.

\n

The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
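A short, hedged sketch of how the memory and memoryReservation members interact in a Soto ECS container definition; the image and sizes are placeholders chosen to match the worked example in the text above:

    import SotoECS

    // Soft limit of 128 MiB with a 300 MiB hard cap, as in the example above
    let web = ECS.ContainerDefinition(
        cpu: 256,
        essential: true,
        image: "public.ecr.aws/nginx/nginx:latest",
        memory: 300,               // hard limit: the container is killed above this
        memoryReservation: 128,    // soft limit: must be lower than `memory`
        name: "web"
    )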

" } }, "links": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The links parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge. The name:internalName\n\t\t\tconstruct is analogous to name:alias in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to Links in the docker conainer create command and the\n\t\t\t\t--link option to docker\n\t\t\trun.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

Containers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.

\n
" + "smithy.api#documentation": "

The links parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge. The name:internalName\n\t\t\tconstruct is analogous to name:alias in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to Links in the docker container create command and the\n\t\t\t\t--link option to docker\n\t\t\trun.

\n \n

This parameter is not supported for Windows containers.

\n
\n \n

Containers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.

\n
" } }, "portMappings": { "target": "com.amazonaws.ecs#PortMappingList", "traits": { - "smithy.api#documentation": "

The list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.

\n

For task definitions that use the awsvpc network mode, only specify the\n\t\t\t\tcontainerPort. The hostPort can be left blank or it must\n\t\t\tbe the same value as the containerPort.

\n

Port mappings on Windows use the NetNAT gateway address rather than\n\t\t\t\tlocalhost. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.

\n

This parameter maps to PortBindings in the\n\t\t\tthe docker conainer create command and the\n\t\t\t\t--publish option to docker\n\t\t\t\trun. If the network mode of a task definition is set to none,\n\t\t\tthen you can't specify port mappings. If the network mode of a task definition is set to\n\t\t\t\thost, then host ports must either be undefined or they must match the\n\t\t\tcontainer port in the port mapping.

\n \n

After a task reaches the RUNNING status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\tnetworkBindings section DescribeTasks\n\t\t\t\tresponses.

\n
" + "smithy.api#documentation": "

The list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.

\n

For task definitions that use the awsvpc network mode, only specify the\n\t\t\t\tcontainerPort. The hostPort can be left blank or it must\n\t\t\tbe the same value as the containerPort.

\n

Port mappings on Windows use the NetNAT gateway address rather than\n\t\t\t\tlocalhost. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.

\n

This parameter maps to PortBindings in the\n\t\t\tdocker container create command and the\n\t\t\t\t--publish option to docker\n\t\t\t\trun. If the network mode of a task definition is set to none,\n\t\t\tthen you can't specify port mappings. If the network mode of a task definition is set to\n\t\t\t\thost, then host ports must either be undefined or they must match the\n\t\t\tcontainer port in the port mapping.

\n \n

After a task reaches the RUNNING status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\tnetworkBindings section DescribeTasks\n\t\t\t\tresponses.
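A minimal sketch of the two port-mapping cases described above; the port numbers are illustrative:

    import SotoECS

    // awsvpc network mode: specify only containerPort (hostPort defaults to the same value)
    let awsvpcMapping = ECS.PortMapping(containerPort: 8080)

    // bridge network mode: an explicit host port is allowed
    let bridgeMapping = ECS.PortMapping(containerPort: 8080, hostPort: 80)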

\n
" } }, "essential": { @@ -2340,19 +2340,19 @@ "entryPoint": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "\n

Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint parameters. If you have problems using\n\t\t\t\t\tentryPoint, update your container agent or enter your commands and\n\t\t\t\targuments as command array items instead.

\n
\n

The entry point that's passed to the container. This parameter maps to\n\t\t\tEntrypoint in tthe docker conainer create command and the --entrypoint option to docker run.

" + "smithy.api#documentation": "\n

Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint parameters. If you have problems using\n\t\t\t\t\tentryPoint, update your container agent or enter your commands and\n\t\t\t\targuments as command array items instead.

\n
\n

The entry point that's passed to the container. This parameter maps to\n\t\t\tEntrypoint in the docker container create command and the --entrypoint option to docker run.

" } }, "command": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The command that's passed to the container. This parameter maps to Cmd in\n\t\t\tthe docker conainer create command and the\n\t\t\t\tCOMMAND parameter to docker\n\t\t\t\trun. If there are multiple arguments, each\n\t\t\targument is a separated string in the array.

" + "smithy.api#documentation": "

The command that's passed to the container. This parameter maps to Cmd in\n\t\t\tthe docker container create command and the\n\t\t\t\tCOMMAND parameter to docker\n\t\t\t\trun. If there are multiple arguments, each\n\t\t\targument is a separated string in the array.
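A small sketch showing the point above that each argument is a separate array item; the image, entry point, and command are placeholders:

    import SotoECS

    // Equivalent to ENTRYPOINT ["sh", "-c"] with CMD ["echo hello && sleep 30"]
    let job = ECS.ContainerDefinition(
        command: ["echo hello && sleep 30"],   // each argument is its own array element
        entryPoint: ["sh", "-c"],
        essential: true,
        image: "public.ecr.aws/docker/library/alpine:latest",
        name: "hello"
    )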

" } }, "environment": { "target": "com.amazonaws.ecs#EnvironmentVariables", "traits": { - "smithy.api#documentation": "

The environment variables to pass to a container. This parameter maps to\n\t\t\tEnv in the docker conainer create command and the --env option to docker run.

\n \n

We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.

\n
" + "smithy.api#documentation": "

The environment variables to pass to a container. This parameter maps to\n\t\t\tEnv in the docker container create command and the --env option to docker run.

\n \n

We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.

\n
" } }, "environmentFiles": { @@ -2364,13 +2364,13 @@ "mountPoints": { "target": "com.amazonaws.ecs#MountPointList", "traits": { - "smithy.api#documentation": "

The mount points for data volumes in your container.

\n

This parameter maps to Volumes in the the docker conainer create command and the --volume option to docker run.

\n

Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.

" + "smithy.api#documentation": "

The mount points for data volumes in your container.

\n

This parameter maps to Volumes in the docker container create command and the --volume option to docker run.

\n

Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.

" } }, "volumesFrom": { "target": "com.amazonaws.ecs#VolumeFromList", "traits": { - "smithy.api#documentation": "

Data volumes to mount from another container. This parameter maps to\n\t\t\tVolumesFrom in tthe docker conainer create command and the --volumes-from option to docker run.

" + "smithy.api#documentation": "

Data volumes to mount from another container. This parameter maps to\n\t\t\tVolumesFrom in the docker container create command and the --volumes-from option to docker run.

" } }, "linuxParameters": { @@ -2400,109 +2400,109 @@ "stopTimeout": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.

\n

For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

The max stop timeout value is 120 seconds and if the parameter is not specified, the\n\t\t\tdefault value of 30 seconds is used.

\n

For tasks that use the EC2 launch type, if the stopTimeout\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the\n\t\t\t\tstopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to use a container stop timeout\n\t\t\tvalue. However, we recommend using the latest container agent version. For information\n\t\t\tabout checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

\n

The valid values are 2-120 seconds.

" + "smithy.api#documentation": "

Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.

\n

For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:

\n
    \n
  • \n

    Linux platform version 1.3.0 or later.

    \n
  • \n
  • \n

    Windows platform version 1.0.0 or later.

    \n
  • \n
\n

For tasks that use the Fargate launch type, the max stop timeout value is 120 seconds and if the parameter is not specified, the\n\t\t\tdefault value of 30 seconds is used.

\n

For tasks that use the EC2 launch type, if the stopTimeout\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT is used. If neither the\n\t\t\t\tstopTimeout parameter or the ECS_CONTAINER_STOP_TIMEOUT\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to use a container stop timeout\n\t\t\tvalue. However, we recommend using the latest container agent version. For information\n\t\t\tabout checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init package. If your container instances are launched from version\n\t\t\t\t20190301 or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

\n

The valid values for Fargate are 2-120 seconds.

" } }, "hostname": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The hostname to use for your container. This parameter maps to Hostname\n\t\t\tin thethe docker conainer create command and the\n\t\t\t\t--hostname option to docker\n\t\t\t\trun.

\n \n

The hostname parameter is not supported if you're using the\n\t\t\t\t\tawsvpc network mode.

\n
" + "smithy.api#documentation": "

The hostname to use for your container. This parameter maps to Hostname\n\t\t\tin the docker container create command and the\n\t\t\t\t--hostname option to docker\n\t\t\t\trun.

\n \n

The hostname parameter is not supported if you're using the\n\t\t\t\t\tawsvpc network mode.

\n
" } }, "user": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The user to use inside the container. This parameter maps to User in the docker conainer create command and the\n\t\t\t\t--user option to docker\n\t\t\trun.

\n \n

When running tasks using the host network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.

\n
\n

You can specify the user using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.

\n
    \n
  • \n

    \n user\n

    \n
  • \n
  • \n

    \n user:group\n

    \n
  • \n
  • \n

    \n uid\n

    \n
  • \n
  • \n

    \n uid:gid\n

    \n
  • \n
  • \n

    \n user:gid\n

    \n
  • \n
  • \n

    \n uid:group\n

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

The user to use inside the container. This parameter maps to User in the docker container create command and the\n\t\t\t\t--user option to docker\n\t\t\trun.

\n \n

When running tasks using the host network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.

\n
\n

You can specify the user using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.

\n
    \n
  • \n

    \n user\n

    \n
  • \n
  • \n

    \n user:group\n

    \n
  • \n
  • \n

    \n uid\n

    \n
  • \n
  • \n

    \n uid:gid\n

    \n
  • \n
  • \n

    \n user:gid\n

    \n
  • \n
  • \n

    \n uid:group\n

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
" } }, "workingDirectory": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The working directory to run commands inside the container in. This parameter maps to\n\t\t\tWorkingDir in the docker conainer create command and the --workdir option to docker run.

" + "smithy.api#documentation": "

The working directory to run commands inside the container in. This parameter maps to\n\t\t\tWorkingDir in the docker container create command and the --workdir option to docker run.

" } }, "disableNetworking": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, networking is off within the container. This parameter\n\t\t\tmaps to NetworkDisabled in the docker conainer create command.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

When this parameter is true, networking is off within the container. This parameter\n\t\t\tmaps to NetworkDisabled in the docker container create command.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "privileged": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root user). This parameter maps to\n\t\t\tPrivileged in the the docker conainer create command and the --privileged option to docker run

\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" + "smithy.api#documentation": "

When this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root user). This parameter maps to\n\t\t\tPrivileged in the docker container create command and the --privileged option to docker run.

\n \n

This parameter is not supported for Windows containers or tasks run on Fargate.

\n
" } }, "readonlyRootFilesystem": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs in the docker conainer create command and the\n\t\t\t\t--read-only option to docker\n\t\t\t\trun.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

When this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs in the docker container create command and the\n\t\t\t\t--read-only option to docker\n\t\t\t\trun.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "dnsServers": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of DNS servers that are presented to the container. This parameter maps to\n\t\t\tDns in the the docker conainer create command and the --dns option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of DNS servers that are presented to the container. This parameter maps to\n\t\t\tDns in the docker container create command and the --dns option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "dnsSearchDomains": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch in the docker conainer create command and the --dns-search option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch in the docker container create command and the --dns-search option to docker run.

\n \n

This parameter is not supported for Windows containers.

\n
" } }, "extraHosts": { "target": "com.amazonaws.ecs#HostEntryList", "traits": { - "smithy.api#documentation": "

A list of hostnames and IP address mappings to append to the /etc/hosts\n\t\t\tfile on the container. This parameter maps to ExtraHosts in the docker conainer create command and the\n\t\t\t\t--add-host option to docker\n\t\t\t\trun.

\n \n

This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc network mode.

\n
" + "smithy.api#documentation": "

A list of hostnames and IP address mappings to append to the /etc/hosts\n\t\t\tfile on the container. This parameter maps to ExtraHosts in the docker container create command and the\n\t\t\t\t--add-host option to docker\n\t\t\t\trun.

\n \n

This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc network mode.

\n
" } }, "dockerSecurityOptions": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks\n\t\t\tusing the Fargate launch type.

\n

For Linux tasks on EC2, this parameter can be used to reference custom\n\t\t\tlabels for SELinux and AppArmor multi-level security systems.

\n

For any tasks on EC2, this parameter can be used to reference a\n\t\t\tcredential spec file that configures a container for Active Directory authentication.\n\t\t\tFor more information, see Using gMSAs for Windows\n\t\t\t\tContainers and Using gMSAs for Linux\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.

\n

This parameter maps to SecurityOpt in the docker conainer create command and the\n\t\t\t\t--security-opt option to docker\n\t\t\t\trun.

\n \n

The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
\n

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"

" + "smithy.api#documentation": "

A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks\n\t\t\tusing the Fargate launch type.

\n

For Linux tasks on EC2, this parameter can be used to reference custom\n\t\t\tlabels for SELinux and AppArmor multi-level security systems.

\n

For any tasks on EC2, this parameter can be used to reference a\n\t\t\tcredential spec file that configures a container for Active Directory authentication.\n\t\t\tFor more information, see Using gMSAs for Windows\n\t\t\t\tContainers and Using gMSAs for Linux\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.

\n

This parameter maps to SecurityOpt in the docker container create command and the\n\t\t\t\t--security-opt option to docker\n\t\t\t\trun.

\n \n

The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
\n

Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"

" } }, "interactive": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, you can deploy containerized applications\n\t\t\tthat require stdin or a tty to be allocated. This parameter\n\t\t\tmaps to OpenStdin in the docker conainer create command and the --interactive option to docker run.

" + "smithy.api#documentation": "

When this parameter is true, you can deploy containerized applications\n\t\t\tthat require stdin or a tty to be allocated. This parameter\n\t\t\tmaps to OpenStdin in the docker container create command and the --interactive option to docker run.

" } }, "pseudoTerminal": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "

When this parameter is true, a TTY is allocated. This parameter maps to\n\t\t\tTty in tthe docker conainer create command and the --tty option to docker run.

" + "smithy.api#documentation": "

When this parameter is true, a TTY is allocated. This parameter maps to\n\t\t\tTty in the docker container create command and the --tty option to docker run.

" } }, "dockerLabels": { "target": "com.amazonaws.ecs#DockerLabelsMap", "traits": { - "smithy.api#documentation": "

A key/value map of labels to add to the container. This parameter maps to\n\t\t\tLabels in the docker conainer create command and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" + "smithy.api#documentation": "

A key/value map of labels to add to the container. This parameter maps to\n\t\t\tLabels in the docker container create command and the --label option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n
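A short sketch of the labels plus the stdin/TTY flags documented just above, again assuming Soto's generated `ECS.ContainerDefinition` member names; the label keys, values, and image are placeholders:

```swift
import SotoECS

// Hypothetical debug container: custom labels, stdin kept open, TTY allocated.
let container = ECS.ContainerDefinition(
    dockerLabels: ["team": "payments", "env": "staging"],
    essential: true,
    image: "example/tools:latest",
    interactive: true,      // maps to OpenStdin
    name: "debug-shell",
    pseudoTerminal: true    // maps to Tty
)
```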

" } }, "ulimits": { "target": "com.amazonaws.ecs#UlimitList", "traits": { - "smithy.api#documentation": "

A list of ulimits to set in the container. If a ulimit value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits in tthe docker conainer create command and the --ulimit option to docker run. Valid naming values are displayed\n\t\t\tin the Ulimit data type.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 65535 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

This parameter is not supported for Windows containers.

\n
" + "smithy.api#documentation": "

A list of ulimits to set in the container. If a ulimit value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits in the docker container create command and the --ulimit option to docker run. Valid naming values are displayed\n\t\t\tin the Ulimit data type.

\n

Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile soft limit is 65535 and the default hard limit\n\t\t\t\t\t\t\tis 65535.

\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

This parameter is not supported for Windows containers.
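As a sketch of the `ulimits` member above, assuming Soto generates an `ECS.Ulimit` shape with `hardLimit`/`name`/`softLimit` members; the 65535 values simply mirror the Fargate defaults quoted in the text:

```swift
import SotoECS

// Hypothetical container definition that pins the nofile soft/hard limits.
let container = ECS.ContainerDefinition(
    essential: true,
    image: "example/api:latest",
    name: "api",
    ulimits: [
        ECS.Ulimit(hardLimit: 65535, name: .nofile, softLimit: 65535)
    ]
)
```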

\n
" } }, "logConfiguration": { "target": "com.amazonaws.ecs#LogConfiguration", "traits": { - "smithy.api#documentation": "

The log configuration specification for the container.

\n

This parameter maps to LogConfig in the docker conainer create command and the\n\t\t\t\t--log-driver option to docker\n\t\t\t\trun. By default, containers use the same logging driver that the Docker\n\t\t\tdaemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To\n\t\t\tuse a different logging driver for a container, the log system must be configured\n\t\t\tproperly on the container instance (or on a different log server for remote logging\n\t\t\toptions).

\n \n

Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log\n\t\t\t\tdrivers may be available in future releases of the Amazon ECS container agent.

\n
\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.

\n
" + "smithy.api#documentation": "

The log configuration specification for the container.

\n

This parameter maps to LogConfig in the docker container create command and the\n\t\t\t\t--log-driver option to docker\n\t\t\t\trun. By default, containers use the same logging driver that the Docker\n\t\t\tdaemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To\n\t\t\tuse a different logging driver for a container, the log system must be configured\n\t\t\tproperly on the container instance (or on a different log server for remote logging\n\t\t\toptions).

\n \n

Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log\n\t\t\t\tdrivers may be available in future releases of the Amazon ECS container agent.

\n
\n

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

\n \n

The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
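The log configuration member above can be exercised from Swift roughly as follows, assuming the Soto-generated `ECS.LogConfiguration` shape and `.awslogs` driver case; the log group, region, and prefix are placeholders:

```swift
import SotoECS

// Hypothetical container routing its logs to CloudWatch Logs via awslogs.
let logConfig = ECS.LogConfiguration(
    logDriver: .awslogs,
    options: [
        "awslogs-group": "/ecs/webapp",
        "awslogs-region": "us-east-1",
        "awslogs-stream-prefix": "webapp"
    ]
)
let container = ECS.ContainerDefinition(
    essential: true,
    image: "example/webapp:latest",
    logConfiguration: logConfig,
    name: "webapp"
)
```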

\n
" } }, "healthCheck": { "target": "com.amazonaws.ecs#HealthCheck", "traits": { - "smithy.api#documentation": "

The container health check command and associated configuration parameters for the\n\t\t\tcontainer. This parameter maps to HealthCheck in the docker conainer create command and the\n\t\t\t\tHEALTHCHECK parameter of docker\n\t\t\t\trun.

" + "smithy.api#documentation": "

The container health check command and associated configuration parameters for the\n\t\t\tcontainer. This parameter maps to HealthCheck in the docker container create command and the\n\t\t\t\tHEALTHCHECK parameter of docker\n\t\t\t\trun.
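A sketch of the health check member above using the CMD-SHELL form from the documentation, assuming a Soto-generated `ECS.HealthCheck` shape; the timing values are illustrative only:

```swift
import SotoECS

// Hypothetical health check: curl the container's local endpoint.
let container = ECS.ContainerDefinition(
    essential: true,
    healthCheck: ECS.HealthCheck(
        command: ["CMD-SHELL", "curl -f http://localhost/ || exit 1"],
        interval: 30,     // seconds between checks
        retries: 3,       // consecutive failures before UNHEALTHY
        startPeriod: 10,  // grace period after container start
        timeout: 5        // seconds allowed per check
    ),
    image: "example/webapp:latest",
    name: "webapp"
)
```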

" } }, "systemControls": { "target": "com.amazonaws.ecs#SystemControls", "traits": { - "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\tSysctls in tthe docker conainer create command and the --sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.

" + "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\tSysctls in the docker container create command and the --sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.
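The `systemControls` member above could be set like this in Swift, assuming Soto generates `ECS.SystemControl` with `namespace`/`value` members; the sysctl value is a placeholder taken from the example in the text:

```swift
import SotoECS

// Hypothetical container tuning TCP keepalive for long-lived connections.
let container = ECS.ContainerDefinition(
    essential: true,
    image: "example/proxy:latest",
    name: "proxy",
    systemControls: [
        ECS.SystemControl(namespace: "net.ipv4.tcp_keepalive_time", value: "600")
    ]
)
```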

" } }, "resourceRequirements": { @@ -3136,7 +3136,7 @@ } ], "traits": { - "smithy.api#documentation": "

Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, use UpdateService.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING state and are reported as\n\t\t\thealthy by the load balancer.

\n

There are two service scheduler strategies available:

  • REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

  • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent is 0%.

\n

If a service uses the ECS deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.

\n

If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.

\n

If a service uses either the CODE_DEPLOY or EXTERNAL\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING state.\n\t\t\tThis is while the container instances are in the DRAINING state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.

\n

When creating a service that uses the EXTERNAL deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

\n

When the service scheduler launches new tasks, it determines task placement. For\n\t\t\tinformation about task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide\n

\n

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

", + "smithy.api#documentation": "

Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, use UpdateService.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING state and are reported as\n\t\t\thealthy by the load balancer.

\n

There are two service scheduler strategies available:

  • REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

  • DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.

You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent is 0%.

\n

If a service uses the ECS deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.

\n

If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.

\n

If a service uses either the CODE_DEPLOY or EXTERNAL\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING state.\n\t\t\tThis is while the container instances are in the DRAINING state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.

\n

When creating a service that uses the EXTERNAL deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

\n

When the service scheduler launches new tasks, it determines task placement. For\n\t\t\tinformation about task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide\n
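A rough end-to-end sketch of CreateService as described above, assuming recent Soto conventions for client setup (`AWSClient()`, `ECS(client:region:)`) and the generated `ECS.CreateServiceRequest`; cluster, task definition, and service names are placeholders, and the exact client lifecycle calls may differ between Soto releases:

```swift
import SotoECS

// Hypothetical REPLICA service of four Fargate tasks with explicit
// minimum healthy / maximum percent values.
let client = AWSClient()
let ecs = ECS(client: client, region: .useast1)

let request = ECS.CreateServiceRequest(
    cluster: "default",
    deploymentConfiguration: ECS.DeploymentConfiguration(
        maximumPercent: 200,
        minimumHealthyPercent: 100
    ),
    desiredCount: 4,
    launchType: .fargate,
    serviceName: "webapp",
    taskDefinition: "webapp:3"
)
let response = try await ecs.createService(request)
print(response.service?.serviceArn ?? "no service returned")
try await client.shutdown()
```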

", "smithy.api#examples": [ { "title": "To create a new service", @@ -4281,13 +4281,13 @@ "maximumPercent": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tmaximumPercent parameter represents an upper limit on the number of\n\t\t\tyour service's tasks that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment, as a percentage of the\n\t\t\t\tdesiredCount (rounded down to the nearest integer). This parameter\n\t\t\tenables you to define the deployment batch size. For example, if your service is using\n\t\t\tthe REPLICA service scheduler and has a desiredCount of four\n\t\t\ttasks and a maximumPercent value of 200%, the scheduler may start four new\n\t\t\ttasks before stopping the four older tasks (provided that the cluster resources required\n\t\t\tto do this are available). The default maximumPercent value for a service\n\t\t\tusing the REPLICA service scheduler is 200%.

\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types and tasks that use the EC2\n\t\t\tlaunch type, the maximum percent value is set to the\n\t\t\tdefault value and is used to define the upper limit on the number of the tasks in the\n\t\t\tservice that remain in the RUNNING state while the container instances are\n\t\t\tin the DRAINING state. If the tasks in the service use the\n\t\t\tFargate launch type, the maximum percent value is not used, although it is\n\t\t\treturned when describing your service.

" + "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tmaximumPercent parameter represents an upper limit on the number of\n\t\t\tyour service's tasks that are allowed in the RUNNING or\n\t\t\t\tPENDING state during a deployment, as a percentage of the\n\t\t\t\tdesiredCount (rounded down to the nearest integer). This parameter\n\t\t\tenables you to define the deployment batch size. For example, if your service is using\n\t\t\tthe REPLICA service scheduler and has a desiredCount of four\n\t\t\ttasks and a maximumPercent value of 200%, the scheduler may start four new\n\t\t\ttasks before stopping the four older tasks (provided that the cluster resources required\n\t\t\tto do this are available). The default maximumPercent value for a service\n\t\t\tusing the REPLICA service scheduler is 200%.

\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types, and tasks in the service use the EC2\n\t\t\tlaunch type, the maximum percent value is set to the\n\t\t\tdefault value. The maximum percent value is used to define the upper limit on the number of the tasks in the\n\t\t\tservice that remain in the RUNNING state while the container instances are\n\t\t\tin the DRAINING state.

\n \n

You can't specify a custom maximumPercent value for a service that uses either the blue/green (CODE_DEPLOY) or\n\t\t\tEXTERNAL deployment types and has tasks that use the EC2 launch type.

\n
\n

If the tasks in the service use the\n\t\t\tFargate launch type, the maximum percent value is not used, although it is\n\t\t\treturned when describing your service.

" } }, "minimumHealthyPercent": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tminimumHealthyPercent represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING state during a deployment,\n\t\t\tas a percentage of the desiredCount (rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount of four tasks and a\n\t\t\t\tminimumHealthyPercent of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.

\n

For services that do not use a load balancer, the following\n\t\t\tshould be noted:

  • A service is considered healthy if all essential containers within the tasks in the service pass their health checks.

  • If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a RUNNING state before the task is counted towards the minimum healthy percent total.

  • If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.

For services that do use a load balancer, the following should be noted:

  • If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.

  • If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.

The default value for a replica service for minimumHealthyPercent is\n\t\t\t100%. The default minimumHealthyPercent value for a service using the\n\t\t\t\tDAEMON service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the\n\t\t\tAPIs and 50% for the Amazon Web Services Management Console.

\n

The minimum number of healthy tasks during a deployment is the\n\t\t\t\tdesiredCount multiplied by the minimumHealthyPercent/100,\n\t\t\trounded up to the nearest integer value.

\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value and is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING\n\t\t\tstate while the container instances are in the DRAINING state. If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.

" + "smithy.api#documentation": "

If a service is using the rolling update (ECS) deployment type, the\n\t\t\t\tminimumHealthyPercent represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING state during a deployment,\n\t\t\tas a percentage of the desiredCount (rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount of four tasks and a\n\t\t\t\tminimumHealthyPercent of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.

\n

For services that do not use a load balancer, the following\n\t\t\tshould be noted:

  • A service is considered healthy if all essential containers within the tasks in the service pass their health checks.

  • If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a RUNNING state before the task is counted towards the minimum healthy percent total.

  • If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.

For services that do use a load balancer, the following should be noted:

  • If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.

  • If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.

The default value for a replica service for minimumHealthyPercent is\n\t\t\t100%. The default minimumHealthyPercent value for a service using the\n\t\t\t\tDAEMON service schedule is 0% for the CLI, the Amazon Web Services SDKs, and the\n\t\t\tAPIs and 50% for the Amazon Web Services Management Console.

\n

The minimum number of healthy tasks during a deployment is the\n\t\t\t\tdesiredCount multiplied by the minimumHealthyPercent/100,\n\t\t\trounded up to the nearest integer value.

\n

If a service is using either the blue/green (CODE_DEPLOY) or\n\t\t\t\tEXTERNAL deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value. The minimum healthy percent value is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING\n\t\t\tstate while the container instances are in the DRAINING state.

\n \n

You can't specify a custom minimumHealthyPercent value for a service that uses either the blue/green (CODE_DEPLOY) or\n\t\t\tEXTERNAL deployment types and has tasks that use the EC2 launch type.

\n
\n

If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY) or EXTERNAL\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.
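To make the two deployment-configuration knobs documented here concrete, a sketch that builds an update request with explicit values, assuming Soto's generated `ECS.UpdateServiceRequest` and `ECS.DeploymentConfiguration`; the cluster and service names are placeholders:

```swift
import SotoECS

// Hypothetical rolling-update tuning on an ECS-controller service.
// With desiredCount = 4, minimumHealthyPercent = 50 lets the scheduler stop
// two tasks before starting replacements; maximumPercent = 150 caps the
// overshoot at six RUNNING/PENDING tasks.
let request = ECS.UpdateServiceRequest(
    cluster: "default",
    deploymentConfiguration: ECS.DeploymentConfiguration(
        maximumPercent: 150,
        minimumHealthyPercent: 50
    ),
    service: "webapp"
)
// Pass the request to updateService(_:) on a configured ECS client.
```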

" } }, "alarms": { @@ -5591,7 +5591,7 @@ "driver": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

The Docker volume driver to use. The driver value must match the driver name provided\n\t\t\tby Docker because it is used for task placement. If the driver was installed using the\n\t\t\tDocker plugin CLI, use docker plugin ls to retrieve the driver name from\n\t\t\tyour container instance. If the driver was installed using another method, use Docker\n\t\t\tplugin discovery to retrieve the driver name. This parameter maps to Driver in the docker conainer create command and the\n\t\t\t\txxdriver option to docker\n\t\t\t\tvolume create.

" + "smithy.api#documentation": "

The Docker volume driver to use. The driver value must match the driver name provided\n\t\t\tby Docker because it is used for task placement. If the driver was installed using the\n\t\t\tDocker plugin CLI, use docker plugin ls to retrieve the driver name from\n\t\t\tyour container instance. If the driver was installed using another method, use Docker\n\t\t\tplugin discovery to retrieve the driver name. This parameter maps to Driver in the docker container create command and the\n\t\t\t\txxdriver option to docker\n\t\t\t\tvolume create.

" } }, "driverOpts": { @@ -5603,7 +5603,7 @@ "labels": { "target": "com.amazonaws.ecs#StringMap", "traits": { - "smithy.api#documentation": "

Custom metadata to add to your Docker volume. This parameter maps to\n\t\t\t\tLabels in the docker conainer create command and the xxlabel option to docker\n\t\t\t\tvolume create.

" + "smithy.api#documentation": "

Custom metadata to add to your Docker volume. This parameter maps to\n\t\t\t\tLabels in the docker container create command and the xxlabel option to docker\n\t\t\t\tvolume create.
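A sketch of a Docker-volume-backed task definition volume using the members documented above, assuming Soto's generated `ECS.Volume` and `ECS.DockerVolumeConfiguration` shapes; the driver and label values are placeholders:

```swift
import SotoECS

// Hypothetical shared Docker volume using the built-in local driver.
// autoprovision is only meaningful together with the shared scope.
let volume = ECS.Volume(
    dockerVolumeConfiguration: ECS.DockerVolumeConfiguration(
        autoprovision: true,
        driver: "local",
        labels: ["purpose": "scratch", "owner": "platform-team"],
        scope: .shared
    ),
    name: "scratch-data"
)
```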

" } } }, @@ -6284,7 +6284,7 @@ "command": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD to run the command arguments\n\t\t\tdirectly, or CMD-SHELL to run the command with the container's default\n\t\t\tshell.

\n

When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in double quotes and brackets.

\n

\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]\n

\n

You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.

\n

\n CMD-SHELL, curl -f http://localhost/ || exit 1\n

\n

An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck in tthe docker conainer create command

", + "smithy.api#documentation": "

A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD to run the command arguments\n\t\t\tdirectly, or CMD-SHELL to run the command with the container's default\n\t\t\tshell.

\n

When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in double quotes and brackets.

\n

\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]\n

\n

You don't include the double quotes and brackets when you use the Amazon Web Services Management Console.

\n

\n CMD-SHELL, curl -f http://localhost/ || exit 1\n

\n

An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck in the docker container create command

", "smithy.api#required": {} } }, @@ -6573,13 +6573,13 @@ "add": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd in the docker conainer create command and the\n\t\t\t\t--cap-add option to docker\n\t\t\t\trun.

\n \n

Tasks launched on Fargate only support adding the SYS_PTRACE kernel\n\t\t\t\tcapability.

\n
\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" + "smithy.api#documentation": "

The Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd in the docker container create command and the\n\t\t\t\t--cap-add option to docker\n\t\t\t\trun.

\n \n

Tasks launched on Fargate only support adding the SYS_PTRACE kernel\n\t\t\t\tcapability.

\n
\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" } }, "drop": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop in the docker conainer create command and the\n\t\t\t\t--cap-drop option to docker\n\t\t\t\trun.

\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" + "smithy.api#documentation": "

The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop in the docker container create command and the\n\t\t\t\t--cap-drop option to docker\n\t\t\t\trun.

\n

Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"\n

" } } }, @@ -6657,7 +6657,7 @@ "devices": { "target": "com.amazonaws.ecs#DevicesList", "traits": { - "smithy.api#documentation": "

Any host devices to expose to the container. This parameter maps to\n\t\t\tDevices in tthe docker conainer create command and the --device option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices parameter isn't supported.

\n
" + "smithy.api#documentation": "

Any host devices to expose to the container. This parameter maps to\n\t\t\tDevices in the docker container create command and the --device option to docker run.

\n \n

If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices parameter isn't supported.
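A sketch of the `devices` member above, assuming Soto's generated `ECS.Device` shape; the device paths are placeholders, and as noted this only applies to EC2-hosted tasks:

```swift
import SotoECS

// Hypothetical host device exposed to the container (not supported on Fargate).
let linux = ECS.LinuxParameters(
    devices: [
        ECS.Device(
            containerPath: "/dev/fuse",
            hostPath: "/dev/fuse",
            permissions: [.read, .write]
        )
    ]
)
```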

\n
" } }, "initProcessEnabled": { @@ -7809,7 +7809,7 @@ "options": { "target": "com.amazonaws.ecs#LogConfigurationOptionsMap", "traits": { - "smithy.api#documentation": "

The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n

" + "smithy.api#documentation": "

The configuration options to send to the log driver.

\n

The options you can specify depend on the log driver. Some\n\t\t\t\tof the options you can specify when you use the awslogs log driver to route logs to\n\t\t\t\tAmazon CloudWatch include the following:

\n
\n
awslogs-create-group
\n
\n

Required: No

\n

Specify whether you want the log group to be\n\t\t\t\t\t\t\tcreated automatically. If this option isn't\n\t\t\t\t\t\t\tspecified, it defaults to\n\t\t\t\t\t\t\tfalse.

\n \n

Your IAM policy must include the\n\t\t\t\t\t\t\t\tlogs:CreateLogGroup permission before\n\t\t\t\t\t\t\t\tyou attempt to use\n\t\t\t\t\t\t\t\tawslogs-create-group.

\n
\n
\n
awslogs-region
\n
\n

Required: Yes

\n

Specify the Amazon Web Services Region that the\n\t\t\t\t\t\t\tawslogs log driver is to send your\n\t\t\t\t\t\t\tDocker logs to. You can choose to send all of your\n\t\t\t\t\t\t\tlogs from clusters in different Regions to a\n\t\t\t\t\t\t\tsingle region in CloudWatch Logs. This is so that they're\n\t\t\t\t\t\t\tall visible in one location. Otherwise, you can\n\t\t\t\t\t\t\tseparate them by Region for more granularity. Make\n\t\t\t\t\t\t\tsure that the specified log group exists in the\n\t\t\t\t\t\t\tRegion that you specify with this option.

\n
\n
awslogs-group
\n
\n

Required: Yes

\n

Make sure to specify a log group that the\n\t\t\t\t\t\t\tawslogs log driver sends its log\n\t\t\t\t\t\t\tstreams to.

\n
\n
awslogs-stream-prefix
\n
\n

Required: Yes, when\n\t\t\t\t\t\t\tusing the Fargate launch\n\t\t\t\t\t\t\ttype.Optional for\n\t\t\t\t\t\t\t\tthe EC2 launch type, required for\n\t\t\t\t\t\t\t\tthe Fargate launch\n\t\t\t\t\t\t\t\ttype.

\n

Use the awslogs-stream-prefix\n\t\t\t\t\t\t\toption to associate a log stream with the\n\t\t\t\t\t\t\tspecified prefix, the container name, and the ID\n\t\t\t\t\t\t\tof the Amazon ECS task that the container belongs to.\n\t\t\t\t\t\t\tIf you specify a prefix with this option, then the\n\t\t\t\t\t\t\tlog stream takes the format prefix-name/container-name/ecs-task-id.

\n

If you don't specify a prefix\n\t\t\t\t\t\t\twith this option, then the log stream is named\n\t\t\t\t\t\t\tafter the container ID that's assigned by the\n\t\t\t\t\t\t\tDocker daemon on the container instance. Because\n\t\t\t\t\t\t\tit's difficult to trace logs back to the container\n\t\t\t\t\t\t\tthat sent them with just the Docker container ID\n\t\t\t\t\t\t\t(which is only available on the container\n\t\t\t\t\t\t\tinstance), we recommend that you specify a prefix\n\t\t\t\t\t\t\twith this option.

\n

For Amazon ECS services, you can use the service\n\t\t\t\t\t\t\tname as the prefix. Doing so, you can trace log\n\t\t\t\t\t\t\tstreams to the service that the container belongs\n\t\t\t\t\t\t\tto, the name of the container that sent them, and\n\t\t\t\t\t\t\tthe ID of the task that the container belongs\n\t\t\t\t\t\t\tto.

\n

You must specify a\n\t\t\t\t\t\t\tstream-prefix for your logs to have your logs\n\t\t\t\t\t\t\tappear in the Log pane when using the Amazon ECS\n\t\t\t\t\t\t\tconsole.

\n
\n
awslogs-datetime-format
\n
\n

Required: No

\n

This option defines a multiline start pattern\n\t\t\t\t\t\t\tin Python strftime format. A log\n\t\t\t\t\t\t\tmessage consists of a line that matches the\n\t\t\t\t\t\t\tpattern and any following lines that don’t match\n\t\t\t\t\t\t\tthe pattern. The matched line is the delimiter\n\t\t\t\t\t\t\tbetween log messages.

\n

One example of a use case for using this\n\t\t\t\t\t\t\tformat is for parsing output such as a stack dump,\n\t\t\t\t\t\t\twhich might otherwise be logged in multiple\n\t\t\t\t\t\t\tentries. The correct pattern allows it to be\n\t\t\t\t\t\t\tcaptured in a single entry.

\n

For more information, see awslogs-datetime-format.

\n

You cannot configure both the\n\t\t\t\t\t\t\tawslogs-datetime-format and\n\t\t\t\t\t\t\tawslogs-multiline-pattern\n\t\t\t\t\t\t\toptions.

\n \n

Multiline logging performs regular\n\t\t\t\t\t\t\t\texpression parsing and matching of all log\n\t\t\t\t\t\t\t\tmessages. This might have a negative impact on\n\t\t\t\t\t\t\t\tlogging performance.

\n
\n
\n
awslogs-multiline-pattern
\n
\n

Required: No

\n

This option defines a multiline start pattern\n\t\t\t\t\t\t\tthat uses a regular expression. A log message\n\t\t\t\t\t\t\tconsists of a line that matches the pattern and\n\t\t\t\t\t\t\tany following lines that don’t match the pattern.\n\t\t\t\t\t\t\tThe matched line is the delimiter between log\n\t\t\t\t\t\t\tmessages.

\n

For more information, see awslogs-multiline-pattern.

\n

This option is ignored if\n\t\t\t\t\t\t\tawslogs-datetime-format is also\n\t\t\t\t\t\t\tconfigured.

\n

You cannot configure both the\n\t\t\t\t\t\t\tawslogs-datetime-format and\n\t\t\t\t\t\t\tawslogs-multiline-pattern\n\t\t\t\t\t\t\toptions.

\n \n

Multiline logging performs regular\n\t\t\t\t\t\t\t\texpression parsing and matching of all log\n\t\t\t\t\t\t\t\tmessages. This might have a negative impact on\n\t\t\t\t\t\t\t\tlogging performance.

\n
\n
\n
mode
\n
\n

Required: No

\n

Valid values: non-blocking |\n\t\t\t\t\t\t\tblocking\n

\n

This option defines the delivery mode of log\n\t\t\t\t\t\t\tmessages from the container to CloudWatch Logs. The delivery\n\t\t\t\t\t\t\tmode you choose affects application availability\n\t\t\t\t\t\t\twhen the flow of logs from container to CloudWatch is\n\t\t\t\t\t\t\tinterrupted.

\n

If you use the blocking\n\t\t\t\t\t\t\tmode and the flow of logs to CloudWatch is interrupted,\n\t\t\t\t\t\t\tcalls from container code to write to the\n\t\t\t\t\t\t\tstdout and stderr\n\t\t\t\t\t\t\tstreams will block. The logging thread of the\n\t\t\t\t\t\t\tapplication will block as a result. This may cause\n\t\t\t\t\t\t\tthe application to become unresponsive and lead to\n\t\t\t\t\t\t\tcontainer healthcheck failure.

\n

If you use the non-blocking mode,\n\t\t\t\t\t\t\tthe container's logs are instead stored in an\n\t\t\t\t\t\t\tin-memory intermediate buffer configured with the\n\t\t\t\t\t\t\tmax-buffer-size option. This prevents\n\t\t\t\t\t\t\tthe application from becoming unresponsive when\n\t\t\t\t\t\t\tlogs cannot be sent to CloudWatch. We recommend using this mode if you want to\n\t\t\t\t\t\t\tensure service availability and are okay with some\n\t\t\t\t\t\t\tlog loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver.

\n
\n
max-buffer-size
\n
\n

Required: No

\n

Default value: 1m\n

\n

When non-blocking mode is used,\n\t\t\t\t\t\t\tthe max-buffer-size log option\n\t\t\t\t\t\t\tcontrols the size of the buffer that's used for\n\t\t\t\t\t\t\tintermediate message storage. Make sure to specify\n\t\t\t\t\t\t\tan adequate buffer size based on your application.\n\t\t\t\t\t\t\tWhen the buffer fills up, further logs cannot be\n\t\t\t\t\t\t\tstored. Logs that cannot be stored are lost.\n\t\t\t\t\t\t

\n
\n
\n

To route logs using the splunk log router, you need to specify a\n\t\t\t\tsplunk-token and a\n\t\t\t\tsplunk-url.

\n

When you use the awsfirelens log router to route logs to an Amazon Web Services Service or\n\t\t\t\tAmazon Web Services Partner Network destination for log storage and analytics, you can\n\t\t\t\tset the log-driver-buffer-limit option to limit\n\t\t\t\tthe number of events that are buffered in memory, before\n\t\t\t\tbeing sent to the log router container. It can help to\n\t\t\t\tresolve potential log loss issue because high throughput\n\t\t\t\tmight result in memory running out for the buffer inside of\n\t\t\t\tDocker.

\n

Other options you can specify when using awsfirelens to route\n\t\t\t\tlogs depend on the destination. When you export logs to\n\t\t\t\tAmazon Data Firehose, you can specify the Amazon Web Services Region with\n\t\t\t\tregion and a name for the log stream with\n\t\t\t\tdelivery_stream.

\n

When you export logs to\n\t\t\t\tAmazon Kinesis Data Streams, you can specify an Amazon Web Services Region with\n\t\t\t\tregion and a data stream name with\n\t\t\t\tstream.

\n

When you export logs to Amazon OpenSearch Service,\n\t\t\t\tyou can specify options like Name,\n\t\t\t\tHost (OpenSearch Service endpoint without protocol), Port,\n\t\t\t\tIndex, Type,\n\t\t\t\tAws_auth, Aws_region, Suppress_Type_Name, and\n\t\t\t\ttls.

\n

When you export logs to Amazon S3, you can\n\t\t\t\t\tspecify the bucket using the bucket option. You can also specify region,\n\t\t\t\t\ttotal_file_size, upload_timeout,\n\t\t\t\t\tand use_put_object as options.

\n

This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'\n
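To tie the option names above together, a sketch of an `awslogs` options map assuming Soto's generated `ECS.LogConfiguration`; the group, region, prefix, and buffer size are placeholders:

```swift
import SotoECS

// Hypothetical awslogs configuration with non-blocking delivery and a
// bounded in-memory buffer, per the mode/max-buffer-size options above.
let logConfig = ECS.LogConfiguration(
    logDriver: .awslogs,
    options: [
        "awslogs-create-group": "true",  // requires logs:CreateLogGroup in the role policy
        "awslogs-group": "/ecs/webapp",
        "awslogs-region": "us-east-1",
        "awslogs-stream-prefix": "webapp",
        "mode": "non-blocking",
        "max-buffer-size": "4m"
    ]
)
```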

" } }, "secretOptions": { @@ -7820,7 +7820,7 @@ } }, "traits": { - "smithy.api#documentation": "

The log configuration for the container. This parameter maps to LogConfig\n\t\t\tin the docker conainer create command and the\n\t\t\t\t--log-driver option to docker\n\t\t\t\t\trun.

\n

By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition.

\n

Understand the following when specifying a log configuration for your\n\t\t\tcontainers.

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.

    For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

    For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, syslog, splunk, and awsfirelens.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.
" + "smithy.api#documentation": "

The log configuration for the container. This parameter maps to LogConfig\n\t\t\tin the docker container create command and the\n\t\t\t\t--log-driver option to docker\n\t\t\t\t\trun.

\n

By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition.

\n

Understand the following when specifying a log configuration for your\n\t\t\tcontainers.

  • Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.

    For tasks on Fargate, the supported log drivers are awslogs, splunk, and awsfirelens.

    For tasks hosted on Amazon EC2 instances, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, syslog, splunk, and awsfirelens.

  • This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.

  • For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

  • For tasks that are on Fargate, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.
" } }, "com.amazonaws.ecs#LogConfigurationOptionsMap": { @@ -8591,7 +8591,7 @@ } }, "traits": { - "smithy.api#documentation": "

Port mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.

\n

If you use containers in a task with the awsvpc or host\n\t\t\tnetwork mode, specify the exposed ports using containerPort. The\n\t\t\t\thostPort can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort.

\n

Most fields of this parameter (containerPort, hostPort,\n\t\t\tprotocol) maps to PortBindings in the docker conainer create command and the\n\t\t\t\t--publish option to docker\n\t\t\t\t\trun. If the network mode of a task definition is set to\n\t\t\t\thost, host ports must either be undefined or match the container port\n\t\t\tin the port mapping.

\n \n

You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.

\n
\n

After a task reaches the RUNNING status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings section of\n\t\t\tDescribeTasks API responses.

" + "smithy.api#documentation": "

Port mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.

\n

If you use containers in a task with the awsvpc or host\n\t\t\tnetwork mode, specify the exposed ports using containerPort. The\n\t\t\t\thostPort can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort.

\n

Most fields of this parameter (containerPort, hostPort,\n\t\t\tprotocol) maps to PortBindings in the docker container create command and the\n\t\t\t\t--publish option to docker\n\t\t\t\t\trun. If the network mode of a task definition is set to\n\t\t\t\thost, host ports must either be undefined or match the container port\n\t\t\tin the port mapping.

\n \n

You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.

\n
\n

After a task reaches the RUNNING status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings section of\n\t\t\tDescribeTasks API responses.
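A sketch of a single port mapping as documented above, assuming Soto's generated `ECS.PortMapping` shape; the port number, name, and application protocol are placeholders:

```swift
import SotoECS

// Hypothetical awsvpc-style mapping where hostPort matches containerPort.
let mapping = ECS.PortMapping(
    appProtocol: .http,
    containerPort: 8080,
    hostPort: 8080,
    name: "webapp-8080",
    `protocol`: .tcp
)
```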

" } }, "com.amazonaws.ecs#PortMappingList": { @@ -8847,7 +8847,7 @@ "name": { "target": "com.amazonaws.ecs#SettingName", "traits": { - "smithy.api#documentation": "

The Amazon ECS account setting name to modify.

\n

The following are the valid values for the account setting name.

\n
    \n
  • \n

    \n serviceLongArnFormat - When modified, the Amazon Resource Name\n\t\t\t\t\t(ARN) and resource ID format of the resource type for a specified user, role, or\n\t\t\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting\n\t\t\t\t\tmust be set for each Amazon ECS resource separately. The ARN and resource ID format\n\t\t\t\t\tof a resource is defined by the opt-in status of the user or role that created\n\t\t\t\t\tthe resource. You must turn on this setting to use Amazon ECS features such as\n\t\t\t\t\tresource tagging.

    \n
  • \n
  • \n

    \n taskLongArnFormat - When modified, the Amazon Resource Name (ARN)\n\t\t\t\t\tand resource ID format of the resource type for a specified user, role, or the\n\t\t\t\t\troot user for an account is affected. The opt-in and opt-out account setting must\n\t\t\t\t\tbe set for each Amazon ECS resource separately. The ARN and resource ID format of a\n\t\t\t\t\tresource is defined by the opt-in status of the user or role that created the\n\t\t\t\t\tresource. You must turn on this setting to use Amazon ECS features such as resource\n\t\t\t\t\ttagging.

    \n
  • \n
  • \n

    \n containerInstanceLongArnFormat - When modified, the Amazon\n\t\t\t\t\tResource Name (ARN) and resource ID format of the resource type for a specified\n\t\t\t\t\tuser, role, or the root user for an account is affected. The opt-in and opt-out\n\t\t\t\t\taccount setting must be set for each Amazon ECS resource separately. The ARN and\n\t\t\t\t\tresource ID format of a resource is defined by the opt-in status of the user or\n\t\t\t\t\trole that created the resource. You must turn on this setting to use Amazon ECS\n\t\t\t\t\tfeatures such as resource tagging.

    \n
  • \n
  • \n

    \n awsvpcTrunking - When modified, the elastic network interface\n\t\t\t\t\t(ENI) limit for any new container instances that support the feature is changed.\n\t\t\t\t\tIf awsvpcTrunking is turned on, any new container instances that\n\t\t\t\t\tsupport the feature are launched have the increased ENI limits available to\n\t\t\t\t\tthem. For more information, see Elastic\n\t\t\t\t\t\tNetwork Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n containerInsights - When modified, the default setting indicating\n\t\t\t\t\twhether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed.\n\t\t\t\t\tIf containerInsights is turned on, any new clusters that are\n\t\t\t\t\tcreated will have Container Insights turned on unless you disable it during\n\t\t\t\t\tcluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n dualStackIPv6 - When turned on, when using a VPC in dual stack\n\t\t\t\t\tmode, your tasks using the awsvpc network mode can have an IPv6\n\t\t\t\t\taddress assigned. For more information on using IPv6 with tasks launched on\n\t\t\t\t\tAmazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6\n\t\t\t\t\twith tasks launched on Fargate, see Using a VPC in dual-stack mode.

    \n
  • \n
  • \n

    \n fargateFIPSMode - If you specify fargateFIPSMode,\n\t\t\t\t\tFargate FIPS 140 compliance is affected.

    \n
  • \n
  • \n

    \n fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a\n\t\t\t\t\tsecurity or infrastructure update is needed for an Amazon ECS task hosted on\n\t\t\t\t\tFargate, the tasks need to be stopped and new tasks launched to replace them.\n\t\t\t\t\tUse fargateTaskRetirementWaitPeriod to configure the wait time to\n\t\t\t\t\tretire a Fargate task. For information about the Fargate tasks maintenance,\n\t\t\t\t\tsee Amazon Web Services Fargate\n\t\t\t\t\t\ttask maintenance in the Amazon ECS Developer\n\t\t\t\t\tGuide.

    \n
  • \n
  • \n

    \n tagResourceAuthorization - Amazon ECS is introducing tagging\n\t\t\t\t\tauthorization for resource creation. Users must have permissions for actions\n\t\t\t\t\tthat create the resource, such as ecsCreateCluster. If tags are\n\t\t\t\t\tspecified when you create a resource, Amazon Web Services performs additional authorization to\n\t\t\t\t\tverify if users or roles have permissions to create tags. Therefore, you must\n\t\t\t\t\tgrant explicit permissions to use the ecs:TagResource action. For\n\t\t\t\t\tmore information, see Grant permission to tag resources on creation in the\n\t\t\t\t\t\tAmazon ECS Developer Guide.

    \n
  • \n
  • \n

    \n guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether\n\t\t\tAmazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your\n\t\t\tAmazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    \n
  • \n
", + "smithy.api#documentation": "

The Amazon ECS account setting name to modify.

\n

The following are the valid values for the account setting name.

\n
    \n
  • \n

    \n serviceLongArnFormat - When modified, the Amazon Resource Name\n\t\t\t\t\t(ARN) and resource ID format of the resource type for a specified user, role, or\n\t\t\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting\n\t\t\t\t\tmust be set for each Amazon ECS resource separately. The ARN and resource ID format\n\t\t\t\t\tof a resource is defined by the opt-in status of the user or role that created\n\t\t\t\t\tthe resource. You must turn on this setting to use Amazon ECS features such as\n\t\t\t\t\tresource tagging.

    \n
  • \n
  • \n

    \n taskLongArnFormat - When modified, the Amazon Resource Name (ARN)\n\t\t\t\t\tand resource ID format of the resource type for a specified user, role, or the\n\t\t\t\t\troot user for an account is affected. The opt-in and opt-out account setting must\n\t\t\t\t\tbe set for each Amazon ECS resource separately. The ARN and resource ID format of a\n\t\t\t\t\tresource is defined by the opt-in status of the user or role that created the\n\t\t\t\t\tresource. You must turn on this setting to use Amazon ECS features such as resource\n\t\t\t\t\ttagging.

    \n
  • \n
  • \n

    \n containerInstanceLongArnFormat - When modified, the Amazon\n\t\t\t\t\tResource Name (ARN) and resource ID format of the resource type for a specified\n\t\t\t\t\tuser, role, or the root user for an account is affected. The opt-in and opt-out\n\t\t\t\t\taccount setting must be set for each Amazon ECS resource separately. The ARN and\n\t\t\t\t\tresource ID format of a resource is defined by the opt-in status of the user or\n\t\t\t\t\trole that created the resource. You must turn on this setting to use Amazon ECS\n\t\t\t\t\tfeatures such as resource tagging.

    \n
  • \n
  • \n

    \n awsvpcTrunking - When modified, the elastic network interface\n\t\t\t\t\t(ENI) limit for any new container instances that support the feature is changed.\n\t\t\t\t\tIf awsvpcTrunking is turned on, any new container instances that\n\t\t\t\t\tsupport the feature are launched with the increased ENI limits available to\n\t\t\t\t\tthem. For more information, see Elastic\n\t\t\t\t\t\tNetwork Interface Trunking in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n containerInsights - When modified, the default setting indicating\n\t\t\t\t\twhether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed.\n\t\t\t\t\tIf containerInsights is turned on, any new clusters that are\n\t\t\t\t\tcreated will have Container Insights turned on unless you disable it during\n\t\t\t\t\tcluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

    \n
  • \n
  • \n

    \n dualStackIPv6 - When turned on, when using a VPC in dual stack\n\t\t\t\t\tmode, your tasks using the awsvpc network mode can have an IPv6\n\t\t\t\t\taddress assigned. For more information on using IPv6 with tasks launched on\n\t\t\t\t\tAmazon EC2 instances, see Using a VPC in dual-stack mode. For more information on using IPv6\n\t\t\t\t\twith tasks launched on Fargate, see Using a VPC in dual-stack mode.

    \n
  • \n
  • \n

    \n fargateTaskRetirementWaitPeriod - When Amazon Web Services determines that a\n\t\t\t\t\tsecurity or infrastructure update is needed for an Amazon ECS task hosted on\n\t\t\t\t\tFargate, the tasks need to be stopped and new tasks launched to replace them.\n\t\t\t\t\tUse fargateTaskRetirementWaitPeriod to configure the wait time to\n\t\t\t\t\tretire a Fargate task. For information about Fargate task maintenance,\n\t\t\t\t\tsee Amazon Web Services Fargate\n\t\t\t\t\t\ttask maintenance in the Amazon ECS Developer\n\t\t\t\t\tGuide.

    \n
  • \n
  • \n

    \n tagResourceAuthorization - Amazon ECS is introducing tagging\n\t\t\t\t\tauthorization for resource creation. Users must have permissions for actions\n\t\t\t\t\tthat create the resource, such as ecsCreateCluster. If tags are\n\t\t\t\t\tspecified when you create a resource, Amazon Web Services performs additional authorization to\n\t\t\t\t\tverify if users or roles have permissions to create tags. Therefore, you must\n\t\t\t\t\tgrant explicit permissions to use the ecs:TagResource action. For\n\t\t\t\t\tmore information, see Grant permission to tag resources on creation in the\n\t\t\t\t\t\tAmazon ECS Developer Guide.

    \n
  • \n
  • \n

    \n guardDutyActivate - The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether\n\t\t\tAmazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your\n\t\t\tAmazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -9518,7 +9518,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a new task using the specified task definition.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the Amazon Elastic Container Service Developer Guide.

\n

Alternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.

\n

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

The Amazon ECS API follows an eventual consistency model. This is because of the\n\t\t\tdistributed nature of the system supporting the API. This means that the result of an\n\t\t\tAPI command you run that affects your Amazon ECS resources might not be immediately visible\n\t\t\tto all subsequent commands you run. Keep this in mind when you carry out an API command\n\t\t\tthat immediately follows a previous API command.

\n

To manage eventual consistency, you can do the following:

\n
    \n
  • \n

    Confirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.

    \n
  • \n
  • \n

    Add wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.

    \n
  • \n
", + "smithy.api#documentation": "

Starts a new task using the specified task definition.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the Amazon Elastic Container Service Developer Guide.

\n

Alternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

\n

The Amazon ECS API follows an eventual consistency model. This is because of the\n\t\t\tdistributed nature of the system supporting the API. This means that the result of an\n\t\t\tAPI command you run that affects your Amazon ECS resources might not be immediately visible\n\t\t\tto all subsequent commands you run. Keep this in mind when you carry out an API command\n\t\t\tthat immediately follows a previous API command.

\n

To manage eventual consistency, you can do the following:

\n
    \n
  • \n

    Confirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.

    \n
  • \n
  • \n

    Add wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.

    \n
  • \n
", "smithy.api#examples": [ { "title": "To run a task on your default cluster", @@ -10632,7 +10632,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

\n

Alternatively, you can useRunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Starts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.

\n \n

On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.

\n
\n \n

Amazon Elastic Inference (EI) is no longer available to customers.

\n
\n

Alternatively, you can use RunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

\n

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

" } }, "com.amazonaws.ecs#StartTaskRequest": { @@ -11115,7 +11115,7 @@ } }, "traits": { - "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\tSysctls in tthe docker conainer create command and the --sysctl option to docker run. For example, you can configure\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer lived\n\t\t\tconnections.

\n

We don't recommend that you specify network-related systemControls\n\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\tawsvpc or host network mode. Doing this has the following\n\t\t\tdisadvantages:

\n
    \n
  • \n

    For tasks that use the awsvpc network mode including Fargate,\n\t\t\t\t\tif you set systemControls for any container, it applies to all\n\t\t\t\t\tcontainers in the task. If you set different systemControls for\n\t\t\t\t\tmultiple containers in a single task, the container that's started last\n\t\t\t\t\tdetermines which systemControls take effect.

    \n
  • \n
  • \n

    For tasks that use the host network mode, the network namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
\n

If you're setting an IPC resource namespace to use for the containers in the task, the\n\t\t\tfollowing conditions apply to your system controls. For more information, see IPC mode.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls values apply to all containers within a\n\t\t\t\t\ttask.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" + "smithy.api#documentation": "

A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\tSysctls in the docker container create command and the --sysctl option to docker run. For example, you can configure the\n\t\t\t\tnet.ipv4.tcp_keepalive_time setting to maintain longer-lived\n\t\t\tconnections.

\n

We don't recommend that you specify network-related systemControls\n\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\tawsvpc or host network mode. Doing this has the following\n\t\t\tdisadvantages:

\n
    \n
  • \n

    For tasks that use the awsvpc network mode including Fargate,\n\t\t\t\t\tif you set systemControls for any container, it applies to all\n\t\t\t\t\tcontainers in the task. If you set different systemControls for\n\t\t\t\t\tmultiple containers in a single task, the container that's started last\n\t\t\t\t\tdetermines which systemControls take effect.

    \n
  • \n
  • \n

    For tasks that use the host network mode, the network namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
\n

If you're setting an IPC resource namespace to use for the containers in the task, the\n\t\t\tfollowing conditions apply to your system controls. For more information, see IPC mode.

\n
    \n
  • \n

    For tasks that use the host IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls aren't supported.

    \n
  • \n
  • \n

    For tasks that use the task IPC mode, IPC namespace\n\t\t\t\t\t\tsystemControls values apply to all containers within a\n\t\t\t\t\ttask.

    \n
  • \n
\n \n

This parameter is not supported for Windows containers.

\n
\n \n

This parameter is only supported for tasks that are hosted on\n Fargate if the tasks are using platform version 1.4.0 or later\n (Linux). This isn't supported for Windows containers on\n Fargate.

\n
" } }, "com.amazonaws.ecs#SystemControls": { @@ -11602,7 +11602,7 @@ "compatibilities": { "target": "com.amazonaws.ecs#CompatibilityList", "traits": { - "smithy.api#documentation": "

The task launch types the task definition validated against during task definition\n\t\t\tregistration. For more information, see Amazon ECS launch types\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

Amazon ECS validates the task definition parameters against those supported by the launch type. For\n\t\t\tmore information, see Amazon ECS launch types\n\t\t\tin the Amazon Elastic Container Service Developer Guide.

" } }, "runtimePlatform": { @@ -12351,7 +12351,7 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The soft limit for the ulimit type.

", + "smithy.api#documentation": "

The soft limit for the ulimit type. The value can be specified in bytes, seconds, or as a count, depending on the type of the ulimit.

", "smithy.api#required": {} } }, @@ -12359,7 +12359,7 @@ "target": "com.amazonaws.ecs#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "

The hard limit for the ulimit type.

", + "smithy.api#documentation": "

The hard limit for the ulimit type. The value can be specified in bytes, seconds, or as a count, depending on the type of the ulimit.

", "smithy.api#required": {} } } diff --git a/models/elastic-inference.json b/models/elastic-inference.json index 5267a9a05d..234a631100 100644 --- a/models/elastic-inference.json +++ b/models/elastic-inference.json @@ -200,7 +200,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Describes the locations in which a given accelerator type or set of types is present in a given region.\n

\n

\n February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance.\n After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2.\n However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.\n

", + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Describes the locations in which a given accelerator type or set of types is present in a given region.\n

", "smithy.api#http": { "method": "POST", "uri": "/describe-accelerator-offerings", @@ -257,7 +257,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput.\n

\n

\n February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance.\n After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2.\n However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.\n

", + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput.\n

", "smithy.api#http": { "method": "GET", "uri": "/describe-accelerator-types", @@ -306,7 +306,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Describes information over a provided set of accelerators belonging to an account.\n

\n

\n February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance.\n After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2.\n However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.\n

", + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Describes information over a provided set of accelerators belonging to an account.\n

", "smithy.api#http": { "method": "POST", "uri": "/describe-accelerators", @@ -408,7 +408,7 @@ "name": "elastic-inference" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

\n Elastic Inference public APIs.\n

\n

\n February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance.\n After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2.\n However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.\n

", + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Elastic Inference public APIs.\n

", "smithy.api#title": "Amazon Elastic Inference", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -452,7 +452,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -495,7 +494,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -508,7 +508,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -522,7 +521,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -545,7 +543,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -580,7 +577,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -591,14 +587,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -612,14 +610,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -628,11 +624,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -643,14 +639,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -664,7 +662,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -684,7 +681,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -695,14 +691,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -713,9 +711,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -1278,7 +1278,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Returns all tags of an Elastic Inference Accelerator.\n

\n

\n February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance.\n After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2.\n However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.\n

", + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Returns all tags of an Elastic Inference Accelerator.\n

", "smithy.api#http": { "method": "GET", "uri": "/tags/{resourceArn}", @@ -1488,7 +1488,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Adds the specified tags to an Elastic Inference Accelerator.\n

\n

\n February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance.\n After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2.\n However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.\n

", + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Adds the specified tags to an Elastic Inference Accelerator.\n

", "smithy.api#http": { "method": "POST", "uri": "/tags/{resourceArn}", @@ -1571,7 +1571,7 @@ } ], "traits": { - "smithy.api#documentation": "

\n Removes the specified tags from an Elastic Inference Accelerator.\n

\n

\n February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance.\n After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2.\n However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.\n

", + "smithy.api#documentation": "\n

Amazon Elastic Inference is no longer available.

\n
\n

\n Removes the specified tags from an Elastic Inference Accelerator.\n

", "smithy.api#http": { "method": "DELETE", "uri": "/tags/{resourceArn}", diff --git a/models/elastic-load-balancing-v2.json b/models/elastic-load-balancing-v2.json index 5334390929..6f29567491 100644 --- a/models/elastic-load-balancing-v2.json +++ b/models/elastic-load-balancing-v2.json @@ -364,6 +364,32 @@ "smithy.api#output": {} } }, + "com.amazonaws.elasticloadbalancingv2#AdministrativeOverride": { + "type": "structure", + "members": { + "State": { + "target": "com.amazonaws.elasticloadbalancingv2#TargetAdministrativeOverrideStateEnum", + "traits": { + "smithy.api#documentation": "

The state of the override.

" + } + }, + "Reason": { + "target": "com.amazonaws.elasticloadbalancingv2#TargetAdministrativeOverrideReasonEnum", + "traits": { + "smithy.api#documentation": "

The reason code for the state.

" + } + }, + "Description": { + "target": "com.amazonaws.elasticloadbalancingv2#Description", + "traits": { + "smithy.api#documentation": "

A description of the override state that provides additional details.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the override status applied to a target.

" + } + }, "com.amazonaws.elasticloadbalancingv2#AllocationId": { "type": "string" }, @@ -5884,7 +5910,7 @@ "Key": { "target": "com.amazonaws.elasticloadbalancingv2#LoadBalancerAttributeKey", "traits": { - "smithy.api#documentation": "

The name of the attribute.

\n

The following attributes are supported by all load balancers:

\n
    \n
  • \n

    \n deletion_protection.enabled - Indicates whether deletion protection is\n enabled. The value is true or false. The default is\n false.

    \n
  • \n
  • \n

    \n load_balancing.cross_zone.enabled - Indicates whether cross-zone load\n balancing is enabled. The possible values are true and false.\n The default for Network Load Balancers and Gateway Load Balancers is false. \n The default for Application Load Balancers is true, and cannot be changed.

    \n
  • \n
\n

The following attributes are supported by both Application Load Balancers and Network Load\n Balancers:

\n
    \n
  • \n

    \n access_logs.s3.enabled - Indicates whether access logs are enabled. The\n value is true or false. The default is\n false.

    \n
  • \n
  • \n

    \n access_logs.s3.bucket - The name of the S3 bucket for the access logs.\n This attribute is required if access logs are enabled. The bucket must exist in the same\n region as the load balancer and have a bucket policy that grants Elastic Load Balancing\n permissions to write to the bucket.

    \n
  • \n
  • \n

    \n access_logs.s3.prefix - The prefix for the location in the S3 bucket for the\n access logs.

    \n
  • \n
  • \n

    \n ipv6.deny_all_igw_traffic - Blocks internet gateway (IGW) access to the\n load balancer. It is set to false for internet-facing load balancers and\n true for internal load balancers, preventing unintended access to your\n internal load balancer through an internet gateway.

    \n
  • \n
\n

The following attributes are supported by only Application Load Balancers:

\n
    \n
  • \n

    \n idle_timeout.timeout_seconds - The idle timeout value, in seconds. The\n valid range is 1-4000 seconds. The default is 60 seconds.

    \n
  • \n
  • \n

    \n client_keep_alive.seconds - The client keep alive value, in seconds. The \n valid range is 60-604800 seconds. The default is 3600 seconds.

    \n
  • \n
  • \n

    \n connection_logs.s3.enabled - Indicates whether connection logs are enabled. The\n value is true or false. The default is false.

    \n
  • \n
  • \n

    \n connection_logs.s3.bucket - The name of the S3 bucket for the connection logs.\n This attribute is required if connection logs are enabled. The bucket must exist in the same\n region as the load balancer and have a bucket policy that grants Elastic Load Balancing\n permissions to write to the bucket.

    \n
  • \n
  • \n

    \n connection_logs.s3.prefix - The prefix for the location in the S3 bucket for the\n connection logs.

    \n
  • \n
  • \n

    \n routing.http.desync_mitigation_mode - Determines how the load balancer\n handles requests that might pose a security risk to your application. The possible values\n are monitor, defensive, and strictest. The default\n is defensive.

    \n
  • \n
  • \n

    \n routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP\n headers with invalid header fields are removed by the load balancer (true) or\n routed to targets (false). The default is false.

    \n
  • \n
  • \n

    \n routing.http.preserve_host_header.enabled - Indicates whether the\n Application Load Balancer should preserve the Host header in the HTTP request\n and send it to the target without any change. The possible values are true\n and false. The default is false.

    \n
  • \n
  • \n

    \n routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates\n whether the two headers (x-amzn-tls-version and\n x-amzn-tls-cipher-suite), which contain information about the negotiated\n TLS version and cipher suite, are added to the client request before sending it to the\n target. The x-amzn-tls-version header has information about the TLS protocol\n version negotiated with the client, and the x-amzn-tls-cipher-suite header\n has information about the cipher suite negotiated with the client. Both headers are in\n OpenSSL format. The possible values for the attribute are true and\n false. The default is false.

    \n
  • \n
  • \n

    \n routing.http.xff_client_port.enabled - Indicates whether the\n X-Forwarded-For header should preserve the source port that the client used\n to connect to the load balancer. The possible values are true and\n false. The default is false.

    \n
  • \n
  • \n

    \n routing.http.xff_header_processing.mode - Enables you to modify,\n preserve, or remove the X-Forwarded-For header in the HTTP request before the\n Application Load Balancer sends the request to the target. The possible values are\n append, preserve, and remove. The default is\n append.

    \n
      \n
    • \n

      If the value is append, the Application Load Balancer adds the client\n IP address (of the last hop) to the X-Forwarded-For header in the HTTP\n request before it sends it to targets.

      \n
    • \n
    • \n

      If the value is preserve the Application Load Balancer preserves the\n X-Forwarded-For header in the HTTP request, and sends it to targets\n without any change.

      \n
    • \n
    • \n

      If the value is remove, the Application Load Balancer removes the\n X-Forwarded-For header in the HTTP request before it sends it to\n targets.

      \n
    • \n
    \n
  • \n
  • \n

    \n routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible\n values are true and false. The default is true.\n Elastic Load Balancing requires that message header names contain only alphanumeric\n characters and hyphens.

    \n
  • \n
  • \n

    \n waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load\n balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The\n default is false.

    \n
  • \n
\n

The following attributes are supported by only Network Load Balancers:

\n
    \n
  • \n

    \n dns_record.client_routing_policy - Indicates how traffic is \n distributed among the load balancer Availability Zones. The possible values are \n availability_zone_affinity with 100 percent zonal affinity, \n partial_availability_zone_affinity with 85 percent zonal affinity, \n and any_availability_zone with 0 percent zonal affinity.

    \n
  • \n
" + "smithy.api#documentation": "

The name of the attribute.

\n

The following attributes are supported by all load balancers:

\n
    \n
  • \n

    \n deletion_protection.enabled - Indicates whether deletion protection is\n enabled. The value is true or false. The default is\n false.

    \n
  • \n
  • \n

    \n load_balancing.cross_zone.enabled - Indicates whether cross-zone load\n balancing is enabled. The possible values are true and false.\n The default for Network Load Balancers and Gateway Load Balancers is false. \n The default for Application Load Balancers is true, and cannot be changed.

    \n
  • \n
\n

The following attributes are supported by both Application Load Balancers and Network Load\n Balancers:

\n
    \n
  • \n

    \n access_logs.s3.enabled - Indicates whether access logs are enabled. The\n value is true or false. The default is\n false.

    \n
  • \n
  • \n

    \n access_logs.s3.bucket - The name of the S3 bucket for the access logs.\n This attribute is required if access logs are enabled. The bucket must exist in the same\n region as the load balancer and have a bucket policy that grants Elastic Load Balancing\n permissions to write to the bucket.

    \n
  • \n
  • \n

    \n access_logs.s3.prefix - The prefix for the location in the S3 bucket for the\n access logs.

    \n
  • \n
  • \n

    \n ipv6.deny_all_igw_traffic - Blocks internet gateway (IGW) access to the\n load balancer. It is set to false for internet-facing load balancers and\n true for internal load balancers, preventing unintended access to your\n internal load balancer through an internet gateway.

    \n
  • \n
\n

The following attributes are supported by only Application Load Balancers:

\n
    \n
  • \n

    \n idle_timeout.timeout_seconds - The idle timeout value, in seconds. The\n valid range is 1-4000 seconds. The default is 60 seconds.

    \n
  • \n
  • \n

    \n client_keep_alive.seconds - The client keep alive value, in seconds. The \n valid range is 60-604800 seconds. The default is 3600 seconds.

    \n
  • \n
  • \n

    \n connection_logs.s3.enabled - Indicates whether connection logs are enabled. The\n value is true or false. The default is false.

    \n
  • \n
  • \n

    \n connection_logs.s3.bucket - The name of the S3 bucket for the connection logs.\n This attribute is required if connection logs are enabled. The bucket must exist in the same\n region as the load balancer and have a bucket policy that grants Elastic Load Balancing\n permissions to write to the bucket.

    \n
  • \n
  • \n

    \n connection_logs.s3.prefix - The prefix for the location in the S3 bucket for the\n connection logs.

    \n
  • \n
  • \n

    \n routing.http.desync_mitigation_mode - Determines how the load balancer\n handles requests that might pose a security risk to your application. The possible values\n are monitor, defensive, and strictest. The default\n is defensive.

    \n
  • \n
  • \n

    \n routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP\n headers with invalid header fields are removed by the load balancer (true) or\n routed to targets (false). The default is false.

    \n
  • \n
  • \n

    \n routing.http.preserve_host_header.enabled - Indicates whether the\n Application Load Balancer should preserve the Host header in the HTTP request\n and send it to the target without any change. The possible values are true\n and false. The default is false.

    \n
  • \n
  • \n

    \n routing.http.x_amzn_tls_version_and_cipher_suite.enabled - Indicates\n whether the two headers (x-amzn-tls-version and\n x-amzn-tls-cipher-suite), which contain information about the negotiated\n TLS version and cipher suite, are added to the client request before sending it to the\n target. The x-amzn-tls-version header has information about the TLS protocol\n version negotiated with the client, and the x-amzn-tls-cipher-suite header\n has information about the cipher suite negotiated with the client. Both headers are in\n OpenSSL format. The possible values for the attribute are true and\n false. The default is false.

    \n
  • \n
  • \n

    \n routing.http.xff_client_port.enabled - Indicates whether the\n X-Forwarded-For header should preserve the source port that the client used\n to connect to the load balancer. The possible values are true and\n false. The default is false.

    \n
  • \n
  • \n

    \n routing.http.xff_header_processing.mode - Enables you to modify,\n preserve, or remove the X-Forwarded-For header in the HTTP request before the\n Application Load Balancer sends the request to the target. The possible values are\n append, preserve, and remove. The default is\n append.

    \n
      \n
    • \n

      If the value is append, the Application Load Balancer adds the client\n IP address (of the last hop) to the X-Forwarded-For header in the HTTP\n request before it sends it to targets.

      \n
    • \n
    • \n

      If the value is preserve the Application Load Balancer preserves the\n X-Forwarded-For header in the HTTP request, and sends it to targets\n without any change.

      \n
    • \n
    • \n

      If the value is remove, the Application Load Balancer removes the\n X-Forwarded-For header in the HTTP request before it sends it to\n targets.

      \n
    • \n
    \n
  • \n
  • \n

    \n routing.http2.enabled - Indicates whether HTTP/2 is enabled. The possible\n values are true and false. The default is true.\n Elastic Load Balancing requires that message header names contain only alphanumeric\n characters and hyphens.

    \n
  • \n
  • \n

    \n waf.fail_open.enabled - Indicates whether to allow a WAF-enabled load\n balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true and false. The\n default is false.

    \n
  • \n
\n

The following attributes are supported by only Network Load Balancers:

\n
    \n
  • \n

    \n dns_record.client_routing_policy - Indicates how traffic is \n distributed among the load balancer Availability Zones. The possible values are \n availability_zone_affinity with 100 percent zonal affinity, \n partial_availability_zone_affinity with 85 percent zonal affinity, \n and any_availability_zone with 0 percent zonal affinity.

    \n
  • \n
  • \n

    \n zonal_shift.config.enabled - Indicates whether zonal shift is \n enabled. The possible values are true and false. The \n default is false.

    \n
  • \n
" } }, "Value": { @@ -8486,6 +8512,64 @@ "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" } }, + "com.amazonaws.elasticloadbalancingv2#TargetAdministrativeOverrideReasonEnum": { + "type": "enum", + "members": { + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AdministrativeOverride.Unknown" + } + }, + "NO_OVERRIDE_ENGAGED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AdministrativeOverride.NoOverride" + } + }, + "ZONAL_SHIFT_ENGAGED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AdministrativeOverride.ZonalShiftActive" + } + }, + "ZONAL_SHIFT_DELEGATED_TO_DNS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AdministrativeOverride.ZonalShiftDelegatedToDns" + } + } + } + }, + "com.amazonaws.elasticloadbalancingv2#TargetAdministrativeOverrideStateEnum": { + "type": "enum", + "members": { + "UNKNOWN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "unknown" + } + }, + "NO_OVERRIDE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "no_override" + } + }, + "ZONAL_SHIFT_ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "zonal_shift_active" + } + }, + "ZONAL_SHIFT_DELEGATED_TO_DNS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "zonal_shift_delegated_to_dns" + } + } + } + }, "com.amazonaws.elasticloadbalancingv2#TargetDescription": { "type": "structure", "members": { @@ -8857,6 +8941,12 @@ "traits": { "smithy.api#documentation": "

The anomaly detection result for the target.

\n

If no anomalies were detected, the result is normal.

\n

If anomalies were detected, the result is anomalous.

" } + }, + "AdministrativeOverride": { + "target": "com.amazonaws.elasticloadbalancingv2#AdministrativeOverride", + "traits": { + "smithy.api#documentation": "

The administrative override information for the target.

" + } } }, "traits": { diff --git a/models/elasticache.json b/models/elasticache.json index c6cea8493a..cb2b8e9e0c 100644 --- a/models/elasticache.json +++ b/models/elasticache.json @@ -203,13 +203,13 @@ "ScaleUpModifications": { "target": "com.amazonaws.elasticache#NodeTypeList", "traits": { - "smithy.api#documentation": "

A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group.

\n

When scaling up a Redis OSS cluster or replication group using\n ModifyCacheCluster or ModifyReplicationGroup, use a value\n from this list for the CacheNodeType parameter.

" + "smithy.api#documentation": "

A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group.

\n

When scaling up a Valkey or Redis OSS cluster or replication group using\n ModifyCacheCluster or ModifyReplicationGroup, use a value\n from this list for the CacheNodeType parameter.

" } }, "ScaleDownModifications": { "target": "com.amazonaws.elasticache#NodeTypeList", "traits": { - "smithy.api#documentation": "

A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group. When scaling down a Redis OSS cluster or\n replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from\n this list for the CacheNodeType parameter.

" + "smithy.api#documentation": "

A string list, each element of which specifies a cache node type which you can use to\n scale your cluster or replication group. When scaling down a Valkey or Redis OSS cluster or\n replication group using ModifyCacheCluster or ModifyReplicationGroup, use a value from\n this list for the CacheNodeType parameter.

" } } }, @@ -1886,7 +1886,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of the compute and memory capacity node type for the cluster.

\n

The following node types are supported by ElastiCache. Generally speaking, the current\n generation types provide more memory and computational power at lower cost when compared\n to their equivalent previous generation counterparts.

\n
    \n
  • \n

    General purpose:

    \n
      \n
    • \n

      Current generation:

      \n

      \n M7g node types:\n \t\t\t\t\tcache.m7g.large,\n \t\t\t\t\tcache.m7g.xlarge,\n \t\t\t\t\tcache.m7g.2xlarge,\n \t\t\t\t\tcache.m7g.4xlarge,\n \t\t\t\t\tcache.m7g.8xlarge,\n \t\t\t\t\tcache.m7g.12xlarge,\n \t\t\t\t\tcache.m7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t \n\t\t\t\t\t \tcache.m6g.large,\n\t\t\t\t\t\t\tcache.m6g.xlarge,\n\t\t\t\t\t\t\tcache.m6g.2xlarge,\n\t\t\t\t\t\t\tcache.m6g.4xlarge,\n\t\t\t\t\t\t\tcache.m6g.8xlarge,\n\t\t\t\t\t\t\tcache.m6g.12xlarge,\n\t\t\t\t\t\t\tcache.m6g.16xlarge\n

      \n

      \n M5 node types:\n cache.m5.large,\n \t\t\t\t\t\tcache.m5.xlarge,\n \t\t\t\t\t\tcache.m5.2xlarge,\n \t\t\t\t\t\tcache.m5.4xlarge,\n \t\t\t\t\t\tcache.m5.12xlarge,\n \t\t\t\t\t\tcache.m5.24xlarge\n

      \n

      \n M4 node types:\n cache.m4.large,\n \t\t\t\t\t\tcache.m4.xlarge,\n \t\t\t\t\t\tcache.m4.2xlarge,\n \t\t\t\t\t\tcache.m4.4xlarge,\n \t\t\t\t\t\tcache.m4.10xlarge\n

      \n

      \n T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward):\n\t\t\t\t\t cache.t4g.micro,\n\t\t\t\t\t cache.t4g.small,\n\t\t\t\t\t cache.t4g.medium\n

      \n

      \n T3 node types:\n cache.t3.micro, \n \t\t\t\t\t\tcache.t3.small,\n \t\t\t\t\t\tcache.t3.medium\n

      \n

      \n T2 node types:\n cache.t2.micro, \n \t\t\t\t\t\tcache.t2.small,\n \t\t\t\t\t\tcache.t2.medium\n

      \n
    • \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n T1 node types:\n cache.t1.micro\n

      \n

      \n M1 node types:\n cache.m1.small, \n\t\t\t\t\t\t cache.m1.medium, \n\t\t\t\t\t\t cache.m1.large,\n\t\t\t\t\t\t cache.m1.xlarge\n

      \n

      \n M3 node types:\n cache.m3.medium,\n \t\t\t\t\t\tcache.m3.large, \n \t\t\t\t\t\tcache.m3.xlarge,\n \t\t\t\t\t\tcache.m3.2xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Compute optimized:

    \n
      \n
    • \n

      Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      \n

      \n C1 node types:\n cache.c1.xlarge\n

      \n
    • \n
    \n
  • \n
  • \n

    Memory optimized:

    \n
      \n
    • \n

      Current generation:

      \n

      \n R7g node types:\t\n\t\t\t\t\t\t\tcache.r7g.large,\n\t\t\t\t\t\t\tcache.r7g.xlarge,\n\t\t\t\t\t\t\tcache.r7g.2xlarge,\n\t\t\t\t\t\t\tcache.r7g.4xlarge,\n\t\t\t\t\t\t\tcache.r7g.8xlarge,\n\t\t\t\t\t\t\tcache.r7g.12xlarge,\n\t\t\t\t\t\t\tcache.r7g.16xlarge\n

      \n \n

      For region availability, see Supported Node Types\n

      \n
      \n

      \n R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward):\n\t\t\t\t\t\t\tcache.r6g.large,\n\t\t\t\t\t\t\tcache.r6g.xlarge,\n\t\t\t\t\t\t\tcache.r6g.2xlarge,\n\t\t\t\t\t\t\tcache.r6g.4xlarge,\n\t\t\t\t\t\t\tcache.r6g.8xlarge,\n\t\t\t\t\t\t\tcache.r6g.12xlarge,\n\t\t\t\t\t\t\tcache.r6g.16xlarge\n

      \n

      \n R5 node types:\n cache.r5.large,\n \t\t\t\t\t cache.r5.xlarge,\n \t\t\t\t\t cache.r5.2xlarge,\n \t\t\t\t\t cache.r5.4xlarge,\n \t\t\t\t\t cache.r5.12xlarge,\n \t\t\t\t\t cache.r5.24xlarge\n

      \n

R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

• All current generation instance types are created in Amazon VPC by default.
• Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
• Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
• Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

The name of the compute and memory capacity node type for the cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

• General purpose:

  Current generation:
  M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge (for region availability, see Supported Node Types)
  M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
  M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
  M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
  T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
  T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
  T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

  Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  T1 node types: cache.t1.micro
  M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
  M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

• Compute optimized:

  Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  C1 node types: cache.c1.xlarge

• Memory optimized:

  Current generation:
  R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge (for region availability, see Supported Node Types)
  R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
  R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
  R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

  Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
  M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
  R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

• All current generation instance types are created in Amazon VPC by default.
• Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
• Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
• The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
" } }, "Engine": { @@ -1910,7 +1910,7 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of cache nodes in the cluster.

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.
" + "smithy.api#documentation": "

The number of cache nodes in the cluster.

For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" } }, "PreferredAvailabilityZone": { @@ -1973,7 +1973,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#Boolean", "traits": { - "smithy.api#documentation": "

If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.
" + "smithy.api#documentation": "

If you are running Valkey or Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.

" } }, "SecurityGroups": { @@ -2003,7 +2003,7 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis OSS commands.

Default: false
" + "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Valkey or Redis OSS commands.

Default: false

" } }, "AuthTokenLastModifiedDate": { @@ -2045,13 +2045,13 @@ "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
" + "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } }, "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
" + "smithy.api#documentation": "

The network type associated with the cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } }, "TransitEncryptionMode": { @@ -2262,7 +2262,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached or Redis OSS.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. (The supported node type families are the same as in the CacheNodeType list given above: general purpose M7g, M6g, M5, M4, T4g, T3, T2 and previous-generation T1, M1, M3; compute optimized previous-generation C1; memory optimized R7g, R6g, R5, R4 and previous-generation M2, R3.)

Additional node type info

• All current generation instance types are created in Amazon VPC by default.
• Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
• Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
• Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

Represents an individual cache node within a cluster. Each cache node runs its own instance of the cluster's protocol-compliant caching software - either Memcached, Valkey or Redis OSS.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. (Same node type list as above.)

Additional node type info

• All current generation instance types are created in Amazon VPC by default.
• Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
• Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
• The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
" } }, "com.amazonaws.elasticache#CacheNodeIdsList": { @@ -2342,7 +2342,7 @@ } }, "traits": { - "smithy.api#documentation": "

A parameter that has a different value for each cache node type it is applied to. For example, in a Redis OSS cluster, a cache.m1.large cache node type would have a larger maxmemory value than a cache.m1.small type.
" + "smithy.api#documentation": "

A parameter that has a different value for each cache node type it is applied to. For example, in a Valkey or Redis OSS cluster, a cache.m1.large cache node type would have a larger maxmemory value than a cache.m1.small type.

" } }, "com.amazonaws.elasticache#CacheNodeTypeSpecificParametersList": { @@ -2826,7 +2826,7 @@ "SupportedNetworkTypes": { "target": "com.amazonaws.elasticache#NetworkTypeList", "traits": { - "smithy.api#documentation": "

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
" + "smithy.api#documentation": "

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } } }, @@ -3109,7 +3109,7 @@ "target": "com.amazonaws.elasticache#AllowedNodeGroupId", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The 4-digit id for the node group you are configuring. For Redis OSS (cluster mode disabled) replication groups, the node group id is always 0001. To find a Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id.

", + "smithy.api#documentation": "

The 4-digit id for the node group you are configuring. For Valkey or Redis OSS (cluster mode disabled) replication groups, the node group id is always 0001. To find a Valkey or Redis OSS (cluster mode enabled)'s node group's (shard's) id, see Finding a Shard's Id.

", "smithy.api#required": {} } }, @@ -3117,14 +3117,14 @@ "target": "com.amazonaws.elasticache#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Redis OSS replication group you are working with.

The minimum number of replicas in a shard or replication group is:

• Redis OSS (cluster mode disabled)
  If Multi-AZ: 1
  If Multi-AZ: 0
• Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)
", + "smithy.api#documentation": "

The number of replicas you want in this node group at the end of this operation. The maximum value for NewReplicaCount is 5. The minimum value depends upon the type of Valkey or Redis OSS replication group you are working with.

The minimum number of replicas in a shard or replication group is:

• Valkey or Redis OSS (cluster mode disabled)
  If Multi-AZ: 1
  If Multi-AZ: 0
• Valkey or Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)
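As an aside for Soto users, here is a minimal sketch of adjusting one shard's replica count with this shape. It assumes the usual Soto conventions (an AWSClient, the generated ElastiCache service, lowerCamelCase member names and async operation methods); the identifiers and availability zones are placeholders, not values from this model.

    import SotoElastiCache

    let client = AWSClient()                                    // default credential chain
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // Keep one replica in shard 0001; the zone list has NewReplicaCount + 1
    // entries to account for the primary node.
    let shard = ElastiCache.ConfigureShard(
        newReplicaCount: 1,
        nodeGroupId: "0001",
        preferredAvailabilityZones: ["us-east-1a", "us-east-1b"]
    )
    let response = try await elastiCache.decreaseReplicaCount(
        .init(
            applyImmediately: true,
            replicaConfiguration: [shard],
            replicationGroupId: "my-replication-group"
        )
    )
    print(response.replicationGroup?.status ?? "unknown")
    try await client.shutdown()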
", "smithy.api#required": {} } }, "PreferredAvailabilityZones": { "target": "com.amazonaws.elasticache#PreferredAvailabilityZoneList", "traits": { - "smithy.api#documentation": "

A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The number of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache (Redis OSS) selects the availability zone for each of the replicas.
" + "smithy.api#documentation": "

A list of PreferredAvailabilityZone strings that specify which availability zones the replication group's nodes are to be in. The number of PreferredAvailabilityZone values must equal the value of NewReplicaCount plus 1 to account for the primary node. If this member of ReplicaConfiguration is omitted, ElastiCache selects the availability zone for each of the replicas.

" } }, "PreferredOutpostArns": { @@ -3173,7 +3173,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

Creates a copy of an existing serverless cache’s snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

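For context, a hedged Soto sketch of calling this operation follows; the member names mirror the request shape in this model, but the exact generated initializer labels and response members are assumptions, and the snapshot names are placeholders.

    import SotoElastiCache

    let client = AWSClient()
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // Copy an existing serverless cache snapshot under a new name.
    let copied = try await elastiCache.copyServerlessCacheSnapshot(
        .init(
            sourceServerlessCacheSnapshotName: "daily-snapshot",
            targetServerlessCacheSnapshotName: "daily-snapshot-copy"
        )
    )
    print(copied.serverlessCacheSnapshot?.serverlessCacheSnapshotName ?? "unknown")
    try await client.shutdown()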
" } }, "com.amazonaws.elasticache#CopyServerlessCacheSnapshotRequest": { @@ -3183,7 +3183,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier of the existing serverless cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

The identifier of the existing serverless cache’s snapshot to be copied. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#required": {} } }, @@ -3191,20 +3191,20 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier for the snapshot to be created. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

The identifier for the snapshot to be created. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#required": {} } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the KMS key used to encrypt the target snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The identifier of the KMS key used to encrypt the target snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "

A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL

" + "smithy.api#documentation": "

A list of tags to be added to the target snapshot resource. A tag is a key-value pair. Available for Valkey, Redis OSS and Serverless Memcached only. Default: NULL

" } } }, @@ -3218,7 +3218,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "

The response for the attempt to copy the serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The response for the attempt to copy the serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

" } } }, @@ -3258,7 +3258,7 @@ } ], "traits": { - "smithy.api#documentation": "

Makes a copy of an existing snapshot.

This operation is valid for Redis OSS only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages

• Error Message: The S3 bucket %s is outside of the region.
  Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.
• Error Message: The S3 bucket %s does not exist.
  Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.
• Error Message: The S3 bucket %s is not owned by the authenticated user.
  Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide.
• Error Message: The authenticated user does not have sufficient permissions to perform the desired activity.
  Solution: Contact your system administrator to get the needed permissions.
• Error Message: The S3 bucket %s already contains an object with key %s.
  Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot, you could alternatively create a new Amazon S3 bucket and use this same value for TargetSnapshotName.
• Error Message: ElastiCache has not been granted READ permissions %s on the S3 Bucket.
  Solution: Add List and Read permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.
• Error Message: ElastiCache has not been granted WRITE permissions %s on the S3 Bucket.
  Solution: Add Upload/Delete permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.
• Error Message: ElastiCache has not been granted READ_ACP permissions %s on the S3 Bucket.
  Solution: Add View Permissions on the bucket. For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the ElastiCache User Guide.
", + "smithy.api#documentation": "

Makes a copy of an existing snapshot.

This operation is valid for Valkey or Redis OSS only.

Users or groups that have permissions to use the CopySnapshot operation can create their own Amazon S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy to control who has the ability to use the CopySnapshot operation. For more information about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and Authentication & Access Control.

You could receive the following error messages.

Error Messages: (the list of error messages and solutions is unchanged from the version shown directly above.)
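A hedged Soto sketch of the export-to-S3 use case described above; the bucket and snapshot names are placeholders, and the bucket is assumed to already carry the grants listed in the error table.

    import SotoElastiCache

    let client = AWSClient()
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // Copy an automatic snapshot to a new name and export it to S3.
    // Omit targetBucket to keep the copy inside ElastiCache instead.
    let result = try await elastiCache.copySnapshot(
        .init(
            sourceSnapshotName: "automatic.my-cluster-2024-10-05-03-00",
            targetBucket: "my-elasticache-exports",
            targetSnapshotName: "my-cluster-export"
        )
    )
    print(result.snapshot?.snapshotStatus ?? "unknown")
    try await client.shutdown()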
", "smithy.api#examples": [ { "title": "CopySnapshot", @@ -3409,7 +3409,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached or Redis OSS.

This operation is not supported for Redis OSS (cluster mode enabled) clusters.

", + "smithy.api#documentation": "

Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine software, either Memcached, Valkey or Redis OSS.

This operation is not supported for Valkey or Redis OSS (cluster mode enabled) clusters.

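A minimal Soto sketch of this operation for a small Memcached cluster; the cluster id and node type are placeholders, and the generated member names are assumed to follow Soto's lowerCamelCase convention.

    import SotoElastiCache

    let client = AWSClient()
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // Three-node Memcached cluster on a current-generation node type.
    let created = try await elastiCache.createCacheCluster(
        .init(
            cacheClusterId: "my-memcached",
            cacheNodeType: "cache.m7g.large",
            engine: "memcached",
            numCacheNodes: 3
        )
    )
    print(created.cacheCluster?.cacheClusterStatus ?? "unknown")
    try await client.shutdown()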
", "smithy.api#examples": [ { "title": "CreateCacheCluster", @@ -3528,13 +3528,13 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The initial number of cache nodes that the cluster has.

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.
" + "smithy.api#documentation": "

The initial number of cache nodes that the cluster has.

For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/.

" } }, "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. (The supported node type families are the same as in the CacheNodeType list given earlier: general purpose M7g, M6g, M5, M4, T4g, T3, T2 and previous-generation T1, M1, M3; compute optimized previous-generation C1; memory optimized R7g, R6g, R5, R4 and previous-generation M2, R3.)

Additional node type info

• All current generation instance types are created in Amazon VPC by default.
• Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
• Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
• Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. (Same node type list as above.)

Additional node type info

• All current generation instance types are created in Amazon VPC by default.
• Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
• Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
• The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
" } }, "Engine": { @@ -3582,13 +3582,13 @@ "SnapshotArns": { "target": "com.amazonaws.elasticache#SnapshotArnsList", "traits": { - "smithy.api#documentation": "

A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.

This parameter is only valid if the Engine parameter is redis.

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
" + "smithy.api#documentation": "

A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Valkey or Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.

This parameter is only valid if the Engine parameter is redis.

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb

" } }, "SnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.

This parameter is only valid if the Engine parameter is redis.

\n
" + "smithy.api#documentation": "

The name of a Valkey or Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.

This parameter is only valid if the Engine parameter is redis.

\n
" } }, "PreferredMaintenanceWindow": { @@ -3612,7 +3612,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.
" + "smithy.api#documentation": "

If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and above, set this parameter to yes to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.

" } }, "SnapshotRetentionLimit": { @@ -3666,13 +3666,13 @@ "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
" + "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } }, "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.
" + "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } } }, @@ -4023,7 +4023,7 @@ } ], "traits": { - "smithy.api#documentation": "

Global Datastore for Redis OSS offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore for Redis OSS, you can create cross-region read replica clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.

• The GlobalReplicationGroupIdSuffix is the name of the Global datastore.
• The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.
" + "smithy.api#documentation": "

Global Datastore offers fully managed, fast, reliable and secure cross-region replication. Using Global Datastore with Valkey or Redis OSS, you can create cross-region read replica clusters for ElastiCache to enable low-latency reads and disaster recovery across regions. For more information, see Replication Across Regions Using Global Datastore.

• The GlobalReplicationGroupIdSuffix is the name of the Global datastore.
• The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster.
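A hedged Soto sketch of creating a Global datastore from an existing primary replication group; both identifiers are placeholders and the generated initializer labels are an assumption based on Soto's conventions.

    import SotoElastiCache

    let client = AWSClient()
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // The suffix becomes part of the Global datastore name; the primary
    // replication group must already exist in this region.
    let response = try await elastiCache.createGlobalReplicationGroup(
        .init(
            globalReplicationGroupIdSuffix: "my-global-store",
            primaryReplicationGroupId: "my-primary-group"
        )
    )
    print(response.globalReplicationGroup?.status ?? "unknown")
    try await client.shutdown()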
" } }, "com.amazonaws.elasticache#CreateGlobalReplicationGroupMessage": { @@ -4135,7 +4135,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group.

This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore.

A Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number of replicas allowed.

The node or shard limit can be increased to a maximum of 500 per cluster if the Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster.

To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type.

When a Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use ElastiCache (Redis OSS) scaling. For more information, see Scaling ElastiCache (Redis OSS) Clusters in the ElastiCache User Guide.

This operation is valid for Redis OSS only.
", + "smithy.api#documentation": "

Creates a Valkey or Redis OSS (cluster mode disabled) or a Valkey or Redis OSS (cluster mode enabled) replication group.

This API can be used to create a standalone regional replication group or a secondary replication group associated with a Global datastore.

A Valkey or Redis OSS (cluster mode disabled) replication group is a collection of nodes, where one of the nodes is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.

A Valkey or Redis OSS cluster-mode enabled cluster is comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas to 15 shards and 5 replicas, which is the maximum number of replicas allowed.

The node or shard limit can be increased to a maximum of 500 per cluster if the Valkey or Redis OSS engine version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and no replicas). Make sure there are enough available IP addresses to accommodate the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR range or the subnets are shared and heavily used by other clusters. For more information, see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster.

To request a limit increase, see Amazon Service Limits and choose the limit type Nodes per cluster per instance type.

When a Valkey or Redis OSS (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. If you need to increase or decrease the number of node groups (console: shards), you can use scaling. For more information, see Scaling self-designed clusters in the ElastiCache User Guide.

This operation is valid for Valkey and Redis OSS only.
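A minimal Soto sketch of a (cluster mode enabled) replication group matching the description above; the identifiers are placeholders, and the member names mirror this model under the assumption that the generated Swift API uses Soto's usual lowerCamelCase labels.

    import SotoElastiCache

    let client = AWSClient()
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // Two shards, two replicas per shard, with automatic failover enabled.
    let response = try await elastiCache.createReplicationGroup(
        .init(
            automaticFailoverEnabled: true,
            cacheNodeType: "cache.r7g.large",
            engine: "redis",
            numNodeGroups: 2,
            replicasPerNodeGroup: 2,
            replicationGroupDescription: "sample cluster mode enabled group",
            replicationGroupId: "my-redis-group"
        )
    )
    print(response.replicationGroup?.status ?? "unknown")
    try await client.shutdown()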
", "smithy.api#examples": [ { "title": "CreateCacheReplicationGroup", @@ -4255,7 +4255,7 @@ "AutomaticFailoverEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

AutomaticFailoverEnabled must be enabled for Redis OSS (cluster mode enabled) replication groups.

Default: false
" + "smithy.api#documentation": "

Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.

AutomaticFailoverEnabled must be enabled for Valkey or Redis OSS (cluster mode enabled) replication groups.

Default: false

" } }, "MultiAZEnabled": { @@ -4279,7 +4279,7 @@ "NumNodeGroups": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.

Default: 1
" + "smithy.api#documentation": "

An optional parameter that specifies the number of node groups (shards) for this Valkey or Redis OSS (cluster mode enabled) replication group. For Valkey or Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.

Default: 1

" } }, "ReplicasPerNodeGroup": { @@ -4291,13 +4291,13 @@ "NodeGroupConfiguration": { "target": "com.amazonaws.elasticache#NodeGroupConfigurationList", "traits": { - "smithy.api#documentation": "

A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots.

If you're creating a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group.
" + "smithy.api#documentation": "

A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount, and Slots.

If you're creating a Valkey or Redis OSS (cluster mode disabled) or a Valkey or Redis OSS (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter. However, it is required when seeding a Valkey or Redis OSS (cluster mode enabled) cluster from a S3 rdb file. You must configure each node group (shard) using this parameter because you must specify the slots for each node group.

" } }, "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. (The supported node type families are the same as in the CacheNodeType list given earlier: general purpose M7g, M6g, M5, M4, T4g, T3, T2 and previous-generation T1, M1, M3; compute optimized previous-generation C1; memory optimized R7g, R6g, R5, R4 and previous-generation M2, R3.)

Additional node type info

• All current generation instance types are created in Amazon VPC by default.
• Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
• Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
• Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

The compute and memory capacity of the nodes in the node group (shard).

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge (for region availability, see Supported Node Types)
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge (for region availability, see Supported Node Types)
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
" } }, "Engine": { @@ -4315,7 +4315,7 @@ "CacheParameterGroupName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.

If you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.

  • To create a Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2.
  • To create a Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on.
" + "smithy.api#documentation": "

The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.

If you are running Valkey or Redis OSS version 3.2.4 or later, have only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.

  • To create a Valkey or Redis OSS (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2.
  • To create a Valkey or Redis OSS (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on.
" } }, "CacheSubnetGroupName": { @@ -4345,7 +4345,7 @@ "SnapshotArns": { "target": "com.amazonaws.elasticache#SnapshotArnsList", "traits": { - "smithy.api#documentation": "

A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot\n files stored in Amazon S3. The snapshot files are used to populate the new replication\n group. The Amazon S3 object name in the ARN cannot contain any commas. The new\n replication group will have the number of node groups (console: shards) specified by the\n parameter NumNodeGroups or the number of node groups configured by\n NodeGroupConfiguration regardless of the number of ARNs\n specified here.

\n

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb\n

" + "smithy.api#documentation": "

A list of Amazon Resource Names (ARN) that uniquely identify the Valkey or Redis OSS RDB snapshot\n files stored in Amazon S3. The snapshot files are used to populate the new replication\n group. The Amazon S3 object name in the ARN cannot contain any commas. The new\n replication group will have the number of node groups (console: shards) specified by the\n parameter NumNodeGroups or the number of node groups configured by\n NodeGroupConfiguration regardless of the number of ARNs\n specified here.

\n

Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb\n

" } }, "SnapshotName": { @@ -4375,7 +4375,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and above, set this parameter to yes \n to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SnapshotRetentionLimit": { @@ -4435,31 +4435,31 @@ "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2\n and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } }, "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when creating a replication group, either\n ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS\n engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on\n the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when creating a replication group, either\n ipv4 | ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2\n and above or Memcached engine version 1.6.6 and above on all instances built on\n the Nitro system.

" } }, "TransitEncryptionMode": { "target": "com.amazonaws.elasticache#TransitEncryptionMode", "traits": { - "smithy.api#documentation": "

A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.

\n

When setting TransitEncryptionEnabled to true, you can set\n your TransitEncryptionMode to preferred in the same request,\n to allow both encrypted and unencrypted connections at the same time. Once you migrate\n all your Redis OSS clients to use encrypted connections you can modify the value to\n required to allow encrypted connections only.

\n

Setting TransitEncryptionMode to required is a two-step\n process that requires you to first set the TransitEncryptionMode to\n preferred, after that you can set TransitEncryptionMode to\n required.

\n

This process will not trigger the replacement of the replication group.

" + "smithy.api#documentation": "

A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.

\n

When setting TransitEncryptionEnabled to true, you can set\n your TransitEncryptionMode to preferred in the same request,\n to allow both encrypted and unencrypted connections at the same time. Once you migrate\n all your Valkey or Redis OSS clients to use encrypted connections you can modify the value to\n required to allow encrypted connections only.

\n

Setting TransitEncryptionMode to required is a two-step\n process that requires you to first set the TransitEncryptionMode to\n preferred, after that you can set TransitEncryptionMode to\n required.

\n

This process will not trigger the replacement of the replication group.
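The two-step migration described here translates naturally into two ModifyReplicationGroup calls. The sketch below is a hedged Soto example, not the library's documented usage: it assumes the generated ModifyReplicationGroupMessage exposes transitEncryptionEnabled/transitEncryptionMode members and a TransitEncryptionMode enum with .preferred and .required cases, which should be confirmed against the generated ElastiCache sources.

```swift
import SotoElastiCache

// Step 1: enable in-transit encryption while still accepting plaintext clients.
func startEncryptionMigration(_ elastiCache: ElastiCache, groupId: String) async throws {
    let step1 = ElastiCache.ModifyReplicationGroupMessage(
        applyImmediately: true,
        replicationGroupId: groupId,
        transitEncryptionEnabled: true,
        transitEncryptionMode: .preferred
    )
    _ = try await elastiCache.modifyReplicationGroup(step1)
}

// Step 2: once every client connects over TLS, allow encrypted connections only.
func finishEncryptionMigration(_ elastiCache: ElastiCache, groupId: String) async throws {
    let step2 = ElastiCache.ModifyReplicationGroupMessage(
        applyImmediately: true,
        replicationGroupId: groupId,
        transitEncryptionMode: .required
    )
    _ = try await elastiCache.modifyReplicationGroup(step2)
}
```

Neither call replaces the replication group; the second should only be made after all clients have been migrated to encrypted connections.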

" } }, "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS \n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS \n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" } }, "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of the snapshot used to create a replication group. Available for Redis OSS only.

" + "smithy.api#documentation": "

The name of the snapshot used to create a replication group. Available for Valkey, Redis OSS only.

" } } }, @@ -4578,7 +4578,7 @@ "SnapshotArnsToRestore": { "target": "com.amazonaws.elasticache#SnapshotArnsList", "traits": { - "smithy.api#documentation": "

The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The ARN(s) of the snapshot that the new serverless cache will be created from. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "Tags": { @@ -4590,7 +4590,7 @@ "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. Default is NULL.

" + "smithy.api#documentation": "

The identifier of the UserGroup to be associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL.

" } }, "SubnetIds": { @@ -4602,13 +4602,13 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of snapshots that will be retained for the serverless cache that is being created. \n As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The number of snapshots that will be retained for the serverless cache that is being created. \n As new snapshots beyond this limit are added, the oldest snapshots will be deleted on a rolling basis. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The daily time that snapshots will be created from the new serverless cache. By default this number is populated with \n 0, i.e. no snapshots will be created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The daily time that snapshots will be created from the new serverless cache. By default this number is populated with \n 0, i.e. no snapshots will be created on an automatic daily basis. Available for Valkey, Redis OSS and Serverless Memcached only.

" } } }, @@ -4665,7 +4665,7 @@ } ], "traits": { - "smithy.api#documentation": "

This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

This API creates a copy of an entire ServerlessCache at a specific moment in time. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "com.amazonaws.elasticache#CreateServerlessCacheSnapshotRequest": { @@ -4675,7 +4675,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name for the snapshot being created. Must be unique for the customer account. Available for Redis OSS and Serverless Memcached only.\n Must be between 1 and 255 characters.

", + "smithy.api#documentation": "

The name for the snapshot being created. Must be unique for the customer account. Available for Valkey, Redis OSS and Serverless Memcached only.\n Must be between 1 and 255 characters.

", "smithy.api#required": {} } }, @@ -4683,20 +4683,20 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of an existing serverless cache. The snapshot is created from this cache. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

The name of an existing serverless cache. The snapshot is created from this cache. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#required": {} } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS and Serverless Memcached only. Default: NULL

" + "smithy.api#documentation": "

The ID of the KMS key used to encrypt the snapshot. Available for Valkey, Redis OSS and Serverless Memcached only. Default: NULL

" } }, "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "

A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

A list of tags to be added to the snapshot resource. A tag is a key-value pair. Available for Valkey, Redis OSS and Serverless Memcached only.
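Taken together, the request members above map to a small call. The following is a hypothetical Soto sketch (shape and member names assumed from the usual code generation; cache name, snapshot name, KMS alias, and tag are placeholders) that snapshots an existing serverless cache.

```swift
import SotoElastiCache

// Hypothetical example: create an on-demand snapshot of a serverless cache,
// encrypted with a customer-managed KMS key and tagged for bookkeeping.
func snapshotServerlessCache(_ elastiCache: ElastiCache) async throws {
    let request = ElastiCache.CreateServerlessCacheSnapshotRequest(
        kmsKeyId: "alias/my-cache-key",
        serverlessCacheName: "my-serverless-cache",
        serverlessCacheSnapshotName: "my-serverless-cache-2024-10-08",
        tags: [ElastiCache.Tag(key: "team", value: "platform")]
    )
    let response = try await elastiCache.createServerlessCacheSnapshot(request)
    // The returned snapshot describes the cache state at that point in time.
    print(response.serverlessCacheSnapshot?.serverlessCacheSnapshotName ?? "unknown")
}
```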

" } } }, @@ -4710,7 +4710,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "

The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The state of a serverless cache snapshot at a specific point in time, to the millisecond. Available for Valkey, Redis OSS and Serverless Memcached only.

" } } }, @@ -4759,7 +4759,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a copy of an entire cluster or replication group at a specific moment in\n time.

\n \n

This operation is valid for Redis OSS only.

\n
", + "smithy.api#documentation": "

Creates a copy of an entire cluster or replication group at a specific moment in\n time.

\n \n

This operation is valid for Valkey or Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "CreateSnapshot - NonClustered Redis, 2 read-replicas", @@ -4962,7 +4962,7 @@ } ], "traits": { - "smithy.api#documentation": "

For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, see\n Using Role Based Access Control (RBAC).

" + "smithy.api#documentation": "

For Valkey engine version 7.2 onwards and Redis OSS 6.0 and onwards: Creates a user. For more information, see\n Using Role Based Access Control (RBAC).
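As an illustration of RBAC user creation through the generated Soto client, a minimal sketch follows. The member names (accessString, authenticationMode) and the .password case are assumptions based on Soto's usual code generation, and the access string, user ID, and password are placeholders only.

```swift
import SotoElastiCache

// Hypothetical example: create an RBAC user allowed to run read-only commands on all keys.
func createReadOnlyUser(_ elastiCache: ElastiCache) async throws {
    let request = ElastiCache.CreateUserMessage(
        accessString: "on ~* +@read",
        authenticationMode: .init(passwords: ["a-long-example-password-123456"], type: .password),
        engine: "valkey",
        userId: "read-only-user",
        userName: "readonly"
    )
    _ = try await elastiCache.createUser(request)
}
```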

" } }, "com.amazonaws.elasticache#CreateUserGroup": { @@ -5000,7 +5000,7 @@ } ], "traits": { - "smithy.api#documentation": "

For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more\n information, see Using Role Based Access Control (RBAC)\n

" + "smithy.api#documentation": "

For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Creates a user group. For more\n information, see Using Role Based Access Control (RBAC)\n

" } }, "com.amazonaws.elasticache#CreateUserGroupMessage": { @@ -5031,7 +5031,7 @@ "Tags": { "target": "com.amazonaws.elasticache#TagList", "traits": { - "smithy.api#documentation": "

A list of tags to be added to this resource. A tag is a key-value pair. A tag key must\n be accompanied by a tag value, although null is accepted. Available for Redis OSS only.

" + "smithy.api#documentation": "

A list of tags to be added to this resource. A tag is a key-value pair. A tag key must\n be accompanied by a tag value, although null is accepted. Available for Valkey and Redis OSS only.

" } } }, @@ -5233,13 +5233,13 @@ "GlobalNodeGroupsToRemove": { "target": "com.amazonaws.elasticache#GlobalNodeGroupIdList", "traits": { - "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster.\n ElastiCache (Redis OSS) will attempt to remove all node groups listed by\n GlobalNodeGroupsToRemove from the cluster.

" + "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster.\n ElastiCache will attempt to remove all node groups listed by\n GlobalNodeGroupsToRemove from the cluster.

" } }, "GlobalNodeGroupsToRetain": { "target": "com.amazonaws.elasticache#GlobalNodeGroupIdList", "traits": { - "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster.\n ElastiCache (Redis OSS) will attempt to retain all node groups listed by\n GlobalNodeGroupsToRetain from the cluster.

" + "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node groups\n (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required.\n GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster.\n ElastiCache will attempt to retain all node groups listed by\n GlobalNodeGroupsToRetain from the cluster.

" } }, "ApplyImmediately": { @@ -5313,7 +5313,7 @@ } ], "traits": { - "smithy.api#documentation": "

Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis OSS (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.

" + "smithy.api#documentation": "

Dynamically decreases the number of replicas in a Valkey or Redis OSS (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Valkey or Redis OSS (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.

" } }, "com.amazonaws.elasticache#DecreaseReplicaCountMessage": { @@ -5330,13 +5330,13 @@ "NewReplicaCount": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

The minimum number of replicas in a shard or replication group is:

  • Redis OSS (cluster mode disabled)
    • If Multi-AZ is enabled: 1
    • If Multi-AZ is not enabled: 0
  • Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)
" + "smithy.api#documentation": "

The number of read replica nodes you want at the completion of this operation. For Valkey or Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in the replication group. For Valkey or Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups.

The minimum number of replicas in a shard or replication group is:

  • Valkey or Redis OSS (cluster mode disabled)
    • If Multi-AZ is enabled: 1
    • If Multi-AZ is not enabled: 0
  • Valkey or Redis OSS (cluster mode enabled): 0 (though you will not be able to failover to a replica if your primary node fails)
" } }, "ReplicaConfiguration": { "target": "com.amazonaws.elasticache#ReplicaConfigurationList", "traits": { - "smithy.api#documentation": "

A list of ConfigureShard objects that can be used to configure each\n shard in a Redis OSS (cluster mode enabled) replication group. The\n ConfigureShard has three members: NewReplicaCount,\n NodeGroupId, and PreferredAvailabilityZones.

" + "smithy.api#documentation": "

A list of ConfigureShard objects that can be used to configure each\n shard in a Valkey or Redis OSS (cluster mode enabled) replication group. The\n ConfigureShard has three members: NewReplicaCount,\n NodeGroupId, and PreferredAvailabilityZones.
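For illustration, the per-shard form of this request could be issued through Soto roughly as below. This is a hedged sketch: the ConfigureShard and DecreaseReplicaCountMessage shapes are assumed to be generated with the usual lowerCamelCase members, and the replication group and shard identifiers are placeholders.

```swift
import SotoElastiCache

// Hypothetical example: drop node group 0001 to a single replica while leaving
// the rest of the replication group's shards untouched.
func decreaseReplicas(_ elastiCache: ElastiCache, groupId: String) async throws {
    let request = ElastiCache.DecreaseReplicaCountMessage(
        applyImmediately: true,
        replicaConfiguration: [
            ElastiCache.ConfigureShard(newReplicaCount: 1, nodeGroupId: "0001")
        ],
        replicationGroupId: groupId
    )
    _ = try await elastiCache.decreaseReplicaCount(request)
}
```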

" } }, "ReplicasToRemove": { @@ -5435,7 +5435,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation.

This operation is not valid for:

  • Redis OSS (cluster mode enabled) clusters
  • Redis OSS (cluster mode disabled) clusters
  • A cluster that is the last read replica of a replication group
  • A cluster that is the primary node of a replication group
  • A node group (shard) that has Multi-AZ mode enabled
  • A cluster from a Redis OSS (cluster mode enabled) replication group
  • A cluster that is not in the available state
", + "smithy.api#documentation": "

Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot cancel or revert this operation.

This operation is not valid for:

  • Valkey or Redis OSS (cluster mode enabled) clusters
  • Valkey or Redis OSS (cluster mode disabled) clusters
  • A cluster that is the last read replica of a replication group
  • A cluster that is the primary node of a replication group
  • A node group (shard) that has Multi-AZ mode enabled
  • A cluster from a Valkey or Redis OSS (cluster mode enabled) replication group
  • A cluster that is not in the available state
", "smithy.api#examples": [ { "title": "DeleteCacheCluster", @@ -5859,7 +5859,7 @@ "FinalSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

Name of the final snapshot to be taken before the serverless cache is deleted. Available for Redis OSS and Serverless Memcached only.\n Default: NULL, i.e. a final snapshot is not taken.

" + "smithy.api#documentation": "

Name of the final snapshot to be taken before the serverless cache is deleted. Available for Valkey, Redis OSS and Serverless Memcached only.\n Default: NULL, i.e. a final snapshot is not taken.

" } } }, @@ -5904,7 +5904,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

Deletes an existing serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "com.amazonaws.elasticache#DeleteServerlessCacheSnapshotRequest": { @@ -5914,7 +5914,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Idenfitier of the snapshot to be deleted. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

Identifier of the snapshot to be deleted. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#required": {} } } @@ -5929,7 +5929,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "

The snapshot to be deleted. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The snapshot to be deleted. Available for Valkey, Redis OSS and Serverless Memcached only.

" } } }, @@ -5960,7 +5960,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes an existing snapshot. When you receive a successful response from this\n operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or\n revert this operation.

\n \n

This operation is valid for Redis OSS only.

\n
", + "smithy.api#documentation": "

Deletes an existing snapshot. When you receive a successful response from this\n operation, ElastiCache immediately begins deleting the snapshot; you cannot cancel or\n revert this operation.

\n \n

This operation is valid for Valkey or Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "DeleteSnapshot", @@ -6056,7 +6056,7 @@ } ], "traits": { - "smithy.api#documentation": "

For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from\n all user groups and in turn removed from all replication groups. For more information,\n see Using Role Based Access Control (RBAC).

" + "smithy.api#documentation": "

For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user. The user will be removed from\n all user groups and in turn removed from all replication groups. For more information,\n see Using Role Based Access Control (RBAC).

" } }, "com.amazonaws.elasticache#DeleteUserGroup": { @@ -6082,7 +6082,7 @@ } ], "traits": { - "smithy.api#documentation": "

For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first\n be disassociated from the replication group before it can be deleted. For more\n information, see Using Role Based Access Control (RBAC).

" + "smithy.api#documentation": "

For Valkey engine version 7.2 onwards and Redis OSS 6.0 onwards: Deletes a user group. The user group must first\n be disassociated from the replication group before it can be deleted. For more\n information, see Using Role Based Access Control (RBAC).

" } }, "com.amazonaws.elasticache#DeleteUserGroupMessage": { @@ -6418,7 +6418,7 @@ "ShowCacheClustersNotInReplicationGroups": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

An optional flag that can be included in the DescribeCacheCluster request\n to show only nodes (API/CLI: clusters) that are not members of a replication group. In\n practice, this mean Memcached and single node Redis OSS clusters.

" + "smithy.api#documentation": "

An optional flag that can be included in the DescribeCacheCluster request\n to show only nodes (API/CLI: clusters) that are not members of a replication group. In\n practice, this means Memcached and single node Valkey or Redis OSS clusters.

" } } }, @@ -6627,7 +6627,7 @@ "CacheParameterGroupFamily": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of a specific cache parameter group family to return details for.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7

Constraints:

  • Must be 1 to 255 alphanumeric characters
  • First character must be a letter
  • Cannot end with a hyphen or contain two consecutive hyphens
" + "smithy.api#documentation": "

The name of a specific cache parameter group family to return details for.

Valid values are: memcached1.4 | memcached1.5 | memcached1.6 | redis2.6 | redis2.8 | redis3.2 | redis4.0 | redis5.0 | redis6.x | redis6.2 | redis7 | valkey7

Constraints:

  • Must be 1 to 255 alphanumeric characters
  • First character must be a letter
  • Cannot end with a hyphen or contain two consecutive hyphens
" } }, "MaxRecords": { @@ -8311,7 +8311,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about a particular replication group. If no identifier is\n specified, DescribeReplicationGroups returns information about all\n replication groups.

\n \n

This operation is valid for Redis OSS only.

\n
", + "smithy.api#documentation": "

Returns information about a particular replication group. If no identifier is\n specified, DescribeReplicationGroups returns information about all\n replication groups.

\n \n

This operation is valid for Valkey or Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "DescribeReplicationGroups", @@ -8521,7 +8521,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge (for region availability, see Supported Node Types)
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge (for region availability, see Supported Node Types)
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

The cache node type filter value. Use this parameter to show only those reservations matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge (for region availability, see Supported Node Types)
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge (for region availability, see Supported Node Types)
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
" } }, "Duration": { @@ -8902,7 +8902,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge (for region availability, see Supported Node Types)
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge (for region availability, see Supported Node Types)
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

The cache node type filter value. Use this parameter to show only the available offerings matching the specified cache node type.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge (for region availability, see Supported Node Types)
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge (for region availability, see Supported Node Types)
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
" } }, "Duration": { @@ -8964,7 +8964,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about serverless cache snapshots. \n By default, this API lists all of the customer’s serverless cache snapshots. \n It can also describe a single serverless cache snapshot, or the snapshots associated with \n a particular serverless cache. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

Returns information about serverless cache snapshots. \n By default, this API lists all of the customer’s serverless cache snapshots. \n It can also describe a single serverless cache snapshot, or the snapshots associated with \n a particular serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -8979,31 +8979,31 @@ "ServerlessCacheName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of serverless cache. If this parameter is specified, \n only snapshots associated with that specific serverless cache are described. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The identifier of serverless cache. If this parameter is specified, \n only snapshots associated with that specific serverless cache are described. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the serverless cache’s snapshot.\n If this parameter is specified, only this snapshot is described. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The identifier of the serverless cache’s snapshot.\n If this parameter is specified, only this snapshot is described. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "SnapshotType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The type of snapshot that is being described. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The type of snapshot that is being described. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "NextToken": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "MaxResults": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The maximum number of records to include in the response. If more records exist than \n the specified max-results value, a market is included in the response so that remaining results \n can be retrieved. Available for Redis OSS and Serverless Memcached only.The default is 50. The Validation Constraints are a maximum of 50.

" + "smithy.api#documentation": "

The maximum number of records to include in the response. If more records exist than \n the specified max-results value, a marker is included in the response so that remaining results \n can be retrieved. Available for Valkey, Redis OSS and Serverless Memcached only. The default is 50. The Validation Constraints are a maximum of 50.

" } } }, @@ -9017,13 +9017,13 @@ "NextToken": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

An optional marker returned from a prior request to support pagination of results from this operation. \n If this parameter is specified, the response includes only records beyond the marker, \n up to the value specified by max-results. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "ServerlessCacheSnapshots": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshotList", "traits": { - "smithy.api#documentation": "

The serverless caches snapshots associated with a given description request. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The serverless caches snapshots associated with a given description request. Available for Valkey, Redis OSS and Serverless Memcached only.
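For illustration, a minimal Soto (Swift) sketch of paginating DescribeServerlessCacheSnapshots with the NextToken/MaxResults behaviour described above; the shape and member names follow the generated code for this model, client setup is omitted, and the cache name is a hypothetical placeholder.

    import SotoElastiCache

    // Page through every snapshot of one serverless cache (sketch only).
    func listServerlessCacheSnapshots(_ elastiCache: ElastiCache) async throws {
        var nextToken: String?
        repeat {
            let request = ElastiCache.DescribeServerlessCacheSnapshotsRequest(
                maxResults: 50,                  // maximum allowed page size
                nextToken: nextToken,            // marker from the previous page, if any
                serverlessCacheName: "my-cache"  // hypothetical cache name
            )
            let page = try await elastiCache.describeServerlessCacheSnapshots(request)
            for snapshot in page.serverlessCacheSnapshots ?? [] {
                print(snapshot.serverlessCacheSnapshotName ?? "unnamed")
            }
            nextToken = page.nextToken
        } while nextToken != nil
    }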

" } } }, @@ -9190,7 +9190,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about cluster or replication group snapshots. By default,\n DescribeSnapshots lists all of your snapshots; it can optionally\n describe a single snapshot, or just the snapshots associated with a particular cache\n cluster.

\n \n

This operation is valid for Redis OSS only.

\n
", + "smithy.api#documentation": "

Returns information about cluster or replication group snapshots. By default,\n DescribeSnapshots lists all of your snapshots; it can optionally\n describe a single snapshot, or just the snapshots associated with a particular cache\n cluster.

\n \n

This operation is valid for Valkey or Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "DescribeSnapshots", @@ -9363,7 +9363,7 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis OSS or Memcached.

" + "smithy.api#documentation": "

The ElastiCache engine to which the update applies. Either Valkey, Redis OSS or Memcached.

" } }, "ServiceUpdateStatus": { @@ -9517,7 +9517,7 @@ "Engine": { "target": "com.amazonaws.elasticache#EngineType", "traits": { - "smithy.api#documentation": "

The Redis OSS engine.

" + "smithy.api#documentation": "

The engine.

" } }, "UserId": { @@ -9896,7 +9896,7 @@ } ], "traits": { - "smithy.api#documentation": "

Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Redis OSS only.

" + "smithy.api#documentation": "

Provides the functionality to export the serverless cache snapshot data to Amazon S3. Available for Valkey and Redis OSS only.

" } }, "com.amazonaws.elasticache#ExportServerlessCacheSnapshotRequest": { @@ -9906,7 +9906,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The identifier of the serverless cache snapshot to be exported to S3. Available for Redis OSS only.

", + "smithy.api#documentation": "

The identifier of the serverless cache snapshot to be exported to S3. Available for Valkey and Redis OSS only.

", "smithy.api#required": {} } }, @@ -9914,7 +9914,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region \n as the snapshot. Available for Redis OSS only.

", + "smithy.api#documentation": "

Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 bucket must also be in same region \n as the snapshot. Available for Valkey and Redis OSS only.

", "smithy.api#required": {} } } @@ -9929,7 +9929,7 @@ "ServerlessCacheSnapshot": { "target": "com.amazonaws.elasticache#ServerlessCacheSnapshot", "traits": { - "smithy.api#documentation": "

The state of a serverless cache at a specific point in time, to the millisecond. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The state of a serverless cache at a specific point in time, to the millisecond. Available for Valkey, Redis OSS and Serverless Memcached only.
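A minimal Soto (Swift) sketch of ExportServerlessCacheSnapshot, assuming the request members shown above map to s3BucketName and serverlessCacheSnapshotName in the generated code; the snapshot and bucket names are hypothetical, and the bucket must be in the same region as the snapshot.

    import SotoElastiCache

    // Export a serverless cache snapshot to S3 (sketch only).
    func exportSnapshot(_ elastiCache: ElastiCache) async throws {
        let request = ElastiCache.ExportServerlessCacheSnapshotRequest(
            s3BucketName: "my-snapshot-bucket",              // same region as the snapshot
            serverlessCacheSnapshotName: "my-cache-snapshot" // snapshot to export
        )
        let response = try await elastiCache.exportServerlessCacheSnapshot(request)
        print(response.serverlessCacheSnapshot?.serverlessCacheSnapshotName ?? "unknown")
    }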

" } } }, @@ -10127,13 +10127,13 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine. For Redis OSS only.

" + "smithy.api#documentation": "

The ElastiCache engine. For Valkey or Redis OSS only.

" } }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache (Redis OSS) engine version.

" + "smithy.api#documentation": "

The ElastiCache engine version.

" } }, "Members": { @@ -10157,7 +10157,7 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis OSS \n commands.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Valkey or Redis OSS \n commands.

\n

Default: false\n

" } }, "TransitEncryptionEnabled": { @@ -10412,7 +10412,7 @@ } ], "traits": { - "smithy.api#documentation": "

Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Redis OSS (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.

" + "smithy.api#documentation": "

Dynamically increases the number of replicas in a Valkey or Redis OSS (cluster mode disabled)\n replication group or the number of replica nodes in one or more node groups (shards) of\n a Valkey or Redis OSS (cluster mode enabled) replication group. This operation is performed with no\n cluster down time.

" } }, "com.amazonaws.elasticache#IncreaseReplicaCountMessage": { @@ -10429,13 +10429,13 @@ "NewReplicaCount": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of read replica nodes you want at the completion of this operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.

" + "smithy.api#documentation": "

The number of read replica nodes you want at the completion of this operation. For Valkey or Redis OSS (cluster mode disabled) replication groups, this is the number of replica nodes in\n the replication group. For Valkey or Redis OSS (cluster mode enabled) replication groups, this is the\n number of replica nodes in each of the replication group's node groups.

" } }, "ReplicaConfiguration": { "target": "com.amazonaws.elasticache#ReplicaConfigurationList", "traits": { - "smithy.api#documentation": "

A list of ConfigureShard objects that can be used to configure each\n shard in a Redis OSS (cluster mode enabled) replication group. The\n ConfigureShard has three members: NewReplicaCount,\n NodeGroupId, and PreferredAvailabilityZones.

" + "smithy.api#documentation": "

A list of ConfigureShard objects that can be used to configure each\n shard in a Valkey or Redis OSS (cluster mode enabled) replication group. The\n ConfigureShard has three members: NewReplicaCount,\n NodeGroupId, and PreferredAvailabilityZones.
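A minimal Soto (Swift) sketch of IncreaseReplicaCount using a ConfigureShard entry as described above; the replication group id, node group id and Availability Zones are hypothetical, and the zone list covers the primary plus the requested replicas.

    import SotoElastiCache

    // Add replicas to one shard of a cluster-mode-enabled group (sketch only).
    func addReplicas(_ elastiCache: ElastiCache) async throws {
        let shard = ElastiCache.ConfigureShard(
            newReplicaCount: 2,              // desired replicas in this shard
            nodeGroupId: "0001",             // 4-digit shard id
            preferredAvailabilityZones: ["us-east-1a", "us-east-1b", "us-east-1c"]
        )
        let request = ElastiCache.IncreaseReplicaCountMessage(
            applyImmediately: true,          // do not wait for the maintenance window
            replicaConfiguration: [shard],   // per-shard settings
            replicationGroupId: "my-replication-group"
        )
        _ = try await elastiCache.increaseReplicaCount(request)
    }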

" } }, "ApplyImmediately": { @@ -10696,7 +10696,7 @@ "code": "InvalidServerlessCacheSnapshotStateFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "

The state of the serverless cache snapshot was not received. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

The state of the serverless cache snapshot was not received. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -10863,7 +10863,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all available node types that you can scale your Redis OSS cluster's or replication\n group's current node type.

\n

When you use the ModifyCacheCluster or\n ModifyReplicationGroup operations to scale your cluster or replication\n group, the value of the CacheNodeType parameter must be one of the node\n types returned by this operation.

", + "smithy.api#documentation": "

Lists all available node types to which you can scale your cluster's or replication\n group's current node type.

\n

When you use the ModifyCacheCluster or\n ModifyReplicationGroup operations to scale your cluster or replication\n group, the value of the CacheNodeType parameter must be one of the node\n types returned by this operation.
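A minimal Soto (Swift) sketch of the flow described above: fetch the allowed node types, then pass one of them back to ModifyCacheCluster; the cluster id is hypothetical and member names such as scaleUpModifications follow the generated code for this model.

    import SotoElastiCache

    // Scale a cluster to one of its allowed larger node types (sketch only).
    func scaleUp(_ elastiCache: ElastiCache) async throws {
        let allowed = try await elastiCache.listAllowedNodeTypeModifications(
            ElastiCache.ListAllowedNodeTypeModificationsMessage(cacheClusterId: "my-cluster")
        )
        guard let target = allowed.scaleUpModifications?.first else { return }
        let modify = ElastiCache.ModifyCacheClusterMessage(
            applyImmediately: true,
            cacheClusterId: "my-cluster",
            cacheNodeType: target            // must be one of the returned node types
        )
        _ = try await elastiCache.modifyCacheCluster(modify)
    }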

", "smithy.api#examples": [ { "title": "ListAllowedNodeTypeModifications", @@ -11281,7 +11281,7 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of cache nodes that the cluster should have. If the value for\n NumCacheNodes is greater than the sum of the number of current cache\n nodes and the number of cache nodes pending creation (which may be zero), more nodes are\n added. If the value is less than the number of existing cache nodes, nodes are removed.\n If the value is equal to the number of current cache nodes, any pending add or remove\n requests are canceled.

\n

If you are removing cache nodes, you must use the CacheNodeIdsToRemove\n parameter to provide the IDs of the specific cache nodes to remove.

\n

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

\n \n

Adding or removing Memcached cache nodes can be applied immediately or as a\n pending operation (see ApplyImmediately).

\n

A pending operation to modify the number of cache nodes in a cluster during its\n maintenance window, whether by adding or removing nodes in accordance with the scale\n out architecture, is not queued. The customer's latest request to add or remove\n nodes to the cluster overrides any previous pending operations to modify the number\n of cache nodes in the cluster. For example, a request to remove 2 nodes would\n override a previous pending operation to remove 3 nodes. Similarly, a request to add\n 2 nodes would override a previous pending operation to remove 3 nodes and vice\n versa. As Memcached cache nodes may now be provisioned in different Availability\n Zones with flexible cache node placement, a request to add nodes does not\n automatically override a previous pending operation to add nodes. The customer can\n modify the previous pending operation to add more nodes or explicitly cancel the\n pending request and retry the new request. To cancel pending operations to modify\n the number of cache nodes in a cluster, use the ModifyCacheCluster\n request and set NumCacheNodes equal to the number of cache nodes\n currently in the cluster.

\n
" + "smithy.api#documentation": "

The number of cache nodes that the cluster should have. If the value for\n NumCacheNodes is greater than the sum of the number of current cache\n nodes and the number of cache nodes pending creation (which may be zero), more nodes are\n added. If the value is less than the number of existing cache nodes, nodes are removed.\n If the value is equal to the number of current cache nodes, any pending add or remove\n requests are canceled.

\n

If you are removing cache nodes, you must use the CacheNodeIdsToRemove\n parameter to provide the IDs of the specific cache nodes to remove.

\n

For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

\n \n

Adding or removing Memcached cache nodes can be applied immediately or as a\n pending operation (see ApplyImmediately).

\n

A pending operation to modify the number of cache nodes in a cluster during its\n maintenance window, whether by adding or removing nodes in accordance with the scale\n out architecture, is not queued. The customer's latest request to add or remove\n nodes to the cluster overrides any previous pending operations to modify the number\n of cache nodes in the cluster. For example, a request to remove 2 nodes would\n override a previous pending operation to remove 3 nodes. Similarly, a request to add\n 2 nodes would override a previous pending operation to remove 3 nodes and vice\n versa. As Memcached cache nodes may now be provisioned in different Availability\n Zones with flexible cache node placement, a request to add nodes does not\n automatically override a previous pending operation to add nodes. The customer can\n modify the previous pending operation to add more nodes or explicitly cancel the\n pending request and retry the new request. To cancel pending operations to modify\n the number of cache nodes in a cluster, use the ModifyCacheCluster\n request and set NumCacheNodes equal to the number of cache nodes\n currently in the cluster.

\n
" } }, "CacheNodeIdsToRemove": { @@ -11344,6 +11344,12 @@ "smithy.api#documentation": "

If true, this parameter causes the modifications in this request and any\n pending modifications to be applied, asynchronously and as soon as possible, regardless\n of the PreferredMaintenanceWindow setting for the cluster.

\n

If false, changes to the cluster are applied on the next maintenance\n reboot, or the next failure reboot, whichever occurs first.

\n \n

If you perform a ModifyCacheCluster before a pending modification is\n applied, the pending modification is replaced by the newer modification.

\n
\n

Valid values: true | false\n

\n

Default: false\n

" } }, + "Engine": { + "target": "com.amazonaws.elasticache#String", + "traits": { + "smithy.api#documentation": "

Modifies the engine listed in a cluster message. The options are redis, memcached or valkey.

" + } + }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { @@ -11353,7 +11359,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Valkey 7.2 or Redis OSS engine version 6.0 or later, set this parameter to yes \n to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SnapshotRetentionLimit": { @@ -11383,7 +11389,7 @@ "AuthTokenUpdateStrategy": { "target": "com.amazonaws.elasticache#AuthTokenUpdateStrategyType", "traits": { - "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with Redis OSS AUTH\n

" + "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with AUTH\n

" } }, "LogDeliveryConfigurations": { @@ -11395,7 +11401,7 @@ "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2\n and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.
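A minimal Soto (Swift) sketch of switching a cluster to the valkey engine using the Engine member added to ModifyCacheClusterMessage in this change; the member is only present once the Swift code is regenerated from this model, and the cluster id and version string are hypothetical placeholders.

    import SotoElastiCache

    // Move an existing cluster to the valkey engine (sketch only).
    func upgradeToValkey(_ elastiCache: ElastiCache) async throws {
        let request = ElastiCache.ModifyCacheClusterMessage(
            applyImmediately: true,          // apply now rather than at the maintenance window
            cacheClusterId: "my-cluster",
            engine: "valkey",                // allowed values per this model: redis, memcached or valkey
            engineVersion: "7.2"             // hypothetical target version; use one supported in your Region
        )
        _ = try await elastiCache.modifyCacheCluster(request)
    }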

" } } }, @@ -11659,6 +11665,12 @@ "smithy.api#documentation": "

A valid cache node type that you want to scale this Global datastore to.

" } }, + "Engine": { + "target": "com.amazonaws.elasticache#String", + "traits": { + "smithy.api#documentation": "

Modifies the engine listed in a global replication group message. The options are redis, memcached or valkey.

" + } + }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { @@ -11758,7 +11770,7 @@ } ], "traits": { - "smithy.api#documentation": "

Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer.

\n \n \n

This operation is valid for Redis OSS only.

\n
", + "smithy.api#documentation": "

Modifies the settings for a replication group. This is limited to Valkey and Redis OSS 7 and above.

\n \n \n

This operation is valid for Valkey or Redis OSS only.

\n
", "smithy.api#examples": [ { "title": "ModifyReplicationGroup", @@ -11857,7 +11869,7 @@ "SnapshottingClusterId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cluster ID that is used as the daily snapshot source for the replication group.\n This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.

" + "smithy.api#documentation": "

The cluster ID that is used as the daily snapshot source for the replication group.\n This parameter cannot be set for Valkey or Redis OSS (cluster mode enabled) replication groups.

" } }, "AutomaticFailoverEnabled": { @@ -11921,6 +11933,12 @@ "smithy.api#documentation": "

If true, this parameter causes the modifications in this request and any\n pending modifications to be applied, asynchronously and as soon as possible, regardless\n of the PreferredMaintenanceWindow setting for the replication group.

\n

If false, changes to the nodes in the replication group are applied on\n the next maintenance reboot, or the next failure reboot, whichever occurs first.

\n

Valid values: true | false\n

\n

Default: false\n

" } }, + "Engine": { + "target": "com.amazonaws.elasticache#String", + "traits": { + "smithy.api#documentation": "

Modifies the engine listed in a replication group message. The options are redis, memcached or valkey.

" + } + }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { @@ -11930,7 +11948,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

 If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" + "smithy.api#documentation": "

 If you are running Valkey or Redis OSS engine version 6.0 or later, set this parameter to yes if\n you want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions. 

" } }, "SnapshotRetentionLimit": { @@ -11960,7 +11978,7 @@ "AuthTokenUpdateStrategy": { "target": "com.amazonaws.elasticache#AuthTokenUpdateStrategyType", "traits": { - "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with Redis OSS AUTH\n

" + "smithy.api#documentation": "

Specifies the strategy to use to update the AUTH token. This parameter must be\n specified with the auth-token parameter. Possible values:

\n
    \n
  • \n

    ROTATE - default, if no update strategy is provided

    \n
  • \n
  • \n

    SET - allowed only after ROTATE

    \n
  • \n
  • \n

    DELETE - allowed only when transitioning to RBAC

    \n
  • \n
\n

For more information, see Authenticating Users with AUTH\n

" } }, "UserGroupIdsToAdd": { @@ -11990,7 +12008,7 @@ "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2\n and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } }, "TransitEncryptionEnabled": { @@ -12002,13 +12020,13 @@ "TransitEncryptionMode": { "target": "com.amazonaws.elasticache#TransitEncryptionMode", "traits": { - "smithy.api#documentation": "

A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.

\n

You must set TransitEncryptionEnabled to true, for your\n existing cluster, and set TransitEncryptionMode to preferred\n in the same request to allow both encrypted and unencrypted connections at the same\n time. Once you migrate all your Redis OSS clients to use encrypted connections you can set\n the value to required to allow encrypted connections only.

\n

Setting TransitEncryptionMode to required is a two-step\n process that requires you to first set the TransitEncryptionMode to\n preferred, after that you can set TransitEncryptionMode to\n required.

" + "smithy.api#documentation": "

A setting that allows you to migrate your clients to use in-transit encryption, with\n no downtime.

\n

You must set TransitEncryptionEnabled to true, for your\n existing cluster, and set TransitEncryptionMode to preferred\n in the same request to allow both encrypted and unencrypted connections at the same\n time. Once you migrate all your Valkey or Redis OSS clients to use encrypted connections you can set\n the value to required to allow encrypted connections only.

\n

Setting TransitEncryptionMode to required is a two-step\n process that requires you to first set the TransitEncryptionMode to\n preferred, after that you can set TransitEncryptionMode to\n required.
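A minimal Soto (Swift) sketch of the two-step in-transit-encryption migration described above, assuming the generated ModifyReplicationGroupMessage shape and TransitEncryptionMode enum; the replication group id is a placeholder.

    import SotoElastiCache

    // Step 1 now, step 2 after all clients connect over TLS (sketch only).
    func enableTransitEncryption(_ elastiCache: ElastiCache) async throws {
        // Step 1: accept both encrypted and unencrypted connections.
        _ = try await elastiCache.modifyReplicationGroup(ElastiCache.ModifyReplicationGroupMessage(
            applyImmediately: true,
            replicationGroupId: "my-replication-group",
            transitEncryptionEnabled: true,
            transitEncryptionMode: .preferred
        ))
        // Step 2: allow encrypted connections only.
        _ = try await elastiCache.modifyReplicationGroup(ElastiCache.ModifyReplicationGroupMessage(
            replicationGroupId: "my-replication-group",
            transitEncryptionMode: .required
        ))
    }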

" } }, "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.
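A short Soto (Swift) sketch of the cluster-mode migration described above: move to Compatible first, migrate the clients, then finish with Enabled; shape and enum case names follow the generated code and the replication group id is hypothetical.

    import SotoElastiCache

    // Two calls, with client migration in between (sketch only).
    func enableClusterMode(_ elastiCache: ElastiCache) async throws {
        _ = try await elastiCache.modifyReplicationGroup(ElastiCache.ModifyReplicationGroupMessage(
            applyImmediately: true,
            clusterMode: .compatible,        // clients may use either connection mode
            replicationGroupId: "my-replication-group"
        ))
        // ... after all clients use cluster-mode connections ...
        _ = try await elastiCache.modifyReplicationGroup(ElastiCache.ModifyReplicationGroupMessage(
            applyImmediately: true,
            clusterMode: .enabled,
            replicationGroupId: "my-replication-group"
        ))
    }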

" } } }, @@ -12079,7 +12097,7 @@ "target": "com.amazonaws.elasticache#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the Redis OSS (cluster mode enabled) cluster (replication group) on which the\n shards are to be configured.

", + "smithy.api#documentation": "

The name of the Valkey or Redis OSS (cluster mode enabled) cluster (replication group) on which the\n shards are to be configured.

", "smithy.api#required": {} } }, @@ -12108,13 +12126,13 @@ "NodeGroupsToRemove": { "target": "com.amazonaws.elasticache#NodeGroupsToRemoveList", "traits": { - "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node\n groups (shards), then either NodeGroupsToRemove or\n NodeGroupsToRetain is required. NodeGroupsToRemove is a\n list of NodeGroupIds to remove from the cluster.

\n

ElastiCache (Redis OSS) will attempt to remove all node groups listed by\n NodeGroupsToRemove from the cluster.

" + "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node\n groups (shards), then either NodeGroupsToRemove or\n NodeGroupsToRetain is required. NodeGroupsToRemove is a\n list of NodeGroupIds to remove from the cluster.

\n

ElastiCache will attempt to remove all node groups listed by\n NodeGroupsToRemove from the cluster.

" } }, "NodeGroupsToRetain": { "target": "com.amazonaws.elasticache#NodeGroupsToRetainList", "traits": { - "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node\n groups (shards), then either NodeGroupsToRemove or\n NodeGroupsToRetain is required. NodeGroupsToRetain is a\n list of NodeGroupIds to retain in the cluster.

\n

ElastiCache (Redis OSS) will attempt to remove all node groups except those listed by\n NodeGroupsToRetain from the cluster.

" + "smithy.api#documentation": "

If the value of NodeGroupCount is less than the current number of node\n groups (shards), then either NodeGroupsToRemove or\n NodeGroupsToRetain is required. NodeGroupsToRetain is a\n list of NodeGroupIds to retain in the cluster.

\n

ElastiCache will attempt to remove all node groups except those listed by\n NodeGroupsToRetain from the cluster.
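A minimal Soto (Swift) sketch of shrinking a cluster-mode-enabled group by naming the node groups to keep, per the NodeGroupsToRetain behaviour above; the replication group and node group ids are hypothetical.

    import SotoElastiCache

    // Reduce a group to two shards; all other node groups are removed (sketch only).
    func shrinkShards(_ elastiCache: ElastiCache) async throws {
        let request = ElastiCache.ModifyReplicationGroupShardConfigurationMessage(
            applyImmediately: true,
            nodeGroupCount: 2,                       // target number of shards
            nodeGroupsToRetain: ["0001", "0002"],    // shards to keep
            replicationGroupId: "my-replication-group"
        )
        _ = try await elastiCache.modifyReplicationGroupShardConfiguration(request)
    }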

" } } }, @@ -12198,13 +12216,13 @@ "RemoveUserGroup": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

The identifier of the UserGroup to be removed from association with the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL.

" + "smithy.api#documentation": "

The identifier of the UserGroup to be removed from association with the Valkey and Redis OSS serverless cache. Available for Valkey and Redis OSS only. Default is NULL.

" } }, "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the UserGroup to be associated with the serverless cache. Available for Redis OSS only. \n Default is NULL - the existing UserGroup is not removed.

" + "smithy.api#documentation": "

The identifier of the UserGroup to be associated with the serverless cache. Available for Valkey and Redis OSS only. \n Default is NULL - the existing UserGroup is not removed.

" } }, "SecurityGroupIds": { @@ -12216,13 +12234,25 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of days for which Elasticache retains automatic snapshots before deleting them. \n Available for Redis OSS and Serverless Memcached only.\n Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. \n The maximum value allowed is 35 days.

" + "smithy.api#documentation": "

The number of days for which ElastiCache retains automatic snapshots before deleting them. \n Available for Valkey, Redis OSS and Serverless Memcached only.\n Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. \n The maximum value allowed is 35 days.

" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The daily time during which Elasticache begins taking a daily snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only.\n The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed.

" + "smithy.api#documentation": "

The daily time during which ElastiCache begins taking a daily snapshot of the serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only.\n The default is NULL, i.e. the existing snapshot time configured for the cluster is not removed.

" + } + }, + "Engine": { + "target": "com.amazonaws.elasticache#String", + "traits": { + "smithy.api#documentation": "

Modifies the engine listed in a serverless cache request. The options are redis, memcached or valkey.

" + } + }, + "MajorEngineVersion": { + "target": "com.amazonaws.elasticache#String", + "traits": { + "smithy.api#documentation": "

Modifies the engine version listed in a serverless cache request.
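A minimal Soto (Swift) sketch using the Engine and MajorEngineVersion members added to ModifyServerlessCacheRequest in this change; the members exist only after regenerating the Swift code from this model, and the cache name and version are placeholders.

    import SotoElastiCache

    // Switch a serverless cache to the valkey engine (sketch only).
    func moveServerlessCacheToValkey(_ elastiCache: ElastiCache) async throws {
        let request = ElastiCache.ModifyServerlessCacheRequest(
            engine: "valkey",                 // redis, memcached or valkey per this model
            majorEngineVersion: "8",          // hypothetical major version; check what your Region supports
            serverlessCacheName: "my-serverless-cache"
        )
        _ = try await elastiCache.modifyServerlessCache(request)
    }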

" } } }, @@ -12454,7 +12484,7 @@ "NodeGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier for the node group (shard). A Redis OSS (cluster mode disabled) replication\n group contains only 1 node group; therefore, the node group ID is 0001. A Redis OSS (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090.\n Optionally, the user can provide the id for a node group.

" + "smithy.api#documentation": "

The identifier for the node group (shard). A Valkey or Redis OSS (cluster mode disabled) replication\n group contains only 1 node group; therefore, the node group ID is 0001. A Valkey or Redis OSS (cluster mode enabled) replication group contains 1 to 90 node groups numbered 0001 to 0090.\n Optionally, the user can provide the id for a node group.

" } }, "Status": { @@ -12498,7 +12528,7 @@ "NodeGroupId": { "target": "com.amazonaws.elasticache#AllowedNodeGroupId", "traits": { - "smithy.api#documentation": "

Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.

" + "smithy.api#documentation": "

Either the ElastiCache supplied 4-digit id or a user supplied id for the\n node group these configuration values apply to.

" } }, "Slots": { @@ -12578,7 +12608,7 @@ "ReadEndpoint": { "target": "com.amazonaws.elasticache#Endpoint", "traits": { - "smithy.api#documentation": "

The information required for client programs to connect to a node for read operations.\n The read endpoint is only applicable on Redis OSS (cluster mode disabled) clusters.

" + "smithy.api#documentation": "

The information required for client programs to connect to a node for read operations.\n The read endpoint is only applicable on Valkey or Redis OSS (cluster mode disabled) clusters.

" } }, "PreferredAvailabilityZone": { @@ -12596,7 +12626,7 @@ "CurrentRole": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The role that is currently assigned to the node - primary or\n replica. This member is only applicable for Redis OSS (cluster mode\n disabled) replication groups.

" + "smithy.api#documentation": "

The role that is currently assigned to the node - primary or\n replica. This member is only applicable for Valkey or Redis OSS (cluster mode\n disabled) replication groups.

" } } }, @@ -13140,7 +13170,7 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The new number of cache nodes for the cluster.

\n

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

" + "smithy.api#documentation": "

The new number of cache nodes for the cluster.

\n

For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this\n value must be between 1 and 40.

" } }, "CacheNodeIdsToRemove": { @@ -13232,7 +13262,7 @@ "UpdateActionStatus": { "target": "com.amazonaws.elasticache#UpdateActionStatus", "traits": { - "smithy.api#documentation": "

The status of the update action on the Redis OSS cluster

" + "smithy.api#documentation": "

The status of the update action on the Valkey or Redis OSS cluster

" } } }, @@ -13278,7 +13308,7 @@ } ], "traits": { - "smithy.api#documentation": "

Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible\n for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes for Redis OSS or Managing Costs with\n Reserved Nodes for Memcached.

", + "smithy.api#documentation": "

Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible\n for cancellation and are non-refundable. For more information, see Managing Costs with Reserved Nodes.

", "smithy.api#examples": [ { "title": "PurchaseReservedCacheNodesOfferings", @@ -13411,7 +13441,7 @@ } ], "traits": { - "smithy.api#documentation": "

Reboots some, or all, of the cache nodes within a provisioned cluster. This operation\n applies any modified cache parameter groups to the cluster. The reboot operation takes\n place as soon as possible, and results in a momentary outage to the cluster. During the\n reboot, the cluster status is set to REBOOTING.

\n

The reboot causes the contents of the cache (for each cache node being rebooted) to be\n lost.

\n

When the reboot is complete, a cluster event is created.

\n

Rebooting a cluster is currently supported on Memcached and Redis OSS (cluster mode\n disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode enabled)\n clusters.

\n

If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster\n reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.

", + "smithy.api#documentation": "

Reboots some, or all, of the cache nodes within a provisioned cluster. This operation\n applies any modified cache parameter groups to the cluster. The reboot operation takes\n place as soon as possible, and results in a momentary outage to the cluster. During the\n reboot, the cluster status is set to REBOOTING.

\n

The reboot causes the contents of the cache (for each cache node being rebooted) to be\n lost.

\n

When the reboot is complete, a cluster event is created.

\n

Rebooting a cluster is currently supported on Memcached, Valkey and Redis OSS (cluster mode\n disabled) clusters. Rebooting is not supported on Valkey or Redis OSS (cluster mode enabled)\n clusters.

\n

If you make changes to parameters that require a Valkey or Redis OSS (cluster mode enabled) cluster\n reboot for the changes to be applied, see Rebooting a Cluster for an alternate process.
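A minimal Soto (Swift) sketch of rebooting selected nodes as described above; the cluster and node ids are hypothetical, and the contents of the rebooted nodes are lost while the cluster status shows REBOOTING.

    import SotoElastiCache

    // Reboot two nodes of a provisioned cluster (sketch only).
    func rebootNodes(_ elastiCache: ElastiCache) async throws {
        let request = ElastiCache.RebootCacheClusterMessage(
            cacheClusterId: "my-cluster",
            cacheNodeIdsToReboot: ["0001", "0002"]   // 4-digit node ids
        )
        _ = try await elastiCache.rebootCacheCluster(request)
    }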

", "smithy.api#examples": [ { "title": "RebootCacheCluster", @@ -13742,7 +13772,7 @@ "NodeGroups": { "target": "com.amazonaws.elasticache#NodeGroupList", "traits": { - "smithy.api#documentation": "

A list of node groups in this replication group. For Redis OSS (cluster mode disabled)\n replication groups, this is a single-element list. For Redis OSS (cluster mode enabled)\n replication groups, the list contains an entry for each node group (shard).

" + "smithy.api#documentation": "

A list of node groups in this replication group. For Valkey or Redis OSS (cluster mode disabled)\n replication groups, this is a single-element list. For Valkey or Redis OSS (cluster mode enabled)\n replication groups, the list contains an entry for each node group (shard).

" } }, "SnapshottingClusterId": { @@ -13754,7 +13784,7 @@ "AutomaticFailover": { "target": "com.amazonaws.elasticache#AutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "

Indicates the status of automatic failover for this Redis OSS replication group.

" + "smithy.api#documentation": "

Indicates the status of automatic failover for this Valkey or Redis OSS replication group.

" } }, "MultiAZ": { @@ -13796,7 +13826,7 @@ "AuthTokenEnabled": { "target": "com.amazonaws.elasticache#BooleanOptional", "traits": { - "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Redis OSS \n commands.

\n

Default: false\n

" + "smithy.api#documentation": "

A flag that enables using an AuthToken (password) when issuing Valkey or Redis OSS \n commands.

\n

Default: false\n

" } }, "AuthTokenLastModifiedDate": { @@ -13862,19 +13892,19 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#Boolean", "traits": { - "smithy.api#documentation": "

If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you\n want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.

" + "smithy.api#documentation": "

If you are running Valkey 7.2 and above, or Redis OSS engine version 6.0 and above, set this parameter to yes if you\n want to opt-in to the next auto minor version upgrade campaign. This parameter is\n disabled for previous versions.

" } }, "NetworkType": { "target": "com.amazonaws.elasticache#NetworkType", "traits": { - "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine\n version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Must be either ipv4 | ipv6 | dual_stack. IPv6\n is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2\n and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } }, "IpDiscovery": { "target": "com.amazonaws.elasticache#IpDiscovery", "traits": { - "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2\n onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

The network type you choose when modifying a cluster, either ipv4 |\n ipv6. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2\n and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } }, "TransitEncryptionMode": { @@ -13886,12 +13916,18 @@ "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS \n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS \n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + } + }, + "Engine": { + "target": "com.amazonaws.elasticache#String", + "traits": { + "smithy.api#documentation": "

The engine used in a replication group. The options are redis, memcached or valkey.

" } } }, "traits": { - "smithy.api#documentation": "

Contains all of the attributes of a specific Redis OSS replication group.

" + "smithy.api#documentation": "

Contains all of the attributes of a specific Valkey or Redis OSS replication group.

" } }, "com.amazonaws.elasticache#ReplicationGroupAlreadyExistsFault": { @@ -14025,7 +14061,7 @@ "AutomaticFailoverStatus": { "target": "com.amazonaws.elasticache#PendingAutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "

Indicates the status of automatic failover for this Redis OSS replication group.

" + "smithy.api#documentation": "

Indicates the status of automatic failover for this Valkey or Redis OSS replication group.

" } }, "Resharding": { @@ -14067,12 +14103,12 @@ "ClusterMode": { "target": "com.amazonaws.elasticache#ClusterMode", "traits": { - "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" + "smithy.api#documentation": "

Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first\n set the cluster mode to Compatible. Compatible mode allows your Valkey or Redis OSS clients to connect\n using both cluster mode enabled and cluster mode disabled. After you migrate all Valkey or Redis OSS\n clients to use cluster mode enabled, you can then complete cluster mode configuration\n and set the cluster mode to Enabled.

" } } }, "traits": { - "smithy.api#documentation": "

The settings to be applied to the Redis OSS replication group, either immediately or\n during the next maintenance window.

" + "smithy.api#documentation": "

The settings to be applied to the Valkey or Redis OSS replication group, either immediately or\n during the next maintenance window.

" } }, "com.amazonaws.elasticache#ReservedCacheNode": { @@ -14093,7 +14129,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

The cache node type for the reserved cache nodes.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:

    • Current generation:

      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge

      For region availability, see Supported Node Types

      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge

      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge

      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge

      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium

      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium

      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      T1 node types: cache.t1.micro

      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge

      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge

  • Compute optimized:

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      C1 node types: cache.c1.xlarge

  • Memory optimized:

    • Current generation:

      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge

      For region availability, see Supported Node Types

      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge

      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge

      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge

    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)

      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge

      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.

  • Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.

  • Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.

  • The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
" } }, "StartTime": { @@ -14254,7 +14290,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
      For region availability, see Supported Node Types
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
      For region availability, see Supported Node Types
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

The cache node type for the reserved cache node.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
      For region availability, see Supported Node Types
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
      For region availability, see Supported Node Types
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
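A minimal Soto sketch of how a caller typically consumes this CacheNodeType field, assuming the generated Swift API follows Soto's usual operation and shape naming (describeReservedCacheNodes, DescribeReservedCacheNodesMessage); the region, node type, and client wiring are illustrative only:

```swift
import SotoElastiCache

// `client` is assumed to be an already-configured soto-core AWSClient.
func listR7gReservations(client: AWSClient) async throws {
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // Ask only for reservations of one current-generation memory-optimized type.
    let request = ElastiCache.DescribeReservedCacheNodesMessage(cacheNodeType: "cache.r7g.large")
    let response = try await elastiCache.describeReservedCacheNodes(request)

    for node in response.reservedCacheNodes ?? [] {
        print(node.reservedCacheNodeId ?? "-", node.cacheNodeType ?? "-")
    }
}
```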
" } }, "Duration": { @@ -14422,7 +14458,7 @@ "NodeGroupId": { "target": "com.amazonaws.elasticache#AllowedNodeGroupId", "traits": { - "smithy.api#documentation": "

Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.

" + "smithy.api#documentation": "

Either the ElastiCache supplied 4-digit id or a user supplied id for the node group these configuration values apply to.

" } }, "PreferredAvailabilityZones": { @@ -14656,7 +14692,7 @@ "UserGroupId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.

" + "smithy.api#documentation": "

The identifier of the user group associated with the serverless cache. Available for Valkey and Redis OSS only. Default is NULL.

" } }, "SubnetIds": { @@ -14668,13 +14704,13 @@ "SnapshotRetentionLimit": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The current setting for the number of serverless cache snapshots the system will retain. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "DailySnapshotTime": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Valkey, Redis OSS and Serverless Memcached only.

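A minimal Soto sketch of setting these two serverless-cache snapshot settings, assuming the generated Swift names modifyServerlessCache and ModifyServerlessCacheRequest with members matching this model; the cache name and values are placeholders:

```swift
import SotoElastiCache

// `client` is assumed to be an already-configured soto-core AWSClient.
func configureDailySnapshots(client: AWSClient) async throws {
    let elastiCache = ElastiCache(client: client, region: .useast1)

    let request = ElastiCache.ModifyServerlessCacheRequest(
        dailySnapshotTime: "04:00",                 // UTC time at which the daily snapshot is taken
        serverlessCacheName: "my-serverless-cache", // placeholder name
        snapshotRetentionLimit: 7                   // keep a week of snapshots
    )
    _ = try await elastiCache.modifyServerlessCache(request)
}
```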
" } } }, @@ -14771,60 +14807,60 @@ "ServerlessCacheSnapshotName": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The identifier of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The identifier of a serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "ARN": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "KmsKeyId": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The ID of the Amazon Web Services Key Management Service (KMS) key of a serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "SnapshotType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The type of snapshot of serverless cache. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The type of snapshot of serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "Status": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The current status of the serverless cache. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The current status of the serverless cache. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "CreateTime": { "target": "com.amazonaws.elasticache#TStamp", "traits": { - "smithy.api#documentation": "

The date and time that the source serverless cache's metadata and cache data set was obtained for the snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The date and time that the source serverless cache's metadata and cache data set was obtained for the snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "ExpiryTime": { "target": "com.amazonaws.elasticache#TStamp", "traits": { - "smithy.api#documentation": "

The time that the serverless cache snapshot will expire. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The time that the serverless cache snapshot will expire. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "BytesUsedForCache": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The total size of a serverless cache snapshot, in bytes. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The total size of a serverless cache snapshot, in bytes. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "ServerlessCacheConfiguration": { "target": "com.amazonaws.elasticache#ServerlessCacheConfiguration", "traits": { - "smithy.api#documentation": "

The configuration of the serverless cache, at the time the snapshot was taken. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The configuration of the serverless cache, at the time the snapshot was taken. Available for Valkey, Redis OSS and Serverless Memcached only.

" } } }, "traits": { - "smithy.api#documentation": "

The resource representing a serverless cache snapshot. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

The resource representing a serverless cache snapshot. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "com.amazonaws.elasticache#ServerlessCacheSnapshotAlreadyExistsFault": { @@ -14839,7 +14875,7 @@ "code": "ServerlessCacheSnapshotAlreadyExistsFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "

A serverless cache snapshot with this name already exists. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

A serverless cache snapshot with this name already exists. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -14865,7 +14901,7 @@ "code": "ServerlessCacheSnapshotNotFoundFault", "httpResponseCode": 404 }, - "smithy.api#documentation": "

This serverless cache snapshot could not be found or does not exist. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

This serverless cache snapshot could not be found or does not exist. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -14882,7 +14918,7 @@ "code": "ServerlessCacheSnapshotQuotaExceededFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "

The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Redis OSS and Serverless Memcached only.

", + "smithy.api#documentation": "

The number of serverless cache snapshots exceeds the customer snapshot quota. Available for Valkey, Redis OSS and Serverless Memcached only.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -14958,13 +14994,13 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis OSS or Memcached.

" + "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Valkey, Redis OSS or Memcached.

" } }, "EngineVersion": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine version to which the update applies. Either Redis OSS or Memcached engine version.

" + "smithy.api#documentation": "

The Elasticache engine version to which the update applies. Either Valkey, Redis OSS or Memcached engine version.

" } }, "AutoUpdateAfterRecommendedApplyByDate": { @@ -14981,7 +15017,7 @@ } }, "traits": { - "smithy.api#documentation": "

An update that you can apply to your Redis OSS clusters.

" + "smithy.api#documentation": "

An update that you can apply to your Valkey or Redis OSS clusters.

" } }, "com.amazonaws.elasticache#ServiceUpdateList": { @@ -15184,7 +15220,7 @@ "CacheNodeType": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
      For region availability, see Supported Node Types
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
      For region availability, see Supported Node Types
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • Redis OSS configuration variables appendonly and appendfsync are not supported on Redis OSS version 2.8.22 and later.
" + "smithy.api#documentation": "

The name of the compute and memory capacity node type for the source cluster.

The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.

  • General purpose:
    • Current generation:
      M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge
      For region availability, see Supported Node Types
      M6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge
      M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge
      M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
      T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium
      T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium
      T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      T1 node types: cache.t1.micro
      M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge
      M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge
  • Compute optimized:
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      C1 node types: cache.c1.xlarge
  • Memory optimized:
    • Current generation:
      R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge
      For region availability, see Supported Node Types
      R6g node types (available only for Redis OSS engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge
      R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge
      R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge
    • Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.)
      M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
      R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge

Additional node type info

  • All current generation instance types are created in Amazon VPC by default.
  • Valkey or Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.
  • Valkey or Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.
  • The configuration variables appendonly and appendfsync are not supported on Valkey, or on Redis OSS version 2.8.22 and later.
" } }, "Engine": { @@ -15202,7 +15238,7 @@ "NumCacheNodes": { "target": "com.amazonaws.elasticache#IntegerOptional", "traits": { - "smithy.api#documentation": "

The number of cache nodes in the source cluster.

\n

For clusters running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" + "smithy.api#documentation": "

The number of cache nodes in the source cluster.

\n

For clusters running Valkey or Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40.

" } }, "PreferredAvailabilityZone": { @@ -15262,7 +15298,7 @@ "AutoMinorVersionUpgrade": { "target": "com.amazonaws.elasticache#Boolean", "traits": { - "smithy.api#documentation": "

If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.

" + "smithy.api#documentation": "

If you are running Valkey 7.2 and above or Redis OSS engine version 6.0 and above, set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions.

" } }, "SnapshotRetentionLimit": { @@ -15286,7 +15322,7 @@ "AutomaticFailover": { "target": "com.amazonaws.elasticache#AutomaticFailoverStatus", "traits": { - "smithy.api#documentation": "

Indicates the status of automatic failover for the source Redis OSS replication group.

" + "smithy.api#documentation": "

Indicates the status of automatic failover for the source Valkey or Redis OSS replication group.

" } }, "NodeSnapshots": { @@ -15315,7 +15351,7 @@ } }, "traits": { - "smithy.api#documentation": "

Represents a copy of an entire Redis OSS cluster as of the time when the snapshot was taken.

" + "smithy.api#documentation": "

Represents a copy of an entire Valkey or Redis OSS cluster as of the time when the snapshot was taken.

" } }, "com.amazonaws.elasticache#SnapshotAlreadyExistsFault": { @@ -15356,7 +15392,7 @@ "code": "SnapshotFeatureNotSupportedFault", "httpResponseCode": 400 }, - "smithy.api#documentation": "

You attempted one of the following operations:

  • Creating a snapshot of a Redis OSS cluster running on a cache.t1.micro cache node.
  • Creating a snapshot of a cluster that is running Memcached rather than Redis OSS.

Neither of these are supported by ElastiCache.

", + "smithy.api#documentation": "

You attempted one of the following operations:

  • Creating a snapshot of a Valkey or Redis OSS cluster running on a cache.t1.micro cache node.
  • Creating a snapshot of a cluster that is running Memcached rather than Valkey or Redis OSS.

Neither of these are supported by ElastiCache.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15504,7 +15540,7 @@ "target": "com.amazonaws.elasticache#CustomerNodeEndpointList", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

List of endpoints from which data should be migrated. For Redis OSS (cluster mode disabled), list should have only one element.

", + "smithy.api#documentation": "

List of endpoints from which data should be migrated. For Valkey or Redis OSS (cluster mode disabled), the list should have only one element.

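A minimal Soto sketch of starting such a migration with the single source endpoint that cluster mode disabled requires, assuming the generated Swift names startMigration, StartMigrationMessage, and CustomerNodeEndpoint; the address, port, and replication group id are placeholders:

```swift
import SotoElastiCache

// `client` is assumed to be an already-configured soto-core AWSClient.
func migrateFromSelfHosted(client: AWSClient) async throws {
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // Cluster mode disabled: exactly one source endpoint, per the documentation above.
    let source = ElastiCache.CustomerNodeEndpoint(address: "redis.example.internal", port: 6379)

    let request = ElastiCache.StartMigrationMessage(
        customerNodeEndpointList: [source],
        replicationGroupId: "my-replication-group"
    )
    _ = try await elastiCache.startMigration(request)
}
```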
", "smithy.api#required": {} } } @@ -15551,7 +15587,7 @@ "SupportedNetworkTypes": { "target": "com.amazonaws.elasticache#NetworkTypeList", "traits": { - "smithy.api#documentation": "

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system.

" + "smithy.api#documentation": "

Either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads using Valkey 7.2 and above, Redis OSS engine version 6.2 and above or Memcached engine version 1.6.6 and above on all instances built on the Nitro system.

" } } }, @@ -15752,7 +15788,7 @@ } ], "traits": { - "smithy.api#documentation": "

Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API.

Note the following

  • A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period.
  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.
  • If calling this operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.
  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:

    1. Replication group message: Test Failover API called for node group
    2. Cache cluster message: Failover from primary node to replica node completed
    3. Replication group message: Failover from primary node to replica node completed
    4. Cache cluster message: Recovering cache nodes
    5. Cache cluster message: Finished recovery for cache nodes

    For more information see:

Also see, Testing Multi-AZ in the ElastiCache User Guide.

" + "smithy.api#documentation": "

Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).

This API is designed for testing the behavior of your application in case of ElastiCache failover. It is not designed to be an operational tool for initiating a failover to overcome a problem you may have with the cluster. Moreover, in certain conditions such as large-scale operational events, Amazon may block this API.

Note the following

  • A customer can use this operation to test automatic failover on up to 15 shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period.
  • If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently.
  • If calling this operation multiple times on different shards in the same Valkey or Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made.
  • To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the following automatic failover related events, listed here in order of occurrence:

    1. Replication group message: Test Failover API called for node group
    2. Cache cluster message: Failover from primary node to replica node completed
    3. Replication group message: Failover from primary node to replica node completed
    4. Cache cluster message: Recovering cache nodes
    5. Cache cluster message: Finished recovery for cache nodes

    For more information see:

Also see, Testing Multi-AZ in the ElastiCache User Guide.

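A minimal Soto sketch of the flow described above (call TestFailover, then watch the listed events), assuming the generated Swift names testFailover, TestFailoverMessage, describeEvents, and DescribeEventsMessage; the replication group and node group ids are placeholders:

```swift
import Foundation
import SotoElastiCache

// `client` is assumed to be an already-configured soto-core AWSClient.
func exerciseFailover(client: AWSClient) async throws {
    let elastiCache = ElastiCache(client: client, region: .useast1)

    // Kick off the test on one shard (node group) of the replication group.
    _ = try await elastiCache.testFailover(
        ElastiCache.TestFailoverMessage(nodeGroupId: "0001", replicationGroupId: "my-replication-group")
    )

    // Check the events described above to see when the node replacement finishes.
    let events = try await elastiCache.describeEvents(
        ElastiCache.DescribeEventsMessage(
            sourceIdentifier: "my-replication-group",
            sourceType: .replicationGroup
        )
    )
    for event in events.events ?? [] {
        print(event.date ?? Date(), event.message ?? "")
    }
}
```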
" } }, "com.amazonaws.elasticache#TestFailoverMessage": { @@ -16066,7 +16102,7 @@ "Engine": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Redis OSS or Memcached.

" + "smithy.api#documentation": "

The Elasticache engine to which the update applies. Either Valkey, Redis OSS or Memcached.

" } } }, @@ -16315,7 +16351,7 @@ "ServerlessCaches": { "target": "com.amazonaws.elasticache#UGServerlessCacheIdList", "traits": { - "smithy.api#documentation": "

Indicates which serverless caches the specified user group is associated with. Available for Redis OSS and Serverless Memcached only.

" + "smithy.api#documentation": "

Indicates which serverless caches the specified user group is associated with. Available for Valkey, Redis OSS and Serverless Memcached only.

" } }, "ARN": { diff --git a/models/emr-serverless.json b/models/emr-serverless.json index c6bd0f79d1..5da81b262f 100644 --- a/models/emr-serverless.json +++ b/models/emr-serverless.json @@ -134,6 +134,12 @@ "traits": { "smithy.api#documentation": "

The interactive configuration object that enables the interactive use cases for an application.

" } + }, + "schedulerConfiguration": { + "target": "com.amazonaws.emrserverless#SchedulerConfiguration", + "traits": { + "smithy.api#documentation": "

The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

" + } } }, "traits": { @@ -1494,6 +1500,12 @@ "traits": { "smithy.api#documentation": "

The interactive configuration object that enables the interactive use cases to use when running an application.

" } + }, + "schedulerConfiguration": { + "target": "com.amazonaws.emrserverless#SchedulerConfiguration", + "traits": { + "smithy.api#documentation": "

The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

" + } } } }, @@ -1605,7 +1617,7 @@ "min": 20, "max": 2048 }, - "smithy.api#pattern": "^arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:(\\d{12})?:key\\/[a-zA-Z0-9-]+$" + "smithy.api#pattern": "^arn:(aws[a-zA-Z0-9-]*):kms:[a-zA-Z0-9\\-]*:([0-9]{12}):key\\/[a-zA-Z0-9-]+$" } }, "com.amazonaws.emrserverless#EngineType": { @@ -1886,7 +1898,7 @@ "min": 20, "max": 2048 }, - "smithy.api#pattern": "^arn:(aws[a-zA-Z0-9-]*):iam::(\\d{12})?:(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)$" + "smithy.api#pattern": "^arn:(aws[a-zA-Z0-9-]*):iam::([0-9]{12}):(role((\\u002F)|(\\u002F[\\u0021-\\u007F]+\\u002F))[\\w+=,.@-]+)$" } }, "com.amazonaws.emrserverless#ImageConfiguration": { @@ -2212,6 +2224,24 @@ "traits": { "smithy.api#documentation": "

The date and time of when the job run attempt was last updated.

" } + }, + "startedAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time when the job moved to the RUNNING state.

" + } + }, + "endedAt": { + "target": "com.amazonaws.emrserverless#Date", + "traits": { + "smithy.api#documentation": "

The date and time when the job was terminated.

" + } + }, + "queuedDurationMilliseconds": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

The total time for a job in the QUEUED state in milliseconds.

" + } } }, "traits": { @@ -2426,6 +2456,10 @@ { "value": "CANCELLED", "name": "CANCELLED" + }, + { + "value": "QUEUED", + "name": "QUEUED" } ] } @@ -3013,7 +3047,7 @@ } }, "traits": { - "smithy.api#documentation": "

The maximum allowed cumulative resources for an application. No new resources will be\n created once the limit is hit.

" + "smithy.api#documentation": "

The maximum allowed cumulative resources for an application. No new resources will be created once the limit is hit.

" } }, "com.amazonaws.emrserverless#MemorySize": { @@ -3239,6 +3273,26 @@ "smithy.api#documentation": "

The Amazon S3 configuration for monitoring log publishing. You can configure your jobs to send log information to Amazon S3.

" } }, + "com.amazonaws.emrserverless#SchedulerConfiguration": { + "type": "structure", + "members": { + "queueTimeoutMinutes": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum duration in minutes for the job in QUEUED state. If scheduler configuration is enabled on your application, the default value is 360 minutes (6 hours). The valid range is from 15 to 720.

" + } + }, + "maxConcurrentRuns": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum concurrent job runs on this application. If scheduler configuration is enabled on your application, the default value is 15. The valid range is 1 to 1000.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

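A minimal Soto sketch of attaching this new scheduler configuration to an application, assuming the generated Swift names EMRServerless.SchedulerConfiguration, UpdateApplicationRequest, and updateApplication; the application id and the chosen limits are placeholders:

```swift
import Foundation
import SotoEMRServerless

// `client` is assumed to be an already-configured soto-core AWSClient.
func enableQueueing(client: AWSClient) async throws {
    let emrServerless = EMRServerless(client: client, region: .useast1)

    let scheduler = EMRServerless.SchedulerConfiguration(
        maxConcurrentRuns: 50,      // valid range 1–1000, default 15 when scheduling is enabled
        queueTimeoutMinutes: 120    // valid range 15–720, default 360
    )

    let request = EMRServerless.UpdateApplicationRequest(
        applicationId: "00abcdefghij0123",   // placeholder application id
        clientToken: UUID().uuidString,
        schedulerConfiguration: scheduler
    )
    _ = try await emrServerless.updateApplication(request)
}
```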
" + } + }, "com.amazonaws.emrserverless#SecurityGroupIds": { "type": "list", "member": { @@ -3891,6 +3945,12 @@ "traits": { "smithy.api#documentation": "

The configuration setting for monitoring.

" } + }, + "schedulerConfiguration": { + "target": "com.amazonaws.emrserverless#SchedulerConfiguration", + "traits": { + "smithy.api#documentation": "

The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

" + } } } }, diff --git a/models/emr.json b/models/emr.json index 670c426e41..1dd2752442 100644 --- a/models/emr.json +++ b/models/emr.json @@ -4568,6 +4568,12 @@ "traits": { "smithy.api#documentation": "

The resize specification for the instance fleet.

" } + }, + "Context": { + "target": "com.amazonaws.emr#XmlStringMaxLen256", + "traits": { + "smithy.api#documentation": "

Reserved.

" + } } }, "traits": { @@ -4620,6 +4626,12 @@ "traits": { "smithy.api#documentation": "

The resize specification for the instance fleet.

" } + }, + "Context": { + "target": "com.amazonaws.emr#XmlStringMaxLen256", + "traits": { + "smithy.api#documentation": "

Reserved.

" + } } }, "traits": { @@ -4675,6 +4687,12 @@ "traits": { "smithy.api#documentation": "

An array of InstanceTypeConfig objects that specify how Amazon EMR provisions Amazon EC2 instances\n when it fulfills On-Demand and Spot capacities. For more information, see InstanceTypeConfig.

" } + }, + "Context": { + "target": "com.amazonaws.emr#XmlStringMaxLen256", + "traits": { + "smithy.api#documentation": "

Reserved.

" + } } }, "traits": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 63edba279a..5eaa988108 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -559,11 +559,13 @@ "aoss" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -1658,6 +1660,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -2052,20 +2055,76 @@ "protocols" : [ "https" ] }, "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-2" : { } + "ap-northeast-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-east-1" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "tags" : [ "dualstack" ] + } ] + } } }, "arc-zonal-shift" : { @@ -2573,6 +2632,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { }, "ca-west-1" : { }, "eu-central-1" : { }, @@ -2709,6 +2769,7 @@ "bedrock" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -2718,6 +2779,12 @@ }, "hostname" : "bedrock.ap-northeast-1.amazonaws.com" }, + "bedrock-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "bedrock.ap-northeast-2.amazonaws.com" + }, "bedrock-ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" @@ -2778,6 +2845,12 @@ }, "hostname" : "bedrock-fips.us-east-1.amazonaws.com" }, + "bedrock-fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "bedrock-fips.us-east-2.amazonaws.com" + }, "bedrock-fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2790,6 +2863,12 @@ }, "hostname" : "bedrock-runtime.ap-northeast-1.amazonaws.com" }, + "bedrock-runtime-ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "bedrock-runtime.ap-northeast-2.amazonaws.com" + }, "bedrock-runtime-ap-south-1" : { "credentialScope" : { "region" : "ap-south-1" @@ -2850,6 +2929,12 @@ }, "hostname" : "bedrock-runtime-fips.us-east-1.amazonaws.com" }, + 
"bedrock-runtime-fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "bedrock-runtime-fips.us-east-2.amazonaws.com" + }, "bedrock-runtime-fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2868,6 +2953,12 @@ }, "hostname" : "bedrock-runtime.us-east-1.amazonaws.com" }, + "bedrock-runtime-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "bedrock-runtime.us-east-2.amazonaws.com" + }, "bedrock-runtime-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2886,6 +2977,12 @@ }, "hostname" : "bedrock.us-east-1.amazonaws.com" }, + "bedrock-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "bedrock.us-east-2.amazonaws.com" + }, "bedrock-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -2899,6 +2996,7 @@ "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } } }, @@ -7249,6 +7347,12 @@ "tags" : [ "fips" ] } ] }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ap-southeast-5.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com", @@ -7386,6 +7490,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.ap-southeast-4.amazonaws.com" }, + "fips-ap-southeast-5" : { + "credentialScope" : { + "region" : "ap-southeast-5" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ap-southeast-5.amazonaws.com" + }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -8417,28 +8528,133 @@ }, "firehose" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "firehose.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "firehose.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "firehose.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "firehose.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "firehose.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "firehose.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "firehose.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "firehose.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "firehose.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "firehose.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "firehose.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ap-southeast-5" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - 
"eu-west-2" : { }, - "eu-west-3" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "firehose.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "firehose.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "firehose.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "firehose.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "firehose.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "firehose.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "firehose.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "firehose.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "firehose.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "firehose.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -8467,32 +8683,76 @@ "deprecated" : true, "hostname" : "firehose-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "firehose.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "firehose.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "firehose.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "firehose.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "firehose-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "firehose-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "firehose.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "firehose-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "firehose-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "firehose.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "firehose-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "firehose-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "firehose.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "firehose-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "firehose-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "firehose.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -9124,6 +9384,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "sa-east-1" : { }, @@ -9620,6 +9881,8 @@ }, "endpoints" : { "ap-south-1" : { }, + "ap-southeast-2" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -12985,6 +13248,7 @@ "eu-central-1" : { }, "eu-north-1" : { }, 
"eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -14059,6 +14323,7 @@ "eu-central-1" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -15988,8 +16253,24 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "resource-explorer-2-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "resource-explorer-2-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "resource-explorer-2-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "resource-explorer-2-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -15998,14 +16279,88 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.ca-central-1.amazonaws.com" + }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.ca-west-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "resource-explorer-2-fips.us-west-2.amazonaws.com" + }, "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "resource-explorer-2-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "resource-explorer-2-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "resource-explorer-2-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "resource-explorer-2-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "resource-explorer-2-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "resource-explorer-2-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "resource-explorer-2-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + }, { + "hostname" : "resource-explorer-2-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + } ] + } } }, "resource-groups" : { @@ -19489,6 +19844,86 @@ } } }, + "ssm-quicksetup" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "ssm-quicksetup-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] 
+ } ] + }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "ssm-quicksetup-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "ssm-quicksetup-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "ssm-quicksetup-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "ssm-quicksetup-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "ssm-quicksetup-fips.us-west-2.amazonaws.com" + }, + "sa-east-1" : { }, + "us-east-1" : { + "variants" : [ { + "hostname" : "ssm-quicksetup-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "ssm-quicksetup-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "ssm-quicksetup-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "ssm-quicksetup-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } + } + }, "ssm-sap" : { "endpoints" : { "af-south-1" : { }, @@ -19497,9 +19932,11 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "ssm-sap-fips.ca-central-1.amazonaws.com", @@ -19507,8 +19944,10 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -19547,6 +19986,8 @@ "deprecated" : true, "hostname" : "ssm-sap-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { @@ -20453,82 +20894,62 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "sa-east-1" : { }, - "transcribestreaming-ca-central-1" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "deprecated" : true, + "ca-central-1" : { "variants" : [ { "hostname" : "transcribestreaming-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] } ] }, - "transcribestreaming-fips-ca-central-1" : { + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" }, "deprecated" : true, "hostname" : "transcribestreaming-fips.ca-central-1.amazonaws.com" }, - "transcribestreaming-fips-us-east-1" : { + "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" }, "deprecated" : true, "hostname" : "transcribestreaming-fips.us-east-1.amazonaws.com" }, - "transcribestreaming-fips-us-east-2" : { + "fips-us-east-2" : { "credentialScope" : { "region" : "us-east-2" }, "deprecated" : true, "hostname" : "transcribestreaming-fips.us-east-2.amazonaws.com" }, - "transcribestreaming-fips-us-west-2" : { + "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" }, "deprecated" : true, 
"hostname" : "transcribestreaming-fips.us-west-2.amazonaws.com" }, - "transcribestreaming-us-east-1" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, + "sa-east-1" : { }, + "us-east-1" : { "variants" : [ { "hostname" : "transcribestreaming-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] } ] }, - "transcribestreaming-us-east-2" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "deprecated" : true, + "us-east-2" : { "variants" : [ { "hostname" : "transcribestreaming-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] } ] }, - "transcribestreaming-us-west-2" : { - "credentialScope" : { - "region" : "us-west-2" - }, - "deprecated" : true, + "us-west-2" : { "variants" : [ { "hostname" : "transcribestreaming-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] } ] - }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-2" : { } + } } }, "transfer" : { @@ -20913,8 +21334,10 @@ "vpc-lattice" : { "endpoints" : { "af-south-1" : { }, + "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -20925,6 +21348,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -21977,9 +22401,16 @@ "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-west-2" : { }, + "fips-ca-central-1" : { + "deprecated" : true + }, "fips-us-east-1" : { "deprecated" : true }, @@ -23887,6 +24318,7 @@ }, "aoss" : { "endpoints" : { + "us-gov-east-1" : { }, "us-gov-west-1" : { } } }, @@ -27797,6 +28229,12 @@ } } }, + "schemas" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "us-gov-east-1" : { @@ -28673,8 +29111,32 @@ }, "transcribestreaming" : { "endpoints" : { - "us-gov-east-1" : { }, - "us-gov-west-1" : { } + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "transcribestreaming-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "deprecated" : true, + "hostname" : "transcribestreaming-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "transcribestreaming-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-gov-west-1" : { + "variants" : [ { + "hostname" : "transcribestreaming-fips.us-gov-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "transfer" : { @@ -29141,8 +29603,32 @@ }, "ds" : { "endpoints" : { - "us-iso-east-1" : { }, - "us-iso-west-1" : { } + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-iso-east-1.c2s.ic.gov" + }, + "fips-us-iso-west-1" : { + "credentialScope" : { + "region" : "us-iso-west-1" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-iso-west-1.c2s.ic.gov" + }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "ds-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "ds-fips.us-iso-west-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + } } }, "dynamodb" : { @@ -29962,7 +30448,19 @@ }, "ds" : { "endpoints" : { - "us-isob-east-1" : { } + "fips-us-isob-east-1" : { + "credentialScope" : { + "region" 
: "us-isob-east-1" + }, + "deprecated" : true, + "hostname" : "ds-fips.us-isob-east-1.sc2s.sgov.gov" + }, + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "ds-fips.us-isob-east-1.sc2s.sgov.gov", + "tags" : [ "fips" ] + } ] + } } }, "dynamodb" : { @@ -30425,6 +30923,11 @@ "endpoints" : { "us-isob-east-1" : { } } + }, + "xray" : { + "endpoints" : { + "us-isob-east-1" : { } + } } } }, { diff --git a/models/fsx.json b/models/fsx.json index 76702f73e2..c021226bc6 100644 --- a/models/fsx.json +++ b/models/fsx.json @@ -2218,7 +2218,7 @@ "Path": { "target": "com.amazonaws.fsx#ArchivePath", "traits": { - "smithy.api#documentation": "

Required if Enabled is set to true. Specifies the location of the report on the file system's linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. \n The Path you provide must be located within the file system’s ExportPath. \n An example Path value is \"s3://myBucket/myExportPath/optionalPrefix\". The report provides the following information for each file in the report:\n FilePath, FileStatus, and ErrorCode.

" + "smithy.api#documentation": "

Required if Enabled is set to true. Specifies the location of the report on the file system's\n linked S3 data repository. An absolute path that defines where the completion report will be stored in the destination location. \n The Path you provide must be located within the file system’s ExportPath. \n An example Path value is \"s3://amzn-s3-demo-bucket/myExportPath/optionalPrefix\".\n The report provides the following information for each file in the report:\n FilePath, FileStatus, and ErrorCode.

" } }, "Format": { @@ -2644,7 +2644,7 @@ "target": "com.amazonaws.fsx#ArchivePath", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The path to the Amazon S3 data repository that will be linked to the file\n system. The path can be an S3 bucket or prefix in the format\n s3://myBucket/myPrefix/. This path specifies where in the S3\n data repository files will be imported from or exported to.

", + "smithy.api#documentation": "

The path to the Amazon S3 data repository that will be linked to the file\n system. The path can be an S3 bucket or prefix in the format\n s3://bucket-name/prefix/ (where prefix\n is optional). This path specifies where in the S3 data repository\n files will be imported from or exported to.

", "smithy.api#required": {} } }, @@ -2744,7 +2744,7 @@ "Paths": { "target": "com.amazonaws.fsx#DataRepositoryTaskPaths", "traits": { - "smithy.api#documentation": "

A list of paths for the data repository task to use when the task is processed.\n If a path that you provide isn't valid, the task fails. If you don't provide\n paths, the default behavior is to export all files to S3 (for export tasks), import\n all files from S3 (for import tasks), or release all exported files that meet the\n last accessed time criteria (for release tasks).

\n
    \n
  • \n

    For export tasks, the list contains paths on the FSx for Lustre file system\n from which the files are exported to the Amazon S3 bucket. The default path is the\n file system root directory. The paths you provide need to be relative to the mount\n point of the file system. If the mount point is /mnt/fsx and\n /mnt/fsx/path1 is a directory or file on the file system you want\n to export, then the path to provide is path1.

    \n
  • \n
  • \n

    For import tasks, the list contains paths in the Amazon S3 bucket\n from which POSIX metadata changes are imported to the FSx for Lustre file system.\n The path can be an S3 bucket or prefix in the format\n s3://myBucket/myPrefix (where myPrefix is optional).\n

    \n
  • \n
  • \n

    For release tasks, the list contains directory or file paths on the\n FSx for Lustre file system from which to release exported files. If a directory is\n specified, files within the directory are released. If a file path is specified,\n only that file is released. To release all exported files in the file system,\n specify a forward slash (/) as the path.

    \n \n

    A file must also meet the last accessed time criteria\n specified in for the\n file to be released.

    \n
    \n
  • \n
" + "smithy.api#documentation": "

A list of paths for the data repository task to use when the task is processed.\n If a path that you provide isn't valid, the task fails. If you don't provide\n paths, the default behavior is to export all files to S3 (for export tasks), import\n all files from S3 (for import tasks), or release all exported files that meet the\n last accessed time criteria (for release tasks).

\n
    \n
  • \n

    For export tasks, the list contains paths on the FSx for Lustre file system\n from which the files are exported to the Amazon S3 bucket. The default path is the\n file system root directory. The paths you provide need to be relative to the mount\n point of the file system. If the mount point is /mnt/fsx and\n /mnt/fsx/path1 is a directory or file on the file system you want\n to export, then the path to provide is path1.

    \n
  • \n
  • \n

    For import tasks, the list contains paths in the Amazon S3 bucket\n from which POSIX metadata changes are imported to the FSx for Lustre file system.\n The path can be an S3 bucket or prefix in the format\n s3://bucket-name/prefix (where prefix is optional).

    \n
  • \n
  • \n

    For release tasks, the list contains directory or file paths on the\n FSx for Lustre file system from which to release exported files. If a directory is\n specified, files within the directory are released. If a file path is specified,\n only that file is released. To release all exported files in the file system,\n specify a forward slash (/) as the path.

    \n \n

    A file must also meet the last accessed time criteria\n specified in for the\n file to be released.

    \n
    \n
  • \n
" } }, "FileSystemId": { @@ -4282,7 +4282,7 @@ "DataRepositoryPath": { "target": "com.amazonaws.fsx#ArchivePath", "traits": { - "smithy.api#documentation": "

The path to the data repository that will be linked to the cache\n or file system.

\n
    \n
  • \n

    For Amazon File Cache, the path can be an NFS data repository\n that will be linked to the cache. The path can be in one of two formats:

    \n
      \n
    • \n

      If you are not using the DataRepositorySubdirectories\n parameter, the path is to an NFS Export directory (or one of its subdirectories)\n in the format nsf://nfs-domain-name/exportpath. You can therefore\n link a single NFS Export to a single data repository association.

      \n
    • \n
    • \n

      If you are using the DataRepositorySubdirectories\n parameter, the path is the domain name of the NFS file system in the format\n nfs://filer-domain-name, which indicates the root of the\n subdirectories specified with the DataRepositorySubdirectories\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    For Amazon File Cache, the path can be an S3 bucket or prefix\n in the format s3://myBucket/myPrefix/.

    \n
  • \n
  • \n

    For Amazon FSx for Lustre, the path can be an S3 bucket or prefix\n in the format s3://myBucket/myPrefix/.

    \n
  • \n
" + "smithy.api#documentation": "

The path to the data repository that will be linked to the cache\n or file system.

\n
    \n
  • \n

    For Amazon File Cache, the path can be an NFS data repository\n that will be linked to the cache. The path can be in one of two formats:

    \n
      \n
    • \n

      If you are not using the DataRepositorySubdirectories\n parameter, the path is to an NFS Export directory (or one of its subdirectories)\n in the format nfs://nfs-domain-name/exportpath. You can therefore\n link a single NFS Export to a single data repository association.

      \n
    • \n
    • \n

      If you are using the DataRepositorySubdirectories\n parameter, the path is the domain name of the NFS file system in the format\n nfs://filer-domain-name, which indicates the root of the\n subdirectories specified with the DataRepositorySubdirectories\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    For Amazon File Cache, the path can be an S3 bucket or prefix\n in the format s3://bucket-name/prefix/ (where prefix\n is optional).

    \n
  • \n
  • \n

    For Amazon FSx for Lustre, the path can be an S3 bucket or prefix\n in the format s3://bucket-name/prefix/ (where prefix\n is optional).

    \n
  • \n
" } }, "BatchImportMetaDataOnCreate": { @@ -6817,7 +6817,7 @@ "target": "com.amazonaws.fsx#ArchivePath", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The path to the S3 or NFS data repository that links to the\n cache. You must provide one of the following paths:

\n
    \n
  • \n

    The path can be an NFS data repository that links to\n the cache. The path can be in one of two formats:

    \n
      \n
    • \n

      If you are not using the DataRepositorySubdirectories\n parameter, the path is to an NFS Export directory (or one of its subdirectories)\n in the format nfs://nfs-domain-name/exportpath. You can therefore\n link a single NFS Export to a single data repository association.

      \n
    • \n
    • \n

      If you are using the DataRepositorySubdirectories\n parameter, the path is the domain name of the NFS file system in the format\n nfs://filer-domain-name, which indicates the root of the\n subdirectories specified with the DataRepositorySubdirectories\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    The path can be an S3 bucket or prefix\n in the format s3://myBucket/myPrefix/.

    \n
  • \n
", + "smithy.api#documentation": "

The path to the S3 or NFS data repository that links to the\n cache. You must provide one of the following paths:

\n
    \n
  • \n

    The path can be an NFS data repository that links to\n the cache. The path can be in one of two formats:

    \n
      \n
    • \n

      If you are not using the DataRepositorySubdirectories\n parameter, the path is to an NFS Export directory (or one of its subdirectories)\n in the format nfs://nfs-domain-name/exportpath. You can therefore\n link a single NFS Export to a single data repository association.

      \n
    • \n
    • \n

      If you are using the DataRepositorySubdirectories\n parameter, the path is the domain name of the NFS file system in the format\n nfs://filer-domain-name, which indicates the root of the\n subdirectories specified with the DataRepositorySubdirectories\n parameter.

      \n
    • \n
    \n
  • \n
  • \n

    The path can be an S3 bucket or prefix\n in the format s3://bucket-name/prefix/ (where prefix\n is optional).

    \n
  • \n
", "smithy.api#required": {} } }, diff --git a/models/glue.json b/models/glue.json index 112e5c5b08..b98adab95f 100644 --- a/models/glue.json +++ b/models/glue.json @@ -633,6 +633,9 @@ { "target": "com.amazonaws.glue#TagResource" }, + { + "target": "com.amazonaws.glue#TestConnection" + }, { "target": "com.amazonaws.glue#UntagResource" }, @@ -2462,17 +2465,17 @@ "smithy.api#documentation": "

A structure containing the authentication configuration in the CreateConnection request.

" } }, - "SecretArn": { - "target": "com.amazonaws.glue#SecretArn", - "traits": { - "smithy.api#documentation": "

The secret manager ARN to store credentials in the CreateConnection request.

" - } - }, "OAuth2Properties": { "target": "com.amazonaws.glue#OAuth2PropertiesInput", "traits": { "smithy.api#documentation": "

The properties for OAuth2 authentication in the CreateConnection request.

" } + }, + "SecretArn": { + "target": "com.amazonaws.glue#SecretArn", + "traits": { + "smithy.api#documentation": "

The secret manager ARN to store credentials in the CreateConnection request.

" + } } }, "traits": { @@ -2509,7 +2512,8 @@ "min": 1, "max": 4096 }, - "smithy.api#pattern": "^\\S+$" + "smithy.api#pattern": "^\\S+$", + "smithy.api#sensitive": {} } }, "com.amazonaws.glue#AuthorizationCodeProperties": { @@ -6857,6 +6861,12 @@ "smithy.api#documentation": "

These key-value pairs define parameters for the connection:

\n
    \n
  • \n

    \n HOST - The host URI: either the\n fully qualified domain name (FQDN) or the IPv4 address of\n the database host.

    \n
  • \n
  • \n

    \n PORT - The port number, between\n 1024 and 65535, of the port on which the database host is\n listening for database connections.

    \n
  • \n
  • \n

    \n USER_NAME - The name under which\n to log in to the database. The value string for USER_NAME is \"USERNAME\".

    \n
  • \n
  • \n

    \n PASSWORD - A password,\n if one is used, for the user name.

    \n
  • \n
  • \n

    \n ENCRYPTED_PASSWORD - When you enable connection password protection by setting ConnectionPasswordEncryption in the Data Catalog encryption settings, this field stores the encrypted password.

    \n
  • \n
  • \n

    \n JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the\n JAR file that contains the JDBC driver to use.

    \n
  • \n
  • \n

    \n JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use.

    \n
  • \n
  • \n

    \n JDBC_ENGINE - The name of the JDBC engine to use.

    \n
  • \n
  • \n

    \n JDBC_ENGINE_VERSION - The version of the JDBC engine to use.

    \n
  • \n
  • \n

    \n CONFIG_FILES - (Reserved for future use.)

    \n
  • \n
  • \n

    \n INSTANCE_ID - The instance ID to use.

    \n
  • \n
  • \n

    \n JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source.

    \n
  • \n
  • \n

    \n JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether Secure\n Sockets Layer (SSL) with hostname matching is enforced for the JDBC connection on the\n client. The default is false.

    \n
  • \n
  • \n

    \n CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root certificate. Glue uses this root certificate to validate the customer’s certificate when connecting to the customer database. Glue only handles X.509 certificates. The certificate provided must be DER-encoded and supplied in Base64 encoding PEM format.

    \n
  • \n
  • \n

    \n SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, this is false. Glue validates the Signature algorithm and Subject Public Key Algorithm for the customer certificate. The only permitted algorithms for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the Subject Public Key Algorithm, the key length must be at least 2048. You can set the value of this property to true to skip Glue’s validation of the customer certificate.

    \n
  • \n
  • \n

    \n CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which is used for domain match or distinguished name match to prevent a man-in-the-middle attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the hostNameInCertificate.

    \n
  • \n
  • \n

    \n CONNECTION_URL - The URL for connecting to a general (non-JDBC) data source.

    \n
  • \n
  • \n

    \n SECRET_ID - The secret ID used for the secret manager of credentials.

    \n
  • \n
  • \n

    \n CONNECTOR_URL - The connector URL for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n CONNECTOR_TYPE - The connector type for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n CONNECTOR_CLASS_NAME - The connector class name for a MARKETPLACE or CUSTOM connection.

    \n
  • \n
  • \n

    \n KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs that are the addresses of the Apache Kafka brokers in a Kafka cluster to which a Kafka client will connect to and bootstrap itself.

    \n
  • \n
  • \n

    \n KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka connection. Default value is \"true\".

    \n
  • \n
  • \n

    \n KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem format). The default is an empty string.

    \n
  • \n
  • \n

    \n KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA cert file or not. Glue validates for three algorithms: SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is \"false\".

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEYSTORE - The Amazon S3 location of the client keystore file for Kafka client side authentication (Optional).

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEYSTORE_PASSWORD - The password to access the provided keystore (Optional).

    \n
  • \n
  • \n

    \n KAFKA_CLIENT_KEY_PASSWORD - A keystore can consist of multiple keys, so this is the password to access the client key to be used with the Kafka server side key (Optional).

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD - The encrypted version of the Kafka client keystore password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD - The encrypted version of the Kafka client key password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_MECHANISM - \"SCRAM-SHA-512\", \"GSSAPI\", \"AWS_MSK_IAM\", or \"PLAIN\". These are the supported SASL Mechanisms.

    \n
  • \n
  • \n

    \n KAFKA_SASL_PLAIN_USERNAME - A plaintext username used to authenticate with the \"PLAIN\" mechanism.

    \n
  • \n
  • \n

    \n KAFKA_SASL_PLAIN_PASSWORD - A plaintext password used to authenticate with the \"PLAIN\" mechanism.

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD - The encrypted version of the Kafka SASL PLAIN password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_USERNAME - A plaintext username used to authenticate with the \"SCRAM-SHA-512\" mechanism.

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_PASSWORD - A plaintext password used to authenticate with the \"SCRAM-SHA-512\" mechanism.

    \n
  • \n
  • \n

    \n ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD - The encrypted version of the Kafka SASL SCRAM password (if the user has the Glue encrypt passwords setting selected).

    \n
  • \n
  • \n

    \n KAFKA_SASL_SCRAM_SECRETS_ARN - The Amazon Resource Name of a secret in Amazon Web Services Secrets Manager.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_KEYTAB - The S3 location of a Kerberos keytab file. A keytab stores long-term keys for one or more principals. For more information, see MIT Kerberos Documentation: Keytab.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_KRB5_CONF - The S3 location of a Kerberos krb5.conf file. A krb5.conf stores Kerberos configuration information, such as the location of the KDC server. For more information, see MIT Kerberos Documentation: krb5.conf.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_SERVICE - The Kerberos service name, as set with sasl.kerberos.service.name in your Kafka Configuration.

    \n
  • \n
  • \n

    \n KAFKA_SASL_GSSAPI_PRINCIPAL - The name of the Kerberos principal used by Glue. For more information, see Kafka Documentation: Configuring Kafka Brokers.

    \n
  • \n
  • \n

    \n ROLE_ARN - The role to be used for running queries.

    \n
  • \n
  • \n

    \n REGION - The Amazon Web Services Region where queries will be run.

    \n
  • \n
  • \n

    \n WORKGROUP_NAME - The name of an Amazon Redshift serverless workgroup or Amazon Athena workgroup in which queries will run.

    \n
  • \n
  • \n

    \n CLUSTER_IDENTIFIER - The cluster identifier of an Amazon Redshift cluster in which queries will run.

    \n
  • \n
  • \n

    \n DATABASE - The Amazon Redshift database that you are connecting to.

    \n
  • \n
" } }, + "AthenaProperties": { + "target": "com.amazonaws.glue#PropertyMap", + "traits": { + "smithy.api#documentation": "

This field is not currently used.

" + } + }, "PhysicalConnectionRequirements": { "target": "com.amazonaws.glue#PhysicalConnectionRequirements", "traits": { @@ -6946,6 +6956,12 @@ "smithy.api#required": {} } }, + "AthenaProperties": { + "target": "com.amazonaws.glue#PropertyMap", + "traits": { + "smithy.api#documentation": "

This field is not currently used.

" + } + }, "PhysicalConnectionRequirements": { "target": "com.amazonaws.glue#PhysicalConnectionRequirements", "traits": { @@ -28869,6 +28885,24 @@ "smithy.api#documentation": "

Specifies the job and session values that an admin configures in an Glue usage profile.

" } }, + "com.amazonaws.glue#PropertyKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.glue#PropertyMap": { + "type": "map", + "key": { + "target": "com.amazonaws.glue#PropertyKey" + }, + "value": { + "target": "com.amazonaws.glue#PropertyValue" + } + }, "com.amazonaws.glue#PropertyPredicate": { "type": "structure", "members": { @@ -28895,6 +28929,15 @@ "smithy.api#documentation": "

Defines a property predicate.

" } }, + "com.amazonaws.glue#PropertyValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, "com.amazonaws.glue#PublicKeysList": { "type": "list", "member": { @@ -36229,6 +36272,102 @@ } } }, + "com.amazonaws.glue#TestConnection": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#TestConnectionRequest" + }, + "output": { + "target": "com.amazonaws.glue#TestConnectionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#AccessDeniedException" + }, + { + "target": "com.amazonaws.glue#ConflictException" + }, + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#FederationSourceException" + }, + { + "target": "com.amazonaws.glue#GlueEncryptionException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + }, + { + "target": "com.amazonaws.glue#ResourceNumberLimitExceededException" + } + ], + "traits": { + "smithy.api#documentation": "

Tests a connection to a service to validate the service credentials that you provide.

\n

You can either provide an existing connection name or a TestConnectionInput for testing a non-existing connection input. Providing both at the same time will cause an error.

\n

If the action is successful, the service sends back an HTTP 200 response.

" + } + }, + "com.amazonaws.glue#TestConnectionInput": { + "type": "structure", + "members": { + "ConnectionType": { + "target": "com.amazonaws.glue#ConnectionType", + "traits": { + "smithy.api#documentation": "

The type of connection to test. This operation is only available for the JDBC or SALESFORCE connection types.

", + "smithy.api#required": {} + } + }, + "ConnectionProperties": { + "target": "com.amazonaws.glue#ConnectionProperties", + "traits": { + "smithy.api#documentation": "

The key-value pairs that define parameters for the connection.

\n

JDBC connections use the following connection properties:

\n
    \n
  • \n

    Required: All of (HOST, PORT, JDBC_ENGINE) or JDBC_CONNECTION_URL.

    \n
  • \n
  • \n

    Required: All of (USERNAME, PASSWORD) or SECRET_ID.

    \n
  • \n
  • \n

    Optional: JDBC_ENFORCE_SSL, CUSTOM_JDBC_CERT, CUSTOM_JDBC_CERT_STRING, SKIP_CUSTOM_JDBC_CERT_VALIDATION. These parameters are used to configure SSL with JDBC.

    \n
  • \n
\n

SALESFORCE connections require the AuthenticationConfiguration member to be configured.

", + "smithy.api#required": {} + } + }, + "AuthenticationConfiguration": { + "target": "com.amazonaws.glue#AuthenticationConfigurationInput", + "traits": { + "smithy.api#documentation": "

A structure containing the authentication configuration in the TestConnection request. Required for a connection to Salesforce using OAuth authentication.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that is used to specify testing a connection to a service.

" + } + }, + "com.amazonaws.glue#TestConnectionRequest": { + "type": "structure", + "members": { + "ConnectionName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#documentation": "

Optional. The name of the connection to test. If only name is provided, the operation will get the connection and use that for testing.

" + } + }, + "TestConnectionInput": { + "target": "com.amazonaws.glue#TestConnectionInput", + "traits": { + "smithy.api#documentation": "

A structure that is used to specify testing a connection to a service.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.glue#TestConnectionResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.glue#ThrottlingException": { "type": "structure", "members": { diff --git a/models/guardduty.json b/models/guardduty.json index 8343b77769..e81b116ea5 100644 --- a/models/guardduty.json +++ b/models/guardduty.json @@ -4674,6 +4674,13 @@ "smithy.api#documentation": "

The name of the task group that's associated with the task.

", "smithy.api#jsonName": "group" } + }, + "LaunchType": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

The launch type on which the task is running, for example, Fargate or EC2.

", + "smithy.api#jsonName": "launchType" + } } }, "traits": { @@ -8774,7 +8781,7 @@ "target": "com.amazonaws.guardduty#SourceIps", "traits": { "smithy.api#documentation": "

The IP of the Kubernetes API caller and the IPs of any proxies or load balancers between\n the caller and the API endpoint.

", - "smithy.api#jsonName": "sourceIps" + "smithy.api#jsonName": "sourceIPs" } }, "UserAgent": { @@ -11042,6 +11049,13 @@ "smithy.api#jsonName": "localIpDetails" } }, + "LocalNetworkInterface": { + "target": "com.amazonaws.guardduty#String", + "traits": { + "smithy.api#documentation": "

The EC2 instance's local elastic network interface utilized for the connection.

", + "smithy.api#jsonName": "localNetworkInterface" + } + }, "RemoteIpDetails": { "target": "com.amazonaws.guardduty#RemoteIpDetails", "traits": { diff --git a/models/iot-data-plane.json b/models/iot-data-plane.json index 08866867f5..1b58de88f6 100644 --- a/models/iot-data-plane.json +++ b/models/iot-data-plane.json @@ -273,7 +273,23 @@ "method": "GET", "uri": "/things/{thingName}/shadow", "code": 200 - } + }, + "smithy.test#smokeTests": [ + { + "id": "GetThingShadowFailure", + "params": { + "thingName": "fake-thing" + }, + "vendorParams": { + "region": "us-west-2", + "uri": "https://data-ats.iot.us-west-2.amazonaws.com" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ] } }, "com.amazonaws.iotdataplane#GetThingShadowRequest": { diff --git a/models/iot.json b/models/iot.json index b8be375eb2..08eca7377b 100644 --- a/models/iot.json +++ b/models/iot.json @@ -42,6 +42,9 @@ { "target": "com.amazonaws.iot#AddThingToThingGroup" }, + { + "target": "com.amazonaws.iot#AssociateSbomWithPackageVersion" + }, { "target": "com.amazonaws.iot#AssociateTargetsWithJob" }, @@ -396,6 +399,9 @@ { "target": "com.amazonaws.iot#DisableTopicRule" }, + { + "target": "com.amazonaws.iot#DisassociateSbomFromPackageVersion" + }, { "target": "com.amazonaws.iot#EnableTopicRule" }, @@ -576,6 +582,9 @@ { "target": "com.amazonaws.iot#ListRoleAliases" }, + { + "target": "com.amazonaws.iot#ListSbomValidationResults" + }, { "target": "com.amazonaws.iot#ListScheduledAudits" }, @@ -2581,6 +2590,35 @@ "smithy.api#documentation": "

Contains information that allowed the authorization.

" } }, + "com.amazonaws.iot#ApplicationProtocol": { + "type": "enum", + "members": { + "SECURE_MQTT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SECURE_MQTT" + } + }, + "MQTT_WSS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MQTT_WSS" + } + }, + "HTTPS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "HTTPS" + } + }, + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEFAULT" + } + } + } + }, "com.amazonaws.iot#ApproximateSecondsBeforeTimedOut": { "type": "long" }, @@ -2726,6 +2764,111 @@ "smithy.api#documentation": "

Contains an asset property value (of a single type).

" } }, + "com.amazonaws.iot#AssociateSbomWithPackageVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.iot#AssociateSbomWithPackageVersionRequest" + }, + "output": { + "target": "com.amazonaws.iot#AssociateSbomWithPackageVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iot#ConflictException" + }, + { + "target": "com.amazonaws.iot#InternalServerException" + }, + { + "target": "com.amazonaws.iot#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iot#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iot#ThrottlingException" + }, + { + "target": "com.amazonaws.iot#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates the selected software bill of materials (SBOM) with a specific software package version.

\n

Requires permission to access the AssociateSbomWithPackageVersion action.

", + "smithy.api#http": { + "method": "PUT", + "uri": "/packages/{packageName}/versions/{versionName}/sbom", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.iot#AssociateSbomWithPackageVersionRequest": { + "type": "structure", + "members": { + "packageName": { + "target": "com.amazonaws.iot#PackageName", + "traits": { + "smithy.api#documentation": "

The name of the new software package.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "versionName": { + "target": "com.amazonaws.iot#VersionName", + "traits": { + "smithy.api#documentation": "

The name of the new package version.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sbom": { + "target": "com.amazonaws.iot#Sbom", + "traits": { + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.iot#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "smithy.api#httpQuery": "clientToken", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.iot#AssociateSbomWithPackageVersionResponse": { + "type": "structure", + "members": { + "packageName": { + "target": "com.amazonaws.iot#PackageName", + "traits": { + "smithy.api#documentation": "

The name of the new software package.

" + } + }, + "versionName": { + "target": "com.amazonaws.iot#VersionName", + "traits": { + "smithy.api#documentation": "

The name of the new package version.

" + } + }, + "sbom": { + "target": "com.amazonaws.iot#Sbom" + }, + "sbomValidationStatus": { + "target": "com.amazonaws.iot#SbomValidationStatus", + "traits": { + "smithy.api#documentation": "

The status of the initial validation for the software bill of materials against the Software Package Data Exchange (SPDX) and CycloneDX industry standard formats.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.iot#AssociateTargetsWithJob": { "type": "operation", "input": { @@ -3931,6 +4074,41 @@ "target": "com.amazonaws.iot#AuthResult" } }, + "com.amazonaws.iot#AuthenticationType": { + "type": "enum", + "members": { + "CUSTOM_AUTH_X509": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM_AUTH_X509" + } + }, + "CUSTOM_AUTH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CUSTOM_AUTH" + } + }, + "AWS_X509": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_X509" + } + }, + "AWS_SIGV4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AWS_SIGV4" + } + }, + "DEFAULT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DEFAULT" + } + } + } + }, "com.amazonaws.iot#AuthorizerArn": { "type": "string", "traits": { @@ -4382,6 +4560,12 @@ "com.amazonaws.iot#BatchMode": { "type": "boolean" }, + "com.amazonaws.iot#BeforeSubstitutionFlag": { + "type": "boolean", + "traits": { + "smithy.api#default": false + } + }, "com.amazonaws.iot#Behavior": { "type": "structure", "members": { @@ -5761,6 +5945,30 @@ "smithy.api#output": {} } }, + "com.amazonaws.iot#ClientCertificateCallbackArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.iot#ClientCertificateConfig": { + "type": "structure", + "members": { + "clientCertificateCallbackArn": { + "target": "com.amazonaws.iot#ClientCertificateCallbackArn", + "traits": { + "smithy.api#documentation": "

The ARN of the Lambda function that IoT invokes after mutual TLS authentication during the connection.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that specifies the client certificate configuration for a domain.

" + } + }, "com.amazonaws.iot#ClientId": { "type": "string" }, @@ -6463,7 +6671,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a billing group.

\n

Requires permission to access the CreateBillingGroup action.

", + "smithy.api#documentation": "

Creates a billing group. If this call is made multiple times using\n\t\t\tthe same billing group name and configuration, the call will succeed. If this call is made with\n\t\t\tthe same billing group name but a different configuration, a ResourceAlreadyExistsException is thrown.

\n

Requires permission to access the CreateBillingGroup action.

", "smithy.api#http": { "method": "POST", "uri": "/billing-groups/{billingGroupName}", @@ -7009,6 +7217,24 @@ "traits": { "smithy.api#documentation": "

The server certificate configuration.

" } + }, + "authenticationType": { + "target": "com.amazonaws.iot#AuthenticationType", + "traits": { + "smithy.api#documentation": "

An enumerated string that specifies the authentication type.

\n
    \n
  • \n

    \n CUSTOM_AUTH_X509 - Use custom authentication and authorization with additional details from the X.509 client certificate.

    \n
  • \n
\n \n
    \n
  • \n

    \n AWS_X509 - Use X.509 client certificates without custom authentication and authorization. For more information,\n see X.509 client certificates.

    \n
  • \n
\n \n
    \n
  • \n

    \n DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify authentication type.\n For more information, see Device communication protocols.

    \n
  • \n
" + } + }, + "applicationProtocol": { + "target": "com.amazonaws.iot#ApplicationProtocol", + "traits": { + "smithy.api#documentation": "

An enumerated string that specifies the application-layer protocol.

\n
    \n
  • \n

    \n SECURE_MQTT - MQTT over TLS.

    \n
  • \n
\n
    \n
  • \n

    \n MQTT_WSS - MQTT over WebSocket.

    \n
  • \n
\n
    \n
  • \n

    \n HTTPS - HTTP over TLS.

    \n
  • \n
\n
    \n
  • \n

    \n DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify the application-layer protocol. \n For more information, see Device communication protocols.

    \n
  • \n
" + } + }, + "clientCertificateConfig": { + "target": "com.amazonaws.iot#ClientCertificateConfig", + "traits": { + "smithy.api#documentation": "

An object that specifies the client certificate configuration for a domain.

" + } } }, "traits": { @@ -8131,6 +8357,18 @@ "smithy.api#documentation": "

Metadata that can be used to define a package version’s configuration. For example, the S3 file location, configuration options that are being sent to the device or fleet.

\n

The combined size of all the attributes on a package version is limited to 3KB.

" } }, + "artifact": { + "target": "com.amazonaws.iot#PackageVersionArtifact", + "traits": { + "smithy.api#documentation": "

The various build components created during the build process such as libraries and\n configuration files that make up a software package version.

" + } + }, + "recipe": { + "target": "com.amazonaws.iot#PackageVersionRecipe", + "traits": { + "smithy.api#documentation": "

The inline job document associated with a software package version used for a quick job\n deployment.

" + } + }, "tags": { "target": "com.amazonaws.iot#TagMap", "traits": { @@ -8755,7 +8993,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a role alias.

\n

Requires permission to access the CreateRoleAlias action.

", + "smithy.api#documentation": "

Creates a role alias.

\n

Requires permission to access the CreateRoleAlias action.

\n \n

The value of \n credentialDurationSeconds\n must be less than or equal to the maximum session \n duration of the IAM role that the role alias references. For more information, see \n \n Modifying a role maximum session duration (Amazon Web Services API) from the Amazon Web Services Identity and Access Management User Guide.

\n
", "smithy.api#http": { "method": "POST", "uri": "/role-aliases/{roleAlias}", @@ -9365,7 +9603,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new thing type.

\n

Requires permission to access the CreateThingType action.

", + "smithy.api#documentation": "

Creates a new thing type. If this call is made multiple times using\n\t\t\tthe same thing type name and configuration, the call will succeed. If this call is made with\n\t\t\tthe same thing type name but a different configuration, a ResourceAlreadyExistsException is thrown.\n\t\t

\n

Requires permission to access the CreateThingType action.

", "smithy.api#http": { "method": "POST", "uri": "/thing-types/{thingTypeName}", @@ -13156,6 +13394,24 @@ "traits": { "smithy.api#documentation": "

The server certificate configuration.

" } + }, + "authenticationType": { + "target": "com.amazonaws.iot#AuthenticationType", + "traits": { + "smithy.api#documentation": "

An enumerated string that specifies the authentication type.

\n
    \n
  • \n

    \n CUSTOM_AUTH_X509 - Use custom authentication and authorization with additional details from the X.509 client certificate.

    \n
  • \n
\n \n
    \n
  • \n

    \n AWS_X509 - Use X.509 client certificates without custom authentication and authorization. For more information,\n see X.509 client certificates.

    \n
  • \n
\n \n
    \n
  • \n

    \n DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify authentication type.\n For more information, see Device communication protocols.

    \n
  • \n
" + } + }, + "applicationProtocol": { + "target": "com.amazonaws.iot#ApplicationProtocol", + "traits": { + "smithy.api#documentation": "

An enumerated string that specifies the application-layer protocol.

\n
    \n
  • \n

    \n SECURE_MQTT - MQTT over TLS.

    \n
  • \n
\n
    \n
  • \n

    \n MQTT_WSS - MQTT over WebSocket.

    \n
  • \n
\n
    \n
  • \n

    \n HTTPS - HTTP over TLS.

    \n
  • \n
\n
    \n
  • \n

    \n DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify application_layer protocol. \n For more information, see Device communication protocols.

    \n
  • \n
" + } + }, + "clientCertificateConfig": { + "target": "com.amazonaws.iot#ClientCertificateConfig", + "traits": { + "smithy.api#documentation": "

An object that specifies the client certificate configuration for a domain.

" + } } }, "traits": { @@ -13618,6 +13874,14 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "beforeSubstitution": { + "target": "com.amazonaws.iot#BeforeSubstitutionFlag", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values.

", + "smithy.api#httpQuery": "beforeSubstitution" + } } }, "traits": { @@ -15728,6 +15992,80 @@ "smithy.api#input": {} } }, + "com.amazonaws.iot#DisassociateSbomFromPackageVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.iot#DisassociateSbomFromPackageVersionRequest" + }, + "output": { + "target": "com.amazonaws.iot#DisassociateSbomFromPackageVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iot#ConflictException" + }, + { + "target": "com.amazonaws.iot#InternalServerException" + }, + { + "target": "com.amazonaws.iot#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iot#ThrottlingException" + }, + { + "target": "com.amazonaws.iot#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Disassociates the selected software bill of materials (SBOM) from a specific software package version.

\n

Requires permission to access the DisassociateSbomWithPackageVersion action.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/packages/{packageName}/versions/{versionName}/sbom", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.iot#DisassociateSbomFromPackageVersionRequest": { + "type": "structure", + "members": { + "packageName": { + "target": "com.amazonaws.iot#PackageName", + "traits": { + "smithy.api#documentation": "

The name of the new software package.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "versionName": { + "target": "com.amazonaws.iot#VersionName", + "traits": { + "smithy.api#documentation": "

The name of the new package version.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.iot#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required.

", + "smithy.api#httpQuery": "clientToken", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.iot#DisassociateSbomFromPackageVersionResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.iot#DisconnectReason": { "type": "string" }, @@ -17342,6 +17680,14 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "beforeSubstitution": { + "target": "com.amazonaws.iot#BeforeSubstitutionFlag", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

A flag that provides a view of the job document before and after the substitution parameters have been resolved with their exact values.

", + "smithy.api#httpQuery": "beforeSubstitution" + } } }, "traits": { @@ -17711,6 +18057,12 @@ "smithy.api#documentation": "

Metadata that were added to the package version that can be used to define a package version’s configuration.

" } }, + "artifact": { + "target": "com.amazonaws.iot#PackageVersionArtifact", + "traits": { + "smithy.api#documentation": "

The various components that make up a software package version.

" + } + }, "status": { "target": "com.amazonaws.iot#PackageVersionStatus", "traits": { @@ -17734,6 +18086,24 @@ "traits": { "smithy.api#documentation": "

The date when the package version was last updated.

" } + }, + "sbom": { + "target": "com.amazonaws.iot#Sbom", + "traits": { + "smithy.api#documentation": "

The software bill of materials for a software package version.

" + } + }, + "sbomValidationStatus": { + "target": "com.amazonaws.iot#SbomValidationStatus", + "traits": { + "smithy.api#documentation": "

The status of the validation for a new software bill of materials added to a software\n package version.

" + } + }, + "recipe": { + "target": "com.amazonaws.iot#PackageVersionRecipe", + "traits": { + "smithy.api#documentation": "

The inline job document associated with a software package version used for a quick job\n deployment.

" + } } }, "traits": { @@ -23747,6 +24117,108 @@ "smithy.api#output": {} } }, + "com.amazonaws.iot#ListSbomValidationResults": { + "type": "operation", + "input": { + "target": "com.amazonaws.iot#ListSbomValidationResultsRequest" + }, + "output": { + "target": "com.amazonaws.iot#ListSbomValidationResultsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iot#InternalServerException" + }, + { + "target": "com.amazonaws.iot#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iot#ThrottlingException" + }, + { + "target": "com.amazonaws.iot#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

The validation results for all software bills of materials (SBOMs) attached to a specific software package version.

\n

Requires permission to access the ListSbomValidationResults action.

", + "smithy.api#http": { + "method": "GET", + "uri": "/packages/{packageName}/versions/{versionName}/sbom-validation-results", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "validationResultSummaries", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.iot#ListSbomValidationResultsRequest": { + "type": "structure", + "members": { + "packageName": { + "target": "com.amazonaws.iot#PackageName", + "traits": { + "smithy.api#documentation": "

The name of the new software package.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "versionName": { + "target": "com.amazonaws.iot#VersionName", + "traits": { + "smithy.api#documentation": "

The name of the new package version.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "validationResult": { + "target": "com.amazonaws.iot#SbomValidationResult", + "traits": { + "smithy.api#documentation": "

The end result of the SBOM validation, used to filter the returned results.

", + "smithy.api#httpQuery": "validationResult" + } + }, + "maxResults": { + "target": "com.amazonaws.iot#PackageCatalogMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return at one time.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.iot#NextToken", + "traits": { + "smithy.api#documentation": "

A token that can be used to retrieve the next set of results, or null if there are no additional results.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.iot#ListSbomValidationResultsResponse": { + "type": "structure", + "members": { + "validationResultSummaries": { + "target": "com.amazonaws.iot#SbomValidationResultSummaryList", + "traits": { + "smithy.api#documentation": "

A summary of the validation results for each software bill of materials attached to a software package version.

" + } + }, + "nextToken": { + "target": "com.amazonaws.iot#NextToken", + "traits": { + "smithy.api#documentation": "

A token that can be used to retrieve the next set of results, or null if there are no additional results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.iot#ListScheduledAudits": { "type": "operation", "input": { @@ -27107,9 +27579,30 @@ "smithy.api#pattern": "^arn:[!-~]+$" } }, + "com.amazonaws.iot#PackageVersionArtifact": { + "type": "structure", + "members": { + "s3Location": { + "target": "com.amazonaws.iot#S3Location" + } + }, + "traits": { + "smithy.api#documentation": "

A specific package version artifact associated with a software package version.

" + } + }, "com.amazonaws.iot#PackageVersionErrorReason": { "type": "string" }, + "com.amazonaws.iot#PackageVersionRecipe": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 3072 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.iot#PackageVersionStatus": { "type": "enum", "members": { @@ -29414,6 +29907,115 @@ } } }, + "com.amazonaws.iot#Sbom": { + "type": "structure", + "members": { + "s3Location": { + "target": "com.amazonaws.iot#S3Location" + } + }, + "traits": { + "smithy.api#documentation": "

A specific software bill of materials associated with a software\n package version.

" + } + }, + "com.amazonaws.iot#SbomValidationErrorCode": { + "type": "enum", + "members": { + "INCOMPATIBLE_FORMAT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCOMPATIBLE_FORMAT" + } + }, + "FILE_SIZE_LIMIT_EXCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FILE_SIZE_LIMIT_EXCEEDED" + } + } + } + }, + "com.amazonaws.iot#SbomValidationErrorMessage": { + "type": "string" + }, + "com.amazonaws.iot#SbomValidationResult": { + "type": "enum", + "members": { + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + } + } + }, + "com.amazonaws.iot#SbomValidationResultSummary": { + "type": "structure", + "members": { + "fileName": { + "target": "com.amazonaws.iot#FileName", + "traits": { + "smithy.api#documentation": "

The name of the SBOM file.

" + } + }, + "validationResult": { + "target": "com.amazonaws.iot#SbomValidationResult", + "traits": { + "smithy.api#documentation": "

The end result of the SBOM validation.

" + } + }, + "errorCode": { + "target": "com.amazonaws.iot#SbomValidationErrorCode", + "traits": { + "smithy.api#documentation": "

The errorCode representing the validation failure error if the SBOM\n validation failed.

" + } + }, + "errorMessage": { + "target": "com.amazonaws.iot#SbomValidationErrorMessage", + "traits": { + "smithy.api#documentation": "

The errorMessage representing the validation failure error if the SBOM\n validation failed.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A summary of the validation results for a specific software bill of materials (SBOM) attached to a software package version.

" + } + }, + "com.amazonaws.iot#SbomValidationResultSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.iot#SbomValidationResultSummary" + } + }, + "com.amazonaws.iot#SbomValidationStatus": { + "type": "enum", + "members": { + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + } + } + }, "com.amazonaws.iot#ScheduledAuditArn": { "type": "string" }, @@ -29786,7 +30388,7 @@ "enableOCSPCheck": { "target": "com.amazonaws.iot#EnableOCSPCheck", "traits": { - "smithy.api#documentation": "

A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server\n certificate check is enabled or not.

\n

For more information, see Configuring OCSP server-certificate stapling in domain\n configuration from Amazon Web Services IoT Core Developer Guide.

" + "smithy.api#documentation": "

A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server\n certificate check is enabled or not.

\n

For more information, see Configuring OCSP server-certificate stapling in domain\n configuration from Amazon Web Services IoT Core Developer Guide.

" } } }, @@ -31939,7 +32541,7 @@ "thingGroupNames": { "target": "com.amazonaws.iot#ThingGroupNameList", "traits": { - "smithy.api#documentation": "

Thing group names.

" + "smithy.api#documentation": "

Thing group and billing group names.

" } }, "attributes": { @@ -34048,6 +34650,24 @@ "traits": { "smithy.api#documentation": "

The server certificate configuration.

" } + }, + "authenticationType": { + "target": "com.amazonaws.iot#AuthenticationType", + "traits": { + "smithy.api#documentation": "

An enumerated string that specifies the authentication type.

\n
    \n
  • \n

    \n CUSTOM_AUTH_X509 - Use custom authentication and authorization with additional details from the X.509 client certificate.

    \n
  • \n
\n \n
    \n
  • \n

    \n AWS_X509 - Use X.509 client certificates without custom authentication and authorization. For more information,\n see X.509 client certificates.

    \n
  • \n
\n \n
    \n
  • \n

    \n DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify authentication type.\n For more information, see Device communication protocols.

    \n
  • \n
" + } + }, + "applicationProtocol": { + "target": "com.amazonaws.iot#ApplicationProtocol", + "traits": { + "smithy.api#documentation": "

An enumerated string that specifies the application-layer protocol.

\n
    \n
  • \n

    \n SECURE_MQTT - MQTT over TLS.

    \n
  • \n
\n
    \n
  • \n

    \n MQTT_WSS - MQTT over WebSocket.

    \n
  • \n
\n
    \n
  • \n

    \n HTTPS - HTTP over TLS.

    \n
  • \n
\n
    \n
  • \n

    \n DEFAULT - Use a combination of port and Application Layer Protocol Negotiation (ALPN) to specify application-layer protocol. \n For more information, see Device communication protocols.

    \n
  • \n
" + } + }, + "clientCertificateConfig": { + "target": "com.amazonaws.iot#ClientCertificateConfig", + "traits": { + "smithy.api#documentation": "

An object that specifies the client certificate configuration for a domain.
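For context, a minimal Soto sketch of driving the new members added above (applicationProtocol, authenticationType, clientCertificateConfig) from Swift. The member and enum-case names are assumed to follow Soto's code generation of this model, the client setup is Soto 6.x style, and the domain configuration name is a placeholder.

import SotoIoT

// Sketch only: switch an existing domain configuration to MQTT over TLS with
// plain X.509 client-certificate authentication (no custom authorizer).
let awsClient = AWSClient(httpClientProvider: .createNew)   // Soto 6.x-style client setup
let iot = IoT(client: awsClient, region: .useast1)

_ = try await iot.updateDomainConfiguration(.init(
    applicationProtocol: .secureMqtt,       // SECURE_MQTT: MQTT over TLS
    authenticationType: .awsX509,           // AWS_X509: X.509 client certs, no custom auth
    domainConfigurationName: "my-domain-config"
))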

" + } } }, "traits": { @@ -34783,12 +35403,24 @@ "smithy.api#documentation": "

Metadata that can be used to define a package version’s configuration. For example, the Amazon S3 file location, configuration options that are being sent to the device or fleet.

\n

\n Note: Attributes can be updated only when the package version\n is in a draft state.

\n

The combined size of all the attributes on a package version is limited to 3KB.

" } }, + "artifact": { + "target": "com.amazonaws.iot#PackageVersionArtifact", + "traits": { + "smithy.api#documentation": "

The various components that make up a software package version.

" + } + }, "action": { "target": "com.amazonaws.iot#PackageVersionAction", "traits": { "smithy.api#documentation": "

The status that the package version should be assigned. For more information, see Package version lifecycle.

" } }, + "recipe": { + "target": "com.amazonaws.iot#PackageVersionRecipe", + "traits": { + "smithy.api#documentation": "

The inline job document associated with a software package version used for a quick job\n deployment.
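A sketch of supplying the new artifact and recipe members from Swift, re-using the iot client from the earlier sketch. Treating this shape as the UpdatePackageVersion request (suggested by the surrounding action and attributes members) is an assumption, as are the member names; the S3 location, recipe body, and package/version names are placeholders.

// Sketch only: attach an S3-hosted artifact and an inline recipe while
// publishing a draft package version.
_ = try await iot.updatePackageVersion(.init(
    action: .publish,                        // assumed enum case for the PUBLISH action
    artifact: .init(s3Location: .init(
        bucket: "my-firmware-bucket",        // placeholder S3 location
        key: "firmware/1.2.0.bin",
        version: "3"
    )),
    packageName: "my-package",
    recipe: #"{"steps": []}"#,               // placeholder inline job document (sensitive, max 3072 chars)
    versionName: "1.2.0"
))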

" + } + }, "clientToken": { "target": "com.amazonaws.iot#ClientToken", "traits": { @@ -34931,7 +35563,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a role alias.

\n

Requires permission to access the UpdateRoleAlias action.

", + "smithy.api#documentation": "

Updates a role alias.

\n

Requires permission to access the UpdateRoleAlias action.

\n \n

The value of \n credentialDurationSeconds\n must be less than or equal to the\n maximum session duration of the IAM role that the role alias references. For more\n information, see Modifying a role maximum session duration (Amazon Web Services API) from the Amazon Web Services\n Identity and Access Management User Guide.
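A short sketch of the constraint described above, again re-using the iot client: keep credentialDurationSeconds at or below the maximum session duration of the IAM role behind the alias. The alias name and the one-hour value are placeholders.

// Sketch only: valid as long as the referenced IAM role allows sessions of
// at least 3600 seconds.
_ = try await iot.updateRoleAlias(.init(
    credentialDurationSeconds: 3600,    // must not exceed the role's max session duration
    roleAlias: "my-role-alias"
))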

\n
", "smithy.api#http": { "method": "PUT", "uri": "/role-aliases/{roleAlias}", @@ -35289,6 +35921,9 @@ { "target": "com.amazonaws.iot#InvalidRequestException" }, + { + "target": "com.amazonaws.iot#LimitExceededException" + }, { "target": "com.amazonaws.iot#ResourceNotFoundException" }, diff --git a/models/iotdeviceadvisor.json b/models/iotdeviceadvisor.json index 0ca5e6ae2f..fd638cd1e0 100644 --- a/models/iotdeviceadvisor.json +++ b/models/iotdeviceadvisor.json @@ -55,6 +55,16 @@ } } }, + "com.amazonaws.iotdeviceadvisor#ClientToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\u0021-\\u007E]+$" + } + }, "com.amazonaws.iotdeviceadvisor#ConflictException": { "type": "structure", "members": { @@ -112,6 +122,13 @@ "traits": { "smithy.api#documentation": "

The tags to be attached to the suite definition.

" } + }, + "clientToken": { + "target": "com.amazonaws.iotdeviceadvisor#ClientToken", + "traits": { + "smithy.api#documentation": "

The client token for the test suite definition creation. \n This token is used for tracking test suite definition creation \n using retries and obtaining its status. This parameter is optional.

", + "smithy.api#idempotencyToken": {} + } } }, "traits": { diff --git a/models/iotfleetwise.json b/models/iotfleetwise.json index f0bd3f5e59..6d45e04fa8 100644 --- a/models/iotfleetwise.json +++ b/models/iotfleetwise.json @@ -102,7 +102,8 @@ "smithy.api#length": { "min": 1, "max": 1011 - } + }, + "smithy.api#pattern": "^arn:.*" } }, "com.amazonaws.iotfleetwise#AssociateVehicleFleet": { @@ -463,7 +464,7 @@ "type": "structure", "members": { "arn": { - "target": "com.amazonaws.iotfleetwise#arn", + "target": "com.amazonaws.iotfleetwise#campaignArn", "traits": { "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a campaign.

" } @@ -897,6 +898,9 @@ "priority": { "target": "com.amazonaws.iotfleetwise#priority", "traits": { + "smithy.api#deprecated": { + "message": "priority is no longer used or needed as input" + }, "smithy.api#documentation": "

(Optional) A number indicating the priority of one campaign over another campaign for\n a certain vehicle or fleet. A campaign with the lowest value is deployed to vehicles\n before any other campaigns. If it's not specified, 0 is used.

\n

Default: 0\n

" } }, @@ -943,7 +947,7 @@ } }, "arn": { - "target": "com.amazonaws.iotfleetwise#arn", + "target": "com.amazonaws.iotfleetwise#campaignArn", "traits": { "smithy.api#documentation": "

The ARN of the created campaign.

" } @@ -1667,10 +1671,12 @@ "target": "com.amazonaws.iotfleetwise#NodePath" }, "traits": { + "aws.api#data": "content", "smithy.api#length": { "min": 0, "max": 5 - } + }, + "smithy.api#sensitive": {} } }, "com.amazonaws.iotfleetwise#DataFormat": { @@ -1868,7 +1874,7 @@ } }, "arn": { - "target": "com.amazonaws.iotfleetwise#arn", + "target": "com.amazonaws.iotfleetwise#campaignArn", "traits": { "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the deleted campaign.

\n \n

The ARN isn’t returned if a campaign doesn’t exist.

\n
" } @@ -2503,7 +2509,7 @@ } }, "arn": { - "target": "com.amazonaws.iotfleetwise#arn", + "target": "com.amazonaws.iotfleetwise#campaignArn", "traits": { "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the campaign.

" } @@ -4592,7 +4598,7 @@ } }, "status": { - "target": "com.amazonaws.iotfleetwise#status", + "target": "com.amazonaws.iotfleetwise#statusStr", "traits": { "smithy.api#documentation": "

Optional parameter to filter the results by the status of each created campaign in\n your account. The status can be one of: CREATING,\n WAITING_FOR_APPROVAL, RUNNING, or\n SUSPENDED.

", "smithy.api#httpQuery": "status" @@ -7290,10 +7296,12 @@ "target": "com.amazonaws.iotfleetwise#SignalInformation" }, "traits": { + "aws.api#data": "content", "smithy.api#length": { "min": 0, "max": 1000 - } + }, + "smithy.api#sensitive": {} } }, "com.amazonaws.iotfleetwise#SignalNodeType": { @@ -7983,7 +7991,7 @@ "type": "structure", "members": { "arn": { - "target": "com.amazonaws.iotfleetwise#arn", + "target": "com.amazonaws.iotfleetwise#campaignArn", "traits": { "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the campaign.

" } @@ -8829,7 +8837,7 @@ "type": "structure", "members": { "campaignName": { - "target": "com.amazonaws.iotfleetwise#string", + "target": "com.amazonaws.iotfleetwise#campaignName", "traits": { "smithy.api#documentation": "

The name of a campaign.

" } @@ -8962,6 +8970,12 @@ "target": "com.amazonaws.iotfleetwise#attributeValue" } }, + "com.amazonaws.iotfleetwise#campaignArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws:iotfleetwise:[a-z0-9-]+:[0-9]{12}:campaign/[a-zA-Z\\d\\-_:]{1,100}$" + } + }, "com.amazonaws.iotfleetwise#campaignName": { "type": "string", "traits": { @@ -8983,7 +8997,7 @@ "traits": { "smithy.api#range": { "min": 10000, - "max": 60000 + "max": 86400000 } } }, @@ -9038,10 +9052,12 @@ "com.amazonaws.iotfleetwise#eventExpression": { "type": "string", "traits": { + "aws.api#data": "content", "smithy.api#length": { "min": 1, "max": 2048 - } + }, + "smithy.api#sensitive": {} } }, "com.amazonaws.iotfleetwise#fleetId": { @@ -9070,7 +9086,8 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 1 + "min": 1, + "max": 1 } } }, @@ -9188,8 +9205,15 @@ "target": "com.amazonaws.iotfleetwise#SignalCatalogSummary" } }, - "com.amazonaws.iotfleetwise#status": { - "type": "string" + "com.amazonaws.iotfleetwise#statusStr": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 7, + "max": 20 + }, + "smithy.api#pattern": "^[A-Z_]*$" + } }, "com.amazonaws.iotfleetwise#string": { "type": "string" diff --git a/models/ivs-realtime.json b/models/ivs-realtime.json index c3099d5492..4f32146ef9 100644 --- a/models/ivs-realtime.json +++ b/models/ivs-realtime.json @@ -2190,7 +2190,7 @@ "errorCode": { "target": "com.amazonaws.ivsrealtime#EventErrorCode", "traits": { - "smithy.api#documentation": "

If the event is an error event, the error code is provided to give insight into the\n specific error that occurred. If the event is not an error event, this field is null.\n INSUFFICIENT_CAPABILITIES indicates that the participant tried to take an\n action that the participant’s token is not allowed to do. For more information about\n participant capabilities, see the capabilities field in CreateParticipantToken. QUOTA_EXCEEDED indicates that the\n number of participants who want to publish/subscribe to a stage exceeds the quota; for more\n information, see Service Quotas.\n PUBLISHER_NOT_FOUND indicates that the participant tried to subscribe to a\n publisher that doesn’t exist.

" + "smithy.api#documentation": "

If the event is an error event, the error code is provided to give insight into the\n specific error that occurred. If the event is not an error event, this field is null.

\n
    \n
  • \n

    \n B_FRAME_PRESENT —\n\t\t The participant's stream includes B-frames.\n\t\t For details, see \n\t\t IVS RTMP Publishing.

    \n
  • \n
  • \n

    \n BITRATE_EXCEEDED —\n\t\t The participant exceeded the maximum supported bitrate.\n\t\t For details, see \n\t\t Service Quotas.

    \n
  • \n
  • \n

    \n INSUFFICIENT_CAPABILITIES —\n\t\t The participant tried to take an action\n\t\t that the participant’s token is not allowed to do. For details on participant capabilities, see\n\t\t the capabilities field in CreateParticipantToken.

    \n
  • \n
  • \n

    \n INTERNAL_SERVER_EXCEPTION —\n\t\t The participant failed to publish to the stage due to an internal server error.

    \n
  • \n
  • \n

    \n INVALID_AUDIO_CODEC —\n\t\t The participant is using an invalid audio codec.\n\t\t For details, see \n\t\t Stream Ingest.

    \n
  • \n
  • \n

    \n INVALID_INPUT —\n\t\t The participant is using an invalid input stream.

    \n
  • \n
  • \n

    \n INVALID_PROTOCOL —\n\t\t The participant's IngestConfiguration resource is configured for RTMPS but they tried streaming with RTMP.\n\t\t For details, see \n\t\t IVS RTMP Publishing.

    \n
  • \n
  • \n

    \n INVALID_STREAM_KEY —\n\t\t The participant is using an invalid stream key.\n\t\t For details, see \n\t\t IVS RTMP Publishing.

    \n
  • \n
  • \n

    \n INVALID_VIDEO_CODEC —\n\t\t The participant is using an invalid video codec.\n\t\t For details, see \n\t\t Stream Ingest.

    \n
  • \n
  • \n

    \n PUBLISHER_NOT_FOUND —\n\t\t The participant tried to subscribe to a publisher that doesn’t exist.

    \n
  • \n
  • \n

    \n QUOTA_EXCEEDED —\n\t\t The number of participants who want to publish/subscribe to a stage exceeds the quota.\n\t\t For details, see \n\t\t Service Quotas.

    \n
  • \n
  • \n

    \n RESOLUTION_EXCEEDED —\n\t\t The participant exceeded the maximum supported resolution.\n\t\t For details, see \n\t\t Service Quotas.

    \n
  • \n
  • \n

    \n REUSE_OF_STREAM_KEY —\n\t\t The participant tried to use a stream key that is associated with another active stage session.

    \n
  • \n
  • \n

    \n STREAM_DURATION_EXCEEDED —\n\t\t The participant exceeded the maximum allowed stream duration.\n\t\t For details, see \n\t\t Service Quotas.

    \n
  • \n
" } } }, @@ -2266,6 +2266,24 @@ "traits": { "smithy.api#enumValue": "REUSE_OF_STREAM_KEY" } + }, + "B_FRAME_PRESENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "B_FRAME_PRESENT" + } + }, + "INVALID_INPUT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INVALID_INPUT" + } + }, + "INTERNAL_SERVER_EXCEPTION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INTERNAL_SERVER_EXCEPTION" + } } } }, @@ -2878,7 +2896,7 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 1, + "min": 2, "max": 1920 } } @@ -5795,13 +5813,13 @@ "width": { "target": "com.amazonaws.ivsrealtime#Width", "traits": { - "smithy.api#documentation": "

Video-resolution width. Note that the maximum value is determined by width\n times height, such that the maximum total pixels is 2073600 (1920x1080 or\n 1080x1920). Default: 1280.

" + "smithy.api#documentation": "

Video-resolution width. This must be an even number. Note that the maximum value is determined by width\n times height, such that the maximum total pixels is 2073600 (1920x1080 or\n 1080x1920). Default: 1280.

" } }, "height": { "target": "com.amazonaws.ivsrealtime#Height", "traits": { - "smithy.api#documentation": "

Video-resolution height. Note that the maximum value is determined by width\n times height, such that the maximum total pixels is 2073600 (1920x1080 or\n 1080x1920). Default: 720.

" + "smithy.api#documentation": "

Video-resolution height. This must be an even number. Note that the maximum value is determined by width\n times height, such that the maximum total pixels is 2073600 (1920x1080 or\n 1080x1920). Default: 720.
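The new constraint (both dimensions even, at least 2, and width times height at most 2,073,600) can be checked client-side before building composition video settings. A sketch, assuming the Soto-generated IVSRealTime.Video type mirrors the members above:

import SotoIVSRealTime

// Sketch only: returns nil when the requested resolution violates the
// documented limits (even dimensions, max 2,073,600 total pixels).
func compositionVideo(width: Int, height: Int) -> IVSRealTime.Video? {
    guard width % 2 == 0, height % 2 == 0,
          width >= 2, height >= 2,
          width * height <= 2_073_600
    else { return nil }
    return IVSRealTime.Video(framerate: 30, height: height, width: width)
}

_ = compositionVideo(width: 1280, height: 720)    // valid: the documented defaults
_ = compositionVideo(width: 1919, height: 1080)   // nil: odd width
_ = compositionVideo(width: 1920, height: 1200)   // nil: exceeds the pixel budget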

" } }, "framerate": { @@ -5877,7 +5895,7 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 1, + "min": 2, "max": 1920 } } diff --git a/models/ivs.json b/models/ivs.json index 950439110e..bd40fc7c47 100644 --- a/models/ivs.json +++ b/models/ivs.json @@ -4037,6 +4037,12 @@ "traits": { "smithy.api#documentation": "

Time when the event occurred. This is an ISO 8601 timestamp; note that this is\n returned as a string.

" } + }, + "code": { + "target": "com.amazonaws.ivs#String", + "traits": { + "smithy.api#documentation": "

Provides additional details about the stream event. There are several values; note that \n\t\t\tthe long descriptions are provided in the IVS console but not delivered through \n\t \t the IVS API or EventBridge:

\n
    \n
  • \n

    \n StreamTakeoverMediaMismatch — The broadcast client attempted to take over \n\t\t\twith different media properties (e.g., codec, resolution, or video track type) from the \n\t\t\toriginal stream.

    \n
  • \n
  • \n

    \n StreamTakeoverInvalidPriority — The broadcast client attempted a takeover \n\t\t\twith either a priority integer value equal to or lower than the original stream's value or a value outside \n\t\t\tthe allowed range of 1 to 2,147,483,647.

    \n
  • \n
  • \n

    \n StreamTakeoverLimitBreached — The broadcast client reached the maximum allowed \n\t\t\ttakeover attempts for this stream.

    \n
  • \n
" + } } }, "traits": { diff --git a/models/kinesis.json b/models/kinesis.json index 4176b38755..035a669dd7 100644 --- a/models/kinesis.json +++ b/models/kinesis.json @@ -300,7 +300,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Kinesis data stream. A stream captures and transports data records that are\n continuously emitted from different data sources or producers.\n Scale-out within a stream is explicitly supported by means of shards, which are uniquely\n identified groups of data records in a stream.

\n

You can create your data stream using either on-demand or provisioned capacity mode.\n Data streams with an on-demand mode require no capacity planning and automatically scale\n to handle gigabytes of write and read throughput per minute. With the on-demand mode,\n Kinesis Data Streams automatically manages the shards in order to provide the necessary\n throughput. For the data streams with a provisioned mode, you must specify the number of\n shards for the data stream. Each shard can support reads up to five transactions per\n second, up to a maximum data read total of 2 MiB per second. Each shard can support\n writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per\n second. If the amount of data input increases or decreases, you can add or remove\n shards.

\n

The stream name identifies the stream. The name is scoped to the Amazon Web Services\n account used by the application. It is also scoped by Amazon Web Services Region. That\n is, two streams in two different accounts can have the same name, and two streams in the\n same account, but in two different Regions, can have the same name.

\n

\n CreateStream is an asynchronous operation. Upon receiving a\n CreateStream request, Kinesis Data Streams immediately returns and sets\n the stream status to CREATING. After the stream is created, Kinesis Data\n Streams sets the stream status to ACTIVE. You should perform read and write\n operations only on an ACTIVE stream.

\n

You receive a LimitExceededException when making a\n CreateStream request when you try to do one of the following:

\n
    \n
  • \n

    Have more than five streams in the CREATING state at any point in\n time.

    \n
  • \n
  • \n

    Create more shards than are authorized for your account.

    \n
  • \n
\n

For the default shard limit for an Amazon Web Services account, see Amazon\n Kinesis Data Streams Limits in the Amazon Kinesis Data Streams\n Developer Guide. To increase this limit, contact Amazon Web Services\n Support.

\n

You can use DescribeStreamSummary to check the stream status, which\n is returned in StreamStatus.

\n

\n CreateStream has a limit of five transactions per second per\n account.

" + "smithy.api#documentation": "

Creates a Kinesis data stream. A stream captures and transports data records that are\n continuously emitted from different data sources or producers.\n Scale-out within a stream is explicitly supported by means of shards, which are uniquely\n identified groups of data records in a stream.

\n

You can create your data stream using either on-demand or provisioned capacity mode.\n Data streams with an on-demand mode require no capacity planning and automatically scale\n to handle gigabytes of write and read throughput per minute. With the on-demand mode,\n Kinesis Data Streams automatically manages the shards in order to provide the necessary\n throughput. For the data streams with a provisioned mode, you must specify the number of\n shards for the data stream. Each shard can support reads up to five transactions per\n second, up to a maximum data read total of 2 MiB per second. Each shard can support\n writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per\n second. If the amount of data input increases or decreases, you can add or remove\n shards.

\n

The stream name identifies the stream. The name is scoped to the Amazon Web Services\n account used by the application. It is also scoped by Amazon Web Services Region. That\n is, two streams in two different accounts can have the same name, and two streams in the\n same account, but in two different Regions, can have the same name.

\n

\n CreateStream is an asynchronous operation. Upon receiving a\n CreateStream request, Kinesis Data Streams immediately returns and sets\n the stream status to CREATING. After the stream is created, Kinesis Data\n Streams sets the stream status to ACTIVE. You should perform read and write\n operations only on an ACTIVE stream.

\n

You receive a LimitExceededException when making a\n CreateStream request when you try to do one of the following:

\n
    \n
  • \n

    Have more than five streams in the CREATING state at any point in\n time.

    \n
  • \n
  • \n

    Create more shards than are authorized for your account.

    \n
  • \n
\n

For the default shard limit for an Amazon Web Services account, see Amazon\n Kinesis Data Streams Limits in the Amazon Kinesis Data Streams\n Developer Guide. To increase this limit, contact Amazon Web Services\n Support.

\n

You can use DescribeStreamSummary to check the stream status, which\n is returned in StreamStatus.

\n

\n CreateStream has a limit of five transactions per second per\n account.

\n

You can add tags to the stream when making a CreateStream request by\n setting the Tags parameter. If you pass the Tags parameter, in\n addition to having kinesis:createStream permission, you must also have\n kinesis:addTagsToStream permission for the stream that will be created.\n Tags will take effect from the CREATING status of the stream.

" } }, "com.amazonaws.kinesis#CreateStreamInput": { @@ -324,6 +324,12 @@ "traits": { "smithy.api#documentation": "

Indicates the capacity mode of the data stream. Currently, in Kinesis Data Streams,\n you can choose between an on-demand capacity mode and a\n provisioned capacity mode for your data\n streams.

" } + }, + "Tags": { + "target": "com.amazonaws.kinesis#TagMap", + "traits": { + "smithy.api#documentation": "

A set of up to 10 key-value pairs to use to create the tags.
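A sketch of the new Tags member on CreateStream through Soto; member names are assumed from the model, and the stream name and tag values are placeholders. As the operation documentation above notes, the caller also needs kinesis:AddTagsToStream in addition to kinesis:CreateStream.

import SotoKinesis

// Sketch only: create an on-demand stream and tag it in the same request.
let awsClient = AWSClient(httpClientProvider: .createNew)   // Soto 6.x-style client setup
let kinesis = Kinesis(client: awsClient, region: .useast1)

try await kinesis.createStream(.init(
    streamModeDetails: .init(streamMode: .onDemand),   // no shardCount needed for on-demand
    streamName: "clickstream-events",
    tags: ["team": "data-platform", "env": "prod"]     // up to 10 key-value pairs
))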

" + } } }, "traits": { @@ -1296,6 +1302,9 @@ { "target": "com.amazonaws.kinesis#LimitExceededException" }, + { + "target": "com.amazonaws.kinesis#ResourceInUseException" + }, { "target": "com.amazonaws.kinesis#ResourceNotFoundException" } @@ -6929,7 +6938,7 @@ } ], "traits": { - "smithy.api#documentation": "

Registers a consumer with a Kinesis data stream. When you use this operation, the\n consumer you register can then call SubscribeToShard to receive data\n from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every\n shard you subscribe to. This rate is unaffected by the total number of consumers that\n read from the same stream.

\n

You can register up to 20 consumers per stream. A given consumer can only be\n registered with one stream at a time.

\n

For an example of how to use this operations, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.

\n

The use of this operation has a limit of five transactions per second per account.\n Also, only 5 consumers can be created simultaneously. In other words, you cannot have\n more than 5 consumers in a CREATING status at the same time. Registering a\n 6th consumer while there are 5 in a CREATING status results in a\n LimitExceededException.

", + "smithy.api#documentation": "

Registers a consumer with a Kinesis data stream. When you use this operation, the\n consumer you register can then call SubscribeToShard to receive data\n from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every\n shard you subscribe to. This rate is unaffected by the total number of consumers that\n read from the same stream.

\n

You can register up to 20 consumers per stream. A given consumer can only be\n registered with one stream at a time.

\n

For an example of how to use this operation, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.

\n

The use of this operation has a limit of five transactions per second per account.\n Also, only 5 consumers can be created simultaneously. In other words, you cannot have\n more than 5 consumers in a CREATING status at the same time. Registering a\n 6th consumer while there are 5 in a CREATING status results in a\n LimitExceededException.

", "smithy.rules#staticContextParams": { "OperationType": { "value": "control" @@ -7889,7 +7898,7 @@ } ], "traits": { - "smithy.api#documentation": "

This operation establishes an HTTP/2 connection between the consumer you specify in\n the ConsumerARN parameter and the shard you specify in the\n ShardId parameter. After the connection is successfully established,\n Kinesis Data Streams pushes records from the shard to the consumer over this connection.\n Before you call this operation, call RegisterStreamConsumer to\n register the consumer with Kinesis Data Streams.

\n

When the SubscribeToShard call succeeds, your consumer starts receiving\n events of type SubscribeToShardEvent over the HTTP/2 connection for up\n to 5 minutes, after which time you need to call SubscribeToShard again to\n renew the subscription if you want to continue to receive records.

\n

You can make one call to SubscribeToShard per second per registered\n consumer per shard. For example, if you have a 4000 shard stream and two registered\n stream consumers, you can make one SubscribeToShard request per second for\n each combination of shard and registered consumer, allowing you to subscribe both\n consumers to all 4000 shards in one second.

\n

If you call SubscribeToShard again with the same ConsumerARN\n and ShardId within 5 seconds of a successful call, you'll get a\n ResourceInUseException. If you call SubscribeToShard 5\n seconds or more after a successful call, the second call takes over the subscription and\n the previous connection expires or fails with a\n ResourceInUseException.

\n

For an example of how to use this operations, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.

", + "smithy.api#documentation": "

This operation establishes an HTTP/2 connection between the consumer you specify in\n the ConsumerARN parameter and the shard you specify in the\n ShardId parameter. After the connection is successfully established,\n Kinesis Data Streams pushes records from the shard to the consumer over this connection.\n Before you call this operation, call RegisterStreamConsumer to\n register the consumer with Kinesis Data Streams.

\n

When the SubscribeToShard call succeeds, your consumer starts receiving\n events of type SubscribeToShardEvent over the HTTP/2 connection for up\n to 5 minutes, after which time you need to call SubscribeToShard again to\n renew the subscription if you want to continue to receive records.

\n

You can make one call to SubscribeToShard per second per registered\n consumer per shard. For example, if you have a 4000 shard stream and two registered\n stream consumers, you can make one SubscribeToShard request per second for\n each combination of shard and registered consumer, allowing you to subscribe both\n consumers to all 4000 shards in one second.

\n

If you call SubscribeToShard again with the same ConsumerARN\n and ShardId within 5 seconds of a successful call, you'll get a\n ResourceInUseException. If you call SubscribeToShard 5\n seconds or more after a successful call, the second call takes over the subscription and\n the previous connection expires or fails with a\n ResourceInUseException.

\n

For an example of how to use this operation, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.

", "smithy.rules#staticContextParams": { "OperationType": { "value": "data" diff --git a/models/lambda.json b/models/lambda.json index 3a15790aad..674144fde1 100644 --- a/models/lambda.json +++ b/models/lambda.json @@ -1635,6 +1635,23 @@ ], "traits": { "smithy.api#documentation": "

Adds permissions to the resource-based policy of a version of an Lambda\n layer. Use this action to grant layer\n usage permission to other accounts. You can grant permission to a single account, all accounts in an organization,\n or all Amazon Web Services accounts.

\n

To revoke permission, call RemoveLayerVersionPermission with the statement ID that you\n specified when you added it.

", + "smithy.api#examples": [ + { + "title": "To add permissions to a layer version", + "documentation": "The following example grants permission for the account 223456789012 to use version 1 of a layer named my-layer.", + "input": { + "LayerName": "my-layer", + "VersionNumber": 1, + "StatementId": "xaccount", + "Action": "lambda:GetLayerVersion", + "Principal": "223456789012" + }, + "output": { + "Statement": "{\"Sid\":\"xaccount\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::223456789012:root\"},\"Action\":\"lambda:GetLayerVersion\",\"Resource\":\"arn:aws:lambda:us-east-2:123456789012:layer:my-layer:1\"}", + "RevisionId": "35d87451-f796-4a3f-a618-95a3671b0a0c" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", @@ -1753,7 +1770,37 @@ } ], "traits": { - "smithy.api#documentation": "

Grants an Amazon Web Servicesservice, Amazon Web Services account, or Amazon Web Services organization\n permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict\n access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name\n (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies\n to version $LATEST.

\n

To grant permission to another account, specify the account ID as the Principal. To grant\n permission to an organization defined in Organizations, specify the organization ID as the\n PrincipalOrgID. For Amazon Web Servicesservices, the principal is a domain-style identifier that\n the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Servicesservices, you can also specify the ARN of the associated resource as the SourceArn. If\n you grant permission to a service principal without specifying the source, other accounts could potentially\n configure resources in their account to invoke your Lambda function.

\n

This operation adds a statement to a resource-based permissions policy for the function. For more information\n about function policies, see Using resource-based policies for Lambda.

", + "smithy.api#documentation": "

Grants a principal \n permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict\n access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name\n (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies\n to version $LATEST.

\n

To grant permission to another account, specify the account ID as the Principal. To grant\n permission to an organization defined in Organizations, specify the organization ID as the\n PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style identifier that\n the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If\n you grant permission to a service principal without specifying the source, other accounts could potentially\n configure resources in their account to invoke your Lambda function.

\n

This operation adds a statement to a resource-based permissions policy for the function. For more information\n about function policies, see Using resource-based policies for Lambda.
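A Soto sketch mirroring the Amazon S3 example added above: grant the s3.amazonaws.com service principal permission to invoke the function, scoped by source ARN and owning account. Names are assumed from the model; the ARN and account ID are the placeholders from the example.

import SotoLambda

// Sketch only: allow S3 event notifications from one bucket, owned by one
// account, to invoke my-function.
let awsClient = AWSClient(httpClientProvider: .createNew)   // Soto 6.x-style client setup
let lambda = Lambda(client: awsClient, region: .useast2)

let added = try await lambda.addPermission(.init(
    action: "lambda:InvokeFunction",
    functionName: "my-function",
    principal: "s3.amazonaws.com",                      // service principal
    sourceAccount: "123456789012",                      // guards against bucket re-creation
    sourceArn: "arn:aws:s3:::my-bucket-1xpuxmplzrlbh",
    statementId: "s3"
))
print(added.statement ?? "")                            // the added policy statement as JSON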

", + "smithy.api#examples": [ + { + "title": "To grant Amazon S3 permission to invoke a function", + "documentation": "The following example adds permission for Amazon S3 to invoke a Lambda function named my-function for notifications from a bucket named my-bucket-1xpuxmplzrlbh in account 123456789012.", + "input": { + "FunctionName": "my-function", + "StatementId": "s3", + "Action": "lambda:InvokeFunction", + "Principal": "s3.amazonaws.com", + "SourceArn": "arn:aws:s3:::my-bucket-1xpuxmplzrlbh/*", + "SourceAccount": "123456789012" + }, + "output": { + "Statement": "{\"Sid\":\"s3\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"s3.amazonaws.com\"},\"Action\":\"lambda:InvokeFunction\",\"Resource\":\"arn:aws:lambda:us-east-2:123456789012:function:my-function\",\"Condition\":{\"StringEquals\":{\"AWS:SourceAccount\":\"123456789012\"},\"ArnLike\":{\"AWS:SourceArn\":\"arn:aws:s3:::my-bucket-1xpuxmplzrlbh\"}}}" + } + }, + { + "title": "To grant another account permission to invoke a function", + "documentation": "The following example adds permission for account 223456789012 invoke a Lambda function named my-function.", + "input": { + "FunctionName": "my-function", + "StatementId": "xaccount", + "Action": "lambda:InvokeFunction", + "Principal": "223456789012" + }, + "output": { + "Statement": "{\"Sid\":\"xaccount\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::223456789012:root\"},\"Action\":\"lambda:InvokeFunction\",\"Resource\":\"arn:aws:lambda:us-east-2:123456789012:function:my-function\"}" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2015-03-31/functions/{FunctionName}/policy", @@ -1789,20 +1836,20 @@ "Principal": { "target": "com.amazonaws.lambda#Principal", "traits": { - "smithy.api#documentation": "

The Amazon Web Servicesservice or Amazon Web Services account that invokes the function. If you specify a\n service, use SourceArn or SourceAccount to limit who can invoke the function through\n that service.

", + "smithy.api#documentation": "

The Amazon Web Services service, Amazon Web Services account, IAM user, or IAM role that invokes the function. If you specify a\n service, use SourceArn or SourceAccount to limit who can invoke the function through\n that service.

", "smithy.api#required": {} } }, "SourceArn": { "target": "com.amazonaws.lambda#Arn", "traits": { - "smithy.api#documentation": "

For Amazon Web Servicesservices, the ARN of the Amazon Web Services resource that invokes the function. For\n example, an Amazon S3 bucket or Amazon SNS topic.

\n

Note that Lambda configures the comparison using the StringLike operator.

" + "smithy.api#documentation": "

For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For\n example, an Amazon S3 bucket or Amazon SNS topic.

\n

Note that Lambda configures the comparison using the StringLike operator.

" } }, "SourceAccount": { "target": "com.amazonaws.lambda#SourceOwner", "traits": { - "smithy.api#documentation": "

For Amazon Web Servicesservice, the ID of the Amazon Web Services account that owns the resource. Use this\n together with SourceArn to ensure that the specified account owns the resource. It is possible for an\n Amazon S3 bucket to be deleted by its owner and recreated by another account.

" + "smithy.api#documentation": "

For Amazon Web Services service, the ID of the Amazon Web Services account that owns the resource. Use this\n together with SourceArn to ensure that the specified account owns the resource. It is possible for an\n Amazon S3 bucket to be deleted by its owner and recreated by another account.

" } }, "EventSourceToken": { @@ -2379,6 +2426,25 @@ ], "traits": { "smithy.api#documentation": "

Creates an alias for a\n Lambda function version. Use aliases to provide clients with a function identifier that you can update to invoke a\n different version.

\n

You can also map an alias to split invocation requests between two versions. Use the\n RoutingConfig parameter to specify a second version and the percentage of invocation requests that\n it receives.

", + "smithy.api#examples": [ + { + "title": "To create an alias for a Lambda function", + "documentation": "The following example creates an alias named LIVE that points to version 1 of the my-function Lambda function.", + "input": { + "FunctionName": "my-function", + "Name": "LIVE", + "FunctionVersion": "1", + "Description": "alias for live version of function" + }, + "output": { + "FunctionVersion": "1", + "Name": "LIVE", + "AliasArn": "arn:aws:lambda:us-east-2:123456789012:function:my-function:LIVE", + "RevisionId": "873282ed-xmpl-4dc8-a069-d0c647e470c6", + "Description": "alias for live version of function" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2015-03-31/functions/{FunctionName}/aliases", @@ -2474,6 +2540,12 @@ "traits": { "smithy.api#documentation": "

The code signing policies define the actions to take if the validation checks fail.

" } + }, + "Tags": { + "target": "com.amazonaws.lambda#Tags", + "traits": { + "smithy.api#documentation": "

A list of tags to add to the code signing configuration.

" + } } }, "traits": { @@ -2522,6 +2594,26 @@ ], "traits": { "smithy.api#documentation": "

Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.

\n

For details about how to configure different event sources, see the following topics.

\n \n

The following error handling options are available only for stream sources (DynamoDB and Kinesis):

\n
    \n
  • \n

    \n BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.

    \n
  • \n
  • \n

    \n DestinationConfig – Send discarded records to an Amazon SQS queue or Amazon SNS topic.

    \n
  • \n
  • \n

    \n MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires

    \n
  • \n
  • \n

    \n MaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

    \n
  • \n
  • \n

    \n ParallelizationFactor – Process multiple batches from each shard concurrently.

    \n
  • \n
\n

For information about which configuration parameters apply to each event source, see the following topics.

\n ", + "smithy.api#examples": [ + { + "title": "To create a mapping between an event source and an AWS Lambda function", + "documentation": "The following example creates a mapping between an SQS queue and the my-function Lambda function.", + "input": { + "EventSourceArn": "arn:aws:sqs:us-west-2:123456789012:my-queue", + "FunctionName": "my-function", + "BatchSize": 5 + }, + "output": { + "UUID": "a1b2c3d4-5678-90ab-cdef-11111EXAMPLE", + "StateTransitionReason": "USER_INITIATED", + "LastModified": 1.569284520333E9, + "BatchSize": 5, + "State": "Creating", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "EventSourceArn": "arn:aws:sqs:us-west-2:123456789012:my-queue" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2015-03-31/event-source-mappings", @@ -2611,6 +2703,12 @@ "smithy.api#documentation": "

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" } }, + "Tags": { + "target": "com.amazonaws.lambda#Tags", + "traits": { + "smithy.api#documentation": "

A list of tags to apply to the event source mapping.
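A sketch of the new Tags member on CreateEventSourceMapping, re-using the lambda client from the previous sketch; the queue ARN and tag values are placeholders.

// Sketch only: create an SQS event source mapping and tag it at creation time.
let mapping = try await lambda.createEventSourceMapping(.init(
    batchSize: 5,
    eventSourceArn: "arn:aws:sqs:us-east-2:123456789012:my-queue",
    functionName: "my-function",
    tags: ["stage": "prod"]                             // applied via the new Tags member
))
print(mapping.state ?? "", mapping.eventSourceMappingArn ?? "")   // e.g. "Creating" plus the new ARN field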

" + } + }, "TumblingWindowInSeconds": { "target": "com.amazonaws.lambda#TumblingWindowInSeconds", "traits": { @@ -2720,7 +2818,67 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The\n deployment package is a .zip file archive or container image that contains your function code. The execution role\n grants the function permission to use Amazon Web Servicesservices, such as Amazon CloudWatch Logs for log\n streaming and X-Ray for request tracing.

\n

If the deployment package is a container\n image, then you set the package type to Image. For a container image, the code property\n must include the URI of a container image in the Amazon ECR registry. You do not need to specify the\n handler and runtime properties.

\n

If the deployment package is a .zip file archive, then\n you set the package type to Zip. For a .zip file archive, the code property specifies the location of\n the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must\n be compatible with the target instruction set architecture of the function (x86-64 or\n arm64). If you do not specify the architecture, then the default value is\n x86-64.

\n

When you create a function, Lambda provisions an instance of the function and its supporting\n resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't\n invoke or modify the function. The State, StateReason, and StateReasonCode\n fields in the response from GetFunctionConfiguration indicate when the function is ready to\n invoke. For more information, see Lambda function states.

\n

A function has an unpublished version, and can have published versions and aliases. The unpublished version\n changes when you update your function's code and configuration. A published version is a snapshot of your function\n code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be\n changed to map to a different version. Use the Publish parameter to create version 1 of\n your function from its initial configuration.

\n

The other parameters let you configure version-specific and function-level settings. You can modify\n version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply\n to both the unpublished and published versions of the function, and include tags (TagResource)\n and per-function concurrency limits (PutFunctionConcurrency).

\n

You can use code signing if your deployment package is a .zip file archive. To enable code signing for this\n function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with\n UpdateFunctionCode, Lambda checks that the code package has a valid signature from\n a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted\n publishers for this function.

\n

If another Amazon Web Services account or an Amazon Web Servicesservice invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias.

\n

To invoke your function directly, use Invoke. To invoke your function in response to events\n in other Amazon Web Servicesservices, create an event source mapping (CreateEventSourceMapping),\n or configure a function trigger in the other service. For more information, see Invoking Lambda\n functions.

", + "smithy.api#documentation": "

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The\n deployment package is a .zip file archive or container image that contains your function code. The execution role\n grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log\n streaming and X-Ray for request tracing.

\n

If the deployment package is a container\n image, then you set the package type to Image. For a container image, the code property\n must include the URI of a container image in the Amazon ECR registry. You do not need to specify the\n handler and runtime properties.

\n

If the deployment package is a .zip file archive, then\n you set the package type to Zip. For a .zip file archive, the code property specifies the location of\n the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must\n be compatible with the target instruction set architecture of the function (x86-64 or\n arm64). If you do not specify the architecture, then the default value is\n x86-64.

\n

When you create a function, Lambda provisions an instance of the function and its supporting\n resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't\n invoke or modify the function. The State, StateReason, and StateReasonCode\n fields in the response from GetFunctionConfiguration indicate when the function is ready to\n invoke. For more information, see Lambda function states.

\n

A function has an unpublished version, and can have published versions and aliases. The unpublished version\n changes when you update your function's code and configuration. A published version is a snapshot of your function\n code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be\n changed to map to a different version. Use the Publish parameter to create version 1 of\n your function from its initial configuration.

\n

The other parameters let you configure version-specific and function-level settings. You can modify\n version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply\n to both the unpublished and published versions of the function, and include tags (TagResource)\n and per-function concurrency limits (PutFunctionConcurrency).

\n

You can use code signing if your deployment package is a .zip file archive. To enable code signing for this\n function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with\n UpdateFunctionCode, Lambda checks that the code package has a valid signature from\n a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted\n publishers for this function.

\n

If another Amazon Web Services account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias.

\n

To invoke your function directly, use Invoke. To invoke your function in response to events\n in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping),\n or configure a function trigger in the other service. For more information, see Invoking Lambda\n functions.

", + "smithy.api#examples": [ + { + "title": "To create a function", + "documentation": "The following example creates a function with a deployment package in Amazon S3 and enables X-Ray tracing and environment variable encryption.", + "input": { + "FunctionName": "my-function", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Handler": "index.handler", + "Code": { + "S3Bucket": "my-bucket-1xpuxmplzrlbh", + "S3Key": "function.zip" + }, + "Description": "Process image objects from Amazon S3.", + "Timeout": 15, + "MemorySize": 256, + "Publish": true, + "Environment": { + "Variables": { + "BUCKET": "my-bucket-1xpuxmplzrlbh", + "PREFIX": "inbound" + } + }, + "KMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b0844d6c-xmpl-4463-97a4-d49f50839966", + "TracingConfig": { + "Mode": "Active" + }, + "Tags": { + "DEPARTMENT": "Assets" + } + }, + "output": { + "FunctionName": "my-function", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Handler": "index.handler", + "CodeSize": 5797206, + "Description": "Process image objects from Amazon S3.", + "Timeout": 15, + "MemorySize": 256, + "LastModified": "2020-04-10T19:06:32.563+0000", + "CodeSha256": "YFgDgEKG3ugvF1+pX64gV6tu9qNuIYNUdgJm8nCxsm4=", + "Version": "1", + "Environment": { + "Variables": { + "PREFIX": "inbound", + "BUCKET": "my-bucket-1xpuxmplzrlbh" + } + }, + "KMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b0844d6c-xmpl-4463-97a4-d49f50839966", + "TracingConfig": { + "Mode": "Active" + }, + "RevisionId": "b75dcd81-xmpl-48a8-a75a-93ba8b5b9727", + "State": "Active", + "LastUpdateStatus": "Successful" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2015-03-31/functions", @@ -3059,6 +3217,16 @@ ], "traits": { "smithy.api#documentation": "

Deletes a Lambda function alias.

", + "smithy.api#examples": [ + { + "title": "To delete a Lambda function alias", + "documentation": "The following example deletes an alias named BLUE from a function named my-function", + "input": { + "FunctionName": "my-function", + "Name": "BLUE" + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2015-03-31/functions/{FunctionName}/aliases/{Name}", @@ -3223,7 +3391,17 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter.\n Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit\n permissions for DeleteAlias.

\n

To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Servicesservices and resources that invoke your function\n directly, delete the trigger in the service where you originally configured it.

", + "smithy.api#documentation": "

Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter.\n Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit\n permissions for DeleteAlias.

\n

To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services services and resources that invoke your function\n directly, delete the trigger in the service where you originally configured it.

", + "smithy.api#examples": [ + { + "title": "To delete a version of a Lambda function", + "documentation": "The following example deletes version 1 of a Lambda function named my-function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "1" + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2015-03-31/functions/{FunctionName}", @@ -3311,6 +3489,15 @@ ], "traits": { "smithy.api#documentation": "

Removes a concurrent execution limit from a function.

", + "smithy.api#examples": [ + { + "title": "To remove the reserved concurrent execution limit from a function", + "documentation": "The following example deletes the reserved concurrent execution limit from a function named my-function.", + "input": { + "FunctionName": "my-function" + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2017-10-31/functions/{FunctionName}/concurrency", @@ -3361,6 +3548,16 @@ ], "traits": { "smithy.api#documentation": "

Deletes the configuration for asynchronous invocation for a function, version, or alias.

\n

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

", + "smithy.api#examples": [ + { + "title": "To delete an asynchronous invocation configuration", + "documentation": "The following example deletes the asynchronous invocation configuration for the GREEN alias of a function named my-function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "GREEN" + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2019-09-25/functions/{FunctionName}/event-invoke-config", @@ -3486,6 +3683,16 @@ ], "traits": { "smithy.api#documentation": "

Deletes a version of an Lambda\n layer. Deleted versions can no longer be viewed or added to functions. To avoid\n breaking functions, a copy of the version remains in Lambda until no functions refer to it.

", + "smithy.api#examples": [ + { + "title": "To delete a version of a Lambda layer", + "documentation": "The following example deletes version 2 of a layer named my-layer.", + "input": { + "LayerName": "my-layer", + "VersionNumber": 2 + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", @@ -3545,6 +3752,16 @@ ], "traits": { "smithy.api#documentation": "

Deletes the provisioned concurrency configuration for a function.

", + "smithy.api#examples": [ + { + "title": "To delete a provisioned concurrency configuration", + "documentation": "The following example deletes the provisioned concurrency configuration for the GREEN alias of a function named my-function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "GREEN" + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2019-09-30/functions/{FunctionName}/provisioned-concurrency", @@ -3926,6 +4143,16 @@ } } }, + "com.amazonaws.lambda#EventSourceMappingArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 85, + "max": 120 + }, + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + } + }, "com.amazonaws.lambda#EventSourceMappingConfiguration": { "type": "structure", "members": { @@ -4102,6 +4329,12 @@ "traits": { "smithy.api#documentation": "

An object that contains details about an error related to filter criteria encryption.

" } + }, + "EventSourceMappingArn": { + "target": "com.amazonaws.lambda#EventSourceMappingArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the event source mapping.

" + } } }, "traits": { @@ -4795,6 +5028,25 @@ ], "traits": { "smithy.api#documentation": "

Retrieves details about your account's limits and usage in an Amazon Web Services Region.

", + "smithy.api#examples": [ + { + "title": "To get account settings", + "documentation": "This operation takes no parameters and returns details about storage and concurrency quotas in the current Region.", + "output": { + "AccountLimit": { + "CodeSizeUnzipped": 262144000, + "UnreservedConcurrentExecutions": 1000, + "ConcurrentExecutions": 1000, + "CodeSizeZipped": 52428800, + "TotalCodeSize": 80530636800 + }, + "AccountUsage": { + "FunctionCount": 4, + "TotalCodeSize": 9426 + } + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2016-08-19/account-settings", @@ -4853,6 +5105,23 @@ ], "traits": { "smithy.api#documentation": "

Returns details about a Lambda function alias.

", + "smithy.api#examples": [ + { + "title": "To get a Lambda function alias", + "documentation": "The following example returns details about an alias named BLUE for a function named my-function", + "input": { + "FunctionName": "my-function", + "Name": "BLUE" + }, + "output": { + "AliasArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function:BLUE", + "Name": "BLUE", + "FunctionVersion": "3", + "RevisionId": "594f41fb-xmpl-4c20-95c7-6ca5f2a92c93", + "Description": "Production environment BLUE." + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2015-03-31/functions/{FunctionName}/aliases/{Name}", @@ -5014,6 +5283,52 @@ ], "traits": { "smithy.api#documentation": "

Returns information about the function or function version, with a link to download the deployment package\n that's valid for 10 minutes. If you specify a function version, only details that are specific to that version are\n returned.

", + "smithy.api#examples": [ + { + "title": "To get a Lambda function", + "documentation": "The following example returns code and configuration details for version 1 of a function named my-function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "1" + }, + "output": { + "Configuration": { + "FunctionName": "my-function", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Handler": "index.handler", + "CodeSize": 5797206, + "Description": "Process image objects from Amazon S3.", + "Timeout": 15, + "MemorySize": 256, + "LastModified": "2020-04-10T19:06:32.563+0000", + "CodeSha256": "YFgDgEKG3ugvF1+pX64gV6tu9qNuIYNUdgJm8nCxsm4=", + "Version": "$LATEST", + "Environment": { + "Variables": { + "PREFIX": "inbound", + "BUCKET": "my-bucket-1xpuxmplzrlbh" + } + }, + "KMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b0844d6c-xmpl-4463-97a4-d49f50839966", + "TracingConfig": { + "Mode": "Active" + }, + "RevisionId": "b75dcd81-xmpl-48a8-a75a-93ba8b5b9727", + "State": "Active", + "LastUpdateStatus": "Successful" + }, + "Code": { + "RepositoryType": "S3", + "Location": "https://awslambda-us-west-2-tasks.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-function-e7d9d1ed-xmpl-4f79-904a-4b87f2681f30?versionId=sH3TQwBOaUy..." + }, + "Tags": { + "DEPARTMENT": "Assets" + } + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2015-03-31/functions/{FunctionName}", @@ -5205,6 +5520,18 @@ ], "traits": { "smithy.api#documentation": "

Returns details about the reserved concurrency configuration for a function. To set a concurrency limit for a\n function, use PutFunctionConcurrency.

", + "smithy.api#examples": [ + { + "title": "To get the reserved concurrency setting for a function", + "documentation": "The following example returns the reserved concurrency setting for a function named my-function.", + "input": { + "FunctionName": "my-function" + }, + "output": { + "ReservedConcurrentExecutions": 250 + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2019-09-30/functions/{FunctionName}/concurrency", @@ -5266,6 +5593,43 @@ ], "traits": { "smithy.api#documentation": "

Returns the version-specific settings of a Lambda function or version. The output includes only options that\n can vary between versions of a function. To modify these settings, use UpdateFunctionConfiguration.

\n

To get all of a function's details, including function-level settings, use GetFunction.

", + "smithy.api#examples": [ + { + "title": "To get a Lambda function's event source mapping", + "documentation": "The following example returns and configuration details for version 1 of a function named my-function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "1" + }, + "output": { + "FunctionName": "my-function", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Handler": "index.handler", + "CodeSize": 5797206, + "Description": "Process image objects from Amazon S3.", + "Timeout": 15, + "MemorySize": 256, + "LastModified": "2020-04-10T19:06:32.563+0000", + "CodeSha256": "YFgDgEKG3ugvF1+pX64gV6tu9qNuIYNUdgJm8nCxsm4=", + "Version": "$LATEST", + "Environment": { + "Variables": { + "PREFIX": "inbound", + "BUCKET": "my-bucket-1xpuxmplzrlbh" + } + }, + "KMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b0844d6c-xmpl-4463-97a4-d49f50839966", + "TracingConfig": { + "Mode": "Active" + }, + "RevisionId": "b75dcd81-xmpl-48a8-a75a-93ba8b5b9727", + "State": "Active", + "LastUpdateStatus": "Successful" + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2015-03-31/functions/{FunctionName}/configuration", @@ -5709,6 +6073,33 @@ ], "traits": { "smithy.api#documentation": "

Returns information about a version of a Lambda\n layer, with a link to download the layer archive\n that's valid for 10 minutes.

", + "smithy.api#examples": [ + { + "title": "To get information about a Lambda layer version", + "documentation": "The following example returns information for version 1 of a layer named my-layer.", + "input": { + "LayerName": "my-layer", + "VersionNumber": 1 + }, + "output": { + "Content": { + "Location": "https://awslambda-us-east-2-layers.s3.us-east-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb-ff77-4b0a-ad92-5b78a716a96a?versionId=27iWyA73cCAYqyH...", + "CodeSha256": "tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=", + "CodeSize": 169 + }, + "LayerArn": "arn:aws:lambda:us-east-2:123456789012:layer:my-layer", + "LayerVersionArn": "arn:aws:lambda:us-east-2:123456789012:layer:my-layer:1", + "Description": "My Python layer", + "CreatedDate": "2018-11-14T23:03:52.894+0000", + "Version": 1, + "LicenseInfo": "MIT", + "CompatibleRuntimes": [ + "python3.6", + "python3.7" + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", @@ -5740,6 +6131,30 @@ ], "traits": { "smithy.api#documentation": "

Returns information about a version of a Lambda\n layer, with a link to download the layer archive\n that's valid for 10 minutes.

", + "smithy.api#examples": [ + { + "title": "To get information about a Lambda layer version", + "documentation": "The following example returns information about the layer version with the specified Amazon Resource Name (ARN).", + "input": { + "Arn": "arn:aws:lambda:ca-central-1:123456789012:layer:blank-python-lib:3" + }, + "output": { + "Content": { + "Location": "https://awslambda-us-east-2-layers.s3.us-east-2.amazonaws.com/snapshots/123456789012/blank-python-lib-e5212378-xmpl-44ee-8398-9d8ec5113949?versionId=WbZnvf...", + "CodeSha256": "6x+xmpl/M3BnQUk7gS9sGmfeFsR/npojXoA3fZUv4eU=", + "CodeSize": 9529009 + }, + "LayerArn": "arn:aws:lambda:us-east-2:123456789012:layer:blank-python-lib", + "LayerVersionArn": "arn:aws:lambda:us-east-2:123456789012:layer:blank-python-lib:3", + "Description": "Dependencies for the blank-python sample app.", + "CreatedDate": "2020-03-31T00:35:18.949+0000", + "Version": 3, + "CompatibleRuntimes": [ + "python3.8" + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2018-10-31/layers?find=LayerVersion", @@ -5948,6 +6363,20 @@ ], "traits": { "smithy.api#documentation": "

Returns the resource-based IAM policy for a function, version, or alias.

", + "smithy.api#examples": [ + { + "title": "To retrieve a Lambda function policy", + "documentation": "The following example returns the resource-based policy for version 1 of a Lambda function named my-function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "1" + }, + "output": { + "Policy": "{\"Version\":\"2012-10-17\",\"Id\":\"default\",\"Statement\":[{\"Sid\":\"xaccount\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::123456789012:root\"},\"Action\":\"lambda:InvokeFunction\",\"Resource\":\"arn:aws:lambda:us-east-2:123456789012:function:my-function:1\"}]}", + "RevisionId": "4843f2f6-7c59-4fda-b484-afd0bc0e22b8" + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2015-03-31/functions/{FunctionName}/policy", @@ -6025,6 +6454,38 @@ ], "traits": { "smithy.api#documentation": "

Retrieves the provisioned concurrency configuration for a function's alias or version.

", + "smithy.api#examples": [ + { + "title": "To get a provisioned concurrency configuration", + "documentation": "The following example returns details for the provisioned concurrency configuration for the BLUE alias of the specified function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "BLUE" + }, + "output": { + "RequestedProvisionedConcurrentExecutions": 100, + "AvailableProvisionedConcurrentExecutions": 100, + "AllocatedProvisionedConcurrentExecutions": 100, + "Status": "READY", + "LastModified": "2019-12-31T20:28:49+0000" + } + }, + { + "title": "To view a provisioned concurrency configuration", + "documentation": "The following example displays details for the provisioned concurrency configuration for the BLUE alias of the specified function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "BLUE" + }, + "output": { + "RequestedProvisionedConcurrentExecutions": 100, + "AvailableProvisionedConcurrentExecutions": 100, + "AllocatedProvisionedConcurrentExecutions": 100, + "Status": "READY", + "LastModified": "2019-12-31T20:28:49+0000" + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2019-09-30/functions/{FunctionName}/provisioned-concurrency", @@ -6634,6 +7095,35 @@ ], "traits": { "smithy.api#documentation": "

Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or\n asynchronously. By default, Lambda invokes your function synchronously (i.e. the InvocationType\n is RequestResponse). To invoke a function asynchronously, set InvocationType to\n Event. Lambda passes the ClientContext object to your function for\n synchronous invocations only.

\n

For synchronous invocation,\n details about the function response, including errors, are included in the response body and headers. For either\n invocation type, you can find more information in the execution log and trace.

\n

When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type,\n client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an\n error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in\n Lambda.

\n

For asynchronous invocation,\n Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity\n to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple\n times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

\n

The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that\n prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and\n configuration. For example, Lambda returns TooManyRequestsException if running the\n function would cause you to exceed a concurrency limit at either the account level\n (ConcurrentInvocationLimitExceeded) or function level\n (ReservedFunctionConcurrentInvocationLimitExceeded).

\n

For functions with a long timeout, your client might disconnect during synchronous invocation while it waits\n for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long\n connections with timeout or keep-alive settings.

\n

This operation requires permission for the lambda:InvokeFunction action. For details on how to set up\n permissions for cross-account invocations, see Granting function\n access to other accounts.
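To make the synchronous/asynchronous distinction concrete, here is a hedged Soto sketch of both invocation types; the function name and qualifier are placeholders, and the JSON payload is omitted because its Swift type differs between Soto releases:

import SotoLambda

func invokeExamples(client: AWSClient) async throws {
    let lambda = Lambda(client: client, region: .uswest2)

    // Synchronous: the default InvocationType is RequestResponse, so the call
    // waits for the function result (StatusCode 200 on success).
    let sync = try await lambda.invoke(.init(functionName: "my-function", invocationType: .requestResponse, qualifier: "1"))
    print("Synchronous status:", sync.statusCode ?? 0)

    // Asynchronous: InvocationType Event queues the event and returns StatusCode 202.
    let queued = try await lambda.invoke(.init(functionName: "my-function", invocationType: .event, qualifier: "1"))
    print("Asynchronous status:", queued.statusCode ?? 0)
}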

", + "smithy.api#examples": [ + { + "title": "To invoke a Lambda function", + "documentation": "The following example invokes version 1 of a function named my-function with an empty event payload.", + "input": { + "FunctionName": "my-function", + "Payload": "{}", + "Qualifier": "1" + }, + "output": { + "StatusCode": 200, + "Payload": "200 SUCCESS" + } + }, + { + "title": "To invoke a Lambda function asynchronously", + "documentation": "The following example invokes version 1 of a function named my-function asynchronously.", + "input": { + "FunctionName": "my-function", + "Payload": "{}", + "InvocationType": "Event", + "Qualifier": "1" + }, + "output": { + "StatusCode": 202, + "Payload": "" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2015-03-31/functions/{FunctionName}/invocations", @@ -6669,6 +7159,19 @@ "traits": { "smithy.api#deprecated": {}, "smithy.api#documentation": "\n

For asynchronous function invocation, use Invoke.

\n
\n

Invokes a function asynchronously.

\n \n

If you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active tracing. Trace ID is not \n propagated to the function, even if X-Ray active tracing is turned on.

\n
", + "smithy.api#examples": [ + { + "title": "To invoke a Lambda function asynchronously", + "documentation": "The following example invokes a Lambda function asynchronously", + "input": { + "FunctionName": "my-function", + "InvokeArgs": "{}" + }, + "output": { + "Status": 202 + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2014-11-13/functions/{FunctionName}/invoke-async", @@ -7512,6 +8015,38 @@ ], "traits": { "smithy.api#documentation": "

Returns a list of aliases\n for a Lambda function.

", + "smithy.api#examples": [ + { + "title": "To list a function's aliases", + "documentation": "The following example returns a list of aliases for a function named my-function.", + "input": { + "FunctionName": "my-function" + }, + "output": { + "Aliases": [ + { + "AliasArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function:BETA", + "RevisionId": "a410117f-xmpl-494e-8035-7e204bb7933b", + "FunctionVersion": "2", + "Name": "BLUE", + "Description": "Production environment BLUE.", + "RoutingConfig": { + "AdditionalVersionWeights": { + "1": 0.7 + } + } + }, + { + "AliasArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function:LIVE", + "RevisionId": "21d40116-xmpl-40ba-9360-3ea284da1bb5", + "FunctionVersion": "1", + "Name": "GREEN", + "Description": "Production environment GREEN." + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2015-03-31/functions/{FunctionName}/aliases", @@ -7679,6 +8214,28 @@ ], "traits": { "smithy.api#documentation": "

Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a\n single event source.

", + "smithy.api#examples": [ + { + "title": "To list the event source mappings for a function", + "documentation": "The following example returns a list of the event source mappings for a function named my-function.", + "input": { + "FunctionName": "my-function" + }, + "output": { + "EventSourceMappings": [ + { + "UUID": "a1b2c3d4-5678-90ab-cdef-11111EXAMPLE", + "StateTransitionReason": "USER_INITIATED", + "LastModified": 1.569284520333E9, + "BatchSize": 5, + "State": "Enabled", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "EventSourceArn": "arn:aws:sqs:us-west-2:123456789012:mySQSqueue" + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2015-03-31/event-source-mappings", @@ -7772,6 +8329,31 @@ ], "traits": { "smithy.api#documentation": "

Retrieves a list of configurations for asynchronous invocation for a function.

\n

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

", + "smithy.api#examples": [ + { + "title": "To view a list of asynchronous invocation configurations", + "documentation": "The following example returns a list of asynchronous invocation configurations for a function named my-function.", + "input": { + "FunctionName": "my-function" + }, + "output": { + "FunctionEventInvokeConfigs": [ + { + "LastModified": 1.577824406719E9, + "FunctionArn": "arn:aws:lambda:us-east-2:123456789012:function:my-function:GREEN", + "MaximumRetryAttempts": 2, + "MaximumEventAgeInSeconds": 1800 + }, + { + "LastModified": 1.577824396653E9, + "FunctionArn": "arn:aws:lambda:us-east-2:123456789012:function:my-function:BLUE", + "MaximumRetryAttempts": 0, + "MaximumEventAgeInSeconds": 3600 + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2019-09-25/functions/{FunctionName}/event-invoke-config/list", @@ -7944,6 +8526,58 @@ ], "traits": { "smithy.api#documentation": "

Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50\n functions per call.

\n

Set FunctionVersion to ALL to include all published versions of each function in\n addition to the unpublished version.

\n \n

The ListFunctions operation returns a subset of the FunctionConfiguration fields.\n To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason,\n LastUpdateStatusReasonCode, RuntimeVersionConfig) for a function or version, use GetFunction.
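A small Soto sketch of reading one page of ListFunctions output; the client setup and region are illustrative assumptions:

import SotoLambda

func listFunctionNames(client: AWSClient) async throws {
    let lambda = Lambda(client: client, region: .uswest2)
    // Each page returns at most 50 functions; pass `nextMarker` back as `marker`
    // to fetch the next page.
    let page = try await lambda.listFunctions(.init(functionVersion: .all, maxItems: 50))
    for function in page.functions ?? [] {
        print(function.functionName ?? "?", "-", function.runtime?.rawValue ?? "")
    }
}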

\n
", + "smithy.api#examples": [ + { + "title": "To get a list of Lambda functions", + "documentation": "This operation returns a list of Lambda functions.", + "output": { + "NextMarker": "", + "Functions": [ + { + "TracingConfig": { + "Mode": "PassThrough" + }, + "Version": "$LATEST", + "CodeSha256": "dBG9m8SGdmlEjw/JYXlhhvCrAv5TxvXsbL/RMr0fT/I=", + "FunctionName": "helloworld", + "MemorySize": 128, + "RevisionId": "1718e831-badf-4253-9518-d0644210af7b", + "CodeSize": 294, + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:helloworld", + "Handler": "helloworld.handler", + "Role": "arn:aws:iam::123456789012:role/service-role/MyTestFunction-role-zgur6bf4", + "Timeout": 3, + "LastModified": "2019-09-23T18:32:33.857+0000", + "Runtime": "nodejs10.x", + "Description": "" + }, + { + "TracingConfig": { + "Mode": "PassThrough" + }, + "Version": "$LATEST", + "CodeSha256": "sU0cJ2/hOZevwV/lTxCuQqK3gDZP3i8gUoqUUVRmY6E=", + "FunctionName": "my-function", + "VpcConfig": { + "SubnetIds": [], + "VpcId": "", + "SecurityGroupIds": [] + }, + "MemorySize": 256, + "RevisionId": "93017fc9-59cb-41dc-901b-4845ce4bf668", + "CodeSize": 266, + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "Handler": "index.handler", + "Role": "arn:aws:iam::123456789012:role/service-role/helloWorldPython-role-uy3l9qyq", + "Timeout": 3, + "LastModified": "2019-10-01T16:47:28.490+0000", + "Runtime": "nodejs10.x", + "Description": "" + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2015-03-31/functions", @@ -8122,6 +8756,37 @@ ], "traits": { "smithy.api#documentation": "

Lists the versions of a Lambda\n layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only\n versions that indicate that they're compatible with that runtime. Specify a compatible architecture to include only \n layer versions that are compatible with that architecture.

", + "smithy.api#examples": [ + { + "title": "To list versions of a layer", + "documentation": "The following example displays information about the versions for the layer named blank-java-lib", + "input": { + "LayerName": "blank-java-lib" + }, + "output": { + "LayerVersions": [ + { + "LayerVersionArn": "arn:aws:lambda:us-east-2:123456789012:layer:blank-java-lib:7", + "Version": 7, + "Description": "Dependencies for the blank-java sample app.", + "CreatedDate": "2020-03-18T23:38:42.284+0000", + "CompatibleRuntimes": [ + "java8" + ] + }, + { + "LayerVersionArn": "arn:aws:lambda:us-east-2:123456789012:layer:blank-java-lib:6", + "Version": 6, + "Description": "Dependencies for the blank-java sample app.", + "CreatedDate": "2020-03-17T07:24:21.960+0000", + "CompatibleRuntimes": [ + "java8" + ] + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2018-10-31/layers/{LayerName}/versions", @@ -8220,6 +8885,33 @@ ], "traits": { "smithy.api#documentation": "

Lists Lambda\n layers and shows information about the latest version of each. Specify a\n runtime\n identifier to list only layers that indicate that they're compatible with that\n runtime. Specify a compatible architecture to include only layers that are compatible with\n that instruction set architecture.

", + "smithy.api#examples": [ + { + "title": "To list the layers that are compatible with your function's runtime", + "documentation": "The following example returns information about layers that are compatible with the Python 3.7 runtime.", + "input": { + "CompatibleRuntime": "python3.7" + }, + "output": { + "Layers": [ + { + "LayerName": "my-layer", + "LayerArn": "arn:aws:lambda:us-east-2:123456789012:layer:my-layer", + "LatestMatchingVersion": { + "LayerVersionArn": "arn:aws:lambda:us-east-2:123456789012:layer:my-layer:2", + "Version": 2, + "Description": "My layer", + "CreatedDate": "2018-11-15T00:37:46.592+0000", + "CompatibleRuntimes": [ + "python3.6", + "python3.7" + ] + } + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2018-10-31/layers", @@ -8313,6 +9005,35 @@ ], "traits": { "smithy.api#documentation": "

Retrieves a list of provisioned concurrency configurations for a function.

", + "smithy.api#examples": [ + { + "title": "To get a list of provisioned concurrency configurations", + "documentation": "The following example returns a list of provisioned concurrency configurations for a function named my-function.", + "input": { + "FunctionName": "my-function" + }, + "output": { + "ProvisionedConcurrencyConfigs": [ + { + "FunctionArn": "arn:aws:lambda:us-east-2:123456789012:function:my-function:GREEN", + "RequestedProvisionedConcurrentExecutions": 100, + "AvailableProvisionedConcurrentExecutions": 100, + "AllocatedProvisionedConcurrentExecutions": 100, + "Status": "READY", + "LastModified": "2019-12-31T20:29:00+0000" + }, + { + "FunctionArn": "arn:aws:lambda:us-east-2:123456789012:function:my-function:BLUE", + "RequestedProvisionedConcurrentExecutions": 100, + "AvailableProvisionedConcurrentExecutions": 100, + "AllocatedProvisionedConcurrentExecutions": 100, + "Status": "READY", + "LastModified": "2019-12-31T20:28:49+0000" + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2019-09-30/functions/{FunctionName}/provisioned-concurrency?List=ALL", @@ -8399,7 +9120,22 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a function's tags. You can\n also view tags with GetFunction.

", + "smithy.api#documentation": "

Returns a function, event source mapping, or code signing configuration's tags. You can\n also view function tags with GetFunction.

", + "smithy.api#examples": [ + { + "title": "To retrieve the list of tags for a Lambda function", + "documentation": "The following example displays the tags attached to the my-function Lambda function.", + "input": { + "Resource": "arn:aws:lambda:us-west-2:123456789012:function:my-function" + }, + "output": { + "Tags": { + "Category": "Web Tools", + "Department": "Sales" + } + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2017-03-31/tags/{Resource}", @@ -8411,9 +9147,9 @@ "type": "structure", "members": { "Resource": { - "target": "com.amazonaws.lambda#FunctionArn", + "target": "com.amazonaws.lambda#TaggableResource", "traits": { - "smithy.api#documentation": "

The function's Amazon Resource Name (ARN). \n Note: Lambda does not support adding tags to aliases or versions.

", + "smithy.api#documentation": "

The resource's Amazon Resource Name (ARN). \n Note: Lambda does not support adding tags to function aliases or versions.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -8461,6 +9197,69 @@ ], "traits": { "smithy.api#documentation": "

Returns a list of versions,\n with the version-specific configuration of each. Lambda returns up to 50 versions per call.

", + "smithy.api#examples": [ + { + "title": "To list versions of a function", + "documentation": "The following example returns a list of versions of a function named my-function", + "input": { + "FunctionName": "my-function" + }, + "output": { + "Versions": [ + { + "FunctionName": "my-function", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Handler": "index.handler", + "CodeSize": 5797206, + "Description": "Process image objects from Amazon S3.", + "Timeout": 15, + "MemorySize": 256, + "LastModified": "2020-04-10T19:06:32.563+0000", + "CodeSha256": "YFgDgEKG3ugvF1+pX64gV6tu9qNuIYNUdgJm8nCxsm4=", + "Version": "$LATEST", + "Environment": { + "Variables": { + "PREFIX": "inbound", + "BUCKET": "my-bucket-1xpuxmplzrlbh" + } + }, + "KMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b0844d6c-xmpl-4463-97a4-d49f50839966", + "TracingConfig": { + "Mode": "Active" + }, + "RevisionId": "850ca006-2d98-4ff4-86db-8766e9d32fe9" + }, + { + "FunctionName": "my-function", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Handler": "index.handler", + "CodeSize": 5797206, + "Description": "Process image objects from Amazon S3.", + "Timeout": 5, + "MemorySize": 256, + "LastModified": "2020-04-10T19:06:32.563+0000", + "CodeSha256": "YFgDgEKG3ugvF1+pX64gV6tu9qNuIYNUdgJm8nCxsm4=", + "Version": "1", + "Environment": { + "Variables": { + "PREFIX": "inbound", + "BUCKET": "my-bucket-1xpuxmplzrlbh" + } + }, + "KMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b0844d6c-xmpl-4463-97a4-d49f50839966", + "TracingConfig": { + "Mode": "Active" + }, + "RevisionId": "b75dcd81-xmpl-48a8-a75a-93ba8b5b9727" + } + ] + } + } + ], "smithy.api#http": { "method": "GET", "uri": "/2015-03-31/functions/{FunctionName}/versions", @@ -9054,6 +9853,42 @@ ], "traits": { "smithy.api#documentation": "

Creates a Lambda\n layer from a ZIP archive. Each time you call PublishLayerVersion with the same\n layer name, a new version is created.

\n

Add layers to your function with CreateFunction or UpdateFunctionConfiguration.
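A hedged Soto sketch mirroring the S3-backed example in this model; the bucket, key, and layer names are placeholders:

import SotoLambda

func publishLayer(client: AWSClient) async throws {
    let lambda = Lambda(client: client, region: .uswest2)
    let request = Lambda.PublishLayerVersionRequest(
        content: .init(s3Bucket: "lambda-layers-us-west-2-123456789012", s3Key: "layer.zip"),
        description: "My Python layer",
        layerName: "my-layer",
        licenseInfo: "MIT"
    )
    // Calling this again with the same layer name creates version 2, 3, and so on.
    let published = try await lambda.publishLayerVersion(request)
    print("Published", published.layerVersionArn ?? "?", "as version", published.version ?? 0)
}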

", + "smithy.api#examples": [ + { + "title": "To create a Lambda layer version", + "documentation": "The following example creates a new Python library layer version. The command retrieves the layer content a file named layer.zip in the specified S3 bucket.", + "input": { + "LayerName": "my-layer", + "Description": "My Python layer", + "Content": { + "S3Bucket": "lambda-layers-us-west-2-123456789012", + "S3Key": "layer.zip" + }, + "CompatibleRuntimes": [ + "python3.6", + "python3.7" + ], + "LicenseInfo": "MIT" + }, + "output": { + "Content": { + "Location": "https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb-ff77-4b0a-ad92-5b78a716a96a?versionId=27iWyA73cCAYqyH...", + "CodeSha256": "tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=", + "CodeSize": 169 + }, + "LayerArn": "arn:aws:lambda:us-west-2:123456789012:layer:my-layer", + "LayerVersionArn": "arn:aws:lambda:us-west-2:123456789012:layer:my-layer:1", + "Description": "My Python layer", + "CreatedDate": "2018-11-14T23:03:52.894+0000", + "Version": 1, + "LicenseInfo": "MIT", + "CompatibleRuntimes": [ + "python3.6", + "python3.7" + ] + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2018-10-31/layers/{LayerName}/versions", @@ -9204,6 +10039,44 @@ ], "traits": { "smithy.api#documentation": "

Creates a version from the\n current code and configuration of a function. Use versions to create a snapshot of your function code and\n configuration that doesn't change.

\n

Lambda doesn't publish a version if the function's configuration and code haven't changed since the last\n version. Use UpdateFunctionCode or UpdateFunctionConfiguration to update the\n function before publishing a version.

\n

Clients can invoke versions directly or with an alias. To create an alias, use CreateAlias.

", + "smithy.api#examples": [ + { + "title": "To publish a version of a Lambda function", + "documentation": "This operation publishes a version of a Lambda function", + "input": { + "FunctionName": "myFunction", + "CodeSha256": "", + "Description": "" + }, + "output": { + "FunctionName": "my-function", + "FunctionArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "Runtime": "nodejs12.x", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Handler": "index.handler", + "CodeSize": 5797206, + "Description": "Process image objects from Amazon S3.", + "Timeout": 5, + "MemorySize": 256, + "LastModified": "2020-04-10T19:06:32.563+0000", + "CodeSha256": "YFgDgEKG3ugvF1+pX64gV6tu9qNuIYNUdgJm8nCxsm4=", + "Version": "1", + "Environment": { + "Variables": { + "PREFIX": "inbound", + "BUCKET": "my-bucket-1xpuxmplzrlbh" + } + }, + "KMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b0844d6c-xmpl-4463-97a4-d49f50839966", + "TracingConfig": { + "Mode": "Active" + }, + "RevisionId": "b75dcd81-xmpl-48a8-a75a-93ba8b5b9727", + "State": "Active", + "LastUpdateStatus": "Successful" + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2015-03-31/functions/{FunctionName}/versions", @@ -9354,6 +10227,19 @@ ], "traits": { "smithy.api#documentation": "

Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency\n level.

\n

Concurrency settings apply to the function as a whole, including all published versions and the unpublished\n version. Reserving concurrency both ensures that your function has capacity to process the specified number of\n events simultaneously, and prevents it from scaling beyond that level. Use GetFunction to see\n the current setting for a function.

\n

Use GetAccountSettings to see your Regional concurrency limit. You can reserve concurrency\n for as many functions as you like, as long as you leave at least 100 simultaneous executions unreserved for\n functions that aren't configured with a per-function limit. For more information, see Lambda function scaling.
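A one-call Soto sketch of reserving concurrency for a function; the function name and the limit of 100 are placeholders:

import SotoLambda

func reserveConcurrency(client: AWSClient) async throws {
    let lambda = Lambda(client: client, region: .uswest2)
    // Reserves 100 concurrent executions for the function as a whole,
    // leaving the remainder of the account limit unreserved.
    let result = try await lambda.putFunctionConcurrency(
        .init(functionName: "my-function", reservedConcurrentExecutions: 100)
    )
    print("Reserved concurrency:", result.reservedConcurrentExecutions ?? 0)
}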

", + "smithy.api#examples": [ + { + "title": "To configure a reserved concurrency limit for a function", + "documentation": "The following example configures 100 reserved concurrent executions for the my-function function.", + "input": { + "FunctionName": "my-function", + "ReservedConcurrentExecutions": 100 + }, + "output": { + "ReservedConcurrentExecutions": 100 + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/2017-10-31/functions/{FunctionName}/concurrency", @@ -9557,6 +10443,23 @@ ], "traits": { "smithy.api#documentation": "

Adds a provisioned concurrency configuration to a function's alias or version.

", + "smithy.api#examples": [ + { + "title": "To allocate provisioned concurrency", + "documentation": "The following example allocates 100 provisioned concurrency for the BLUE alias of the specified function.", + "input": { + "FunctionName": "my-function", + "Qualifier": "BLUE", + "ProvisionedConcurrentExecutions": 100 + }, + "output": { + "RequestedProvisionedConcurrentExecutions": 100, + "AllocatedProvisionedConcurrentExecutions": 0, + "Status": "IN_PROGRESS", + "LastModified": "2019-11-21T19:32:12+0000" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/2019-09-30/functions/{FunctionName}/provisioned-concurrency", @@ -9835,6 +10738,17 @@ ], "traits": { "smithy.api#documentation": "

Removes a statement from the permissions policy for a version of an Lambda\n layer. For more information, see\n AddLayerVersionPermission.

", + "smithy.api#examples": [ + { + "title": "To delete layer-version permissions", + "documentation": "The following example deletes permission for an account to configure a layer version.", + "input": { + "LayerName": "my-layer", + "VersionNumber": 1, + "StatementId": "xaccount" + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy/{StatementId}", @@ -9908,7 +10822,18 @@ } ], "traits": { - "smithy.api#documentation": "

Revokes function-use permission from an Amazon Web Servicesservice or another Amazon Web Services account. You\n can get the ID of the statement from the output of GetPolicy.

", + "smithy.api#documentation": "

Revokes function-use permission from an Amazon Web Services service or another Amazon Web Services account. You\n can get the ID of the statement from the output of GetPolicy.

", + "smithy.api#examples": [ + { + "title": "To remove a Lambda function's permissions", + "documentation": "The following example removes a permissions statement named xaccount from the PROD alias of a function named my-function.", + "input": { + "FunctionName": "my-function", + "StatementId": "xaccount", + "Qualifier": "PROD" + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2015-03-31/functions/{FunctionName}/policy/{StatementId}", @@ -10991,7 +11916,19 @@ } ], "traits": { - "smithy.api#documentation": "

Adds tags to a function.

", + "smithy.api#documentation": "

Adds tags to a function, event source mapping, or code signing configuration.
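Because the Resource member now accepts the broader TaggableResource ARN format introduced below, a hedged Soto sketch of tagging an event source mapping; the ARN and tag values are placeholders:

import SotoLambda

func tagEventSourceMapping(client: AWSClient) async throws {
    let lambda = Lambda(client: client, region: .uswest2)
    // The same call also accepts function and code signing configuration ARNs.
    try await lambda.tagResource(.init(
        resource: "arn:aws:lambda:us-west-2:123456789012:event-source-mapping:14e0db71-5d35-4eb5-b288-158423ce23fe",
        tags: ["DEPARTMENT": "Department A"]
    ))
}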

", + "smithy.api#examples": [ + { + "title": "To add tags to an existing Lambda function", + "documentation": "The following example adds a tag with the key name DEPARTMENT and a value of 'Department A' to the specified Lambda function.", + "input": { + "Resource": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "Tags": { + "DEPARTMENT": "Department A" + } + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2017-03-31/tags/{Resource}", @@ -11003,9 +11940,9 @@ "type": "structure", "members": { "Resource": { - "target": "com.amazonaws.lambda#FunctionArn", + "target": "com.amazonaws.lambda#TaggableResource", "traits": { - "smithy.api#documentation": "

The function's Amazon Resource Name (ARN).

", + "smithy.api#documentation": "

The resource's Amazon Resource Name (ARN).

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -11013,7 +11950,7 @@ "Tags": { "target": "com.amazonaws.lambda#Tags", "traits": { - "smithy.api#documentation": "

A list of tags to apply to the function.

", + "smithy.api#documentation": "

A list of tags to apply to the resource.

", "smithy.api#required": {} } } @@ -11025,6 +11962,16 @@ "com.amazonaws.lambda#TagValue": { "type": "string" }, + "com.amazonaws.lambda#TaggableResource": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})$" + } + }, "com.amazonaws.lambda#Tags": { "type": "map", "key": { @@ -11258,7 +12205,19 @@ } ], "traits": { - "smithy.api#documentation": "

Removes tags from a function.

", + "smithy.api#documentation": "

Removes tags from a function, event source mapping, or code signing configuration.

", + "smithy.api#examples": [ + { + "title": "To remove tags from an existing Lambda function", + "documentation": "The following example removes the tag with the key name DEPARTMENT tag from the my-function Lambda function.", + "input": { + "Resource": "arn:aws:lambda:us-west-2:123456789012:function:my-function", + "TagKeys": [ + "DEPARTMENT" + ] + } + } + ], "smithy.api#http": { "method": "DELETE", "uri": "/2017-03-31/tags/{Resource}", @@ -11270,9 +12229,9 @@ "type": "structure", "members": { "Resource": { - "target": "com.amazonaws.lambda#FunctionArn", + "target": "com.amazonaws.lambda#TaggableResource", "traits": { - "smithy.api#documentation": "

The function's Amazon Resource Name (ARN).

", + "smithy.api#documentation": "

The resource's Amazon Resource Name (ARN).

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -11280,7 +12239,7 @@ "TagKeys": { "target": "com.amazonaws.lambda#TagKeyList", "traits": { - "smithy.api#documentation": "

A list of tag keys to remove from the function.

", + "smithy.api#documentation": "

A list of tag keys to remove from the resource.

", "smithy.api#httpQuery": "tagKeys", "smithy.api#required": {} } @@ -11320,6 +12279,34 @@ ], "traits": { "smithy.api#documentation": "

Updates the configuration of a Lambda function alias.

", + "smithy.api#examples": [ + { + "title": "To update a function alias", + "documentation": "The following example updates the alias named BLUE to send 30% of traffic to version 2 and 70% to version 1.", + "input": { + "FunctionName": "my-function", + "Name": "BLUE", + "FunctionVersion": "2", + "RoutingConfig": { + "AdditionalVersionWeights": { + "1": 0.7 + } + } + }, + "output": { + "FunctionVersion": "2", + "Name": "BLUE", + "AliasArn": "arn:aws:lambda:us-west-2:123456789012:function:my-function:BLUE", + "RevisionId": "594f41fb-xmpl-4c20-95c7-6ca5f2a92c93", + "Description": "Production environment BLUE.", + "RoutingConfig": { + "AdditionalVersionWeights": { + "1": 0.7 + } + } + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/2015-03-31/functions/{FunctionName}/aliases/{Name}", @@ -11643,6 +12630,35 @@ ], "traits": { "smithy.api#documentation": "

Updates a Lambda function's code. If code signing is enabled for the function, the code package\n must be signed by a trusted publisher. For more information, see Configuring code signing for Lambda.

\n

If the function's package type is Image, then you must specify the code package in\n ImageUri as the URI of a container image in the Amazon ECR registry.

\n

If the function's package type is Zip, then you must specify the deployment package as a .zip file\n archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide\n the function code inline using the ZipFile field.

\n

The code in the deployment package must be compatible with the target instruction set architecture of the\n function (x86-64 or arm64).

\n

The function's code is locked when you publish a version. You can't modify the code of a published version,\n only the unpublished version.

\n \n

For a function defined as a container image, Lambda resolves the image tag to an image digest. In\n Amazon ECR, if you update the image tag to a new image, Lambda does not automatically\n update the function.
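A hedged Soto sketch of the zip-archive path described above, pointing the unpublished version at a new object in Amazon S3; the bucket and key are placeholders:

import SotoLambda

func updateCodeFromS3(client: AWSClient) async throws {
    let lambda = Lambda(client: client, region: .useast2)
    let updated = try await lambda.updateFunctionCode(.init(
        functionName: "my-function",
        publish: false, // keep the change on $LATEST only
        s3Bucket: "my-bucket-1xpuxmplzrlbh",
        s3Key: "function.zip"
    ))
    print("New CodeSha256:", updated.codeSha256 ?? "")
}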

\n
", + "smithy.api#examples": [ + { + "title": "To update a Lambda function's code", + "documentation": "The following example replaces the code of the unpublished ($LATEST) version of a function named my-function with the contents of the specified zip file in Amazon S3.", + "input": { + "FunctionName": "my-function", + "S3Bucket": "my-bucket-1xpuxmplzrlbh", + "S3Key": "function.zip" + }, + "output": { + "TracingConfig": { + "Mode": "PassThrough" + }, + "CodeSha256": "PFn4S+er27qk+UuZSTKEQfNKG/XNn7QJs90mJgq6oH8=", + "FunctionName": "my-function", + "CodeSize": 308, + "RevisionId": "873282ed-xmpl-4dc8-a069-d0c647e470c6", + "MemorySize": 128, + "FunctionArn": "arn:aws:lambda:us-east-2:123456789012:function:my-function", + "Version": "$LATEST", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Timeout": 3, + "LastModified": "2019-08-14T22:26:11.234+0000", + "Handler": "index.handler", + "Runtime": "nodejs12.x", + "Description": "" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/2015-03-31/functions/{FunctionName}/code", @@ -11760,7 +12776,35 @@ } ], "traits": { - "smithy.api#documentation": "

Modify the version-specific settings of a Lambda function.

\n

When you update a function, Lambda provisions an instance of the function and its supporting\n resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify\n the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason,\n and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration\n indicate when the update is complete and the function is processing events with the new configuration. For more\n information, see Lambda\n function states.

\n

These settings can vary between versions of a function and are locked when you publish a version. You can't\n modify the configuration of a published version, only the unpublished version.

\n

To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions\n to an Amazon Web Services account or Amazon Web Servicesservice, use AddPermission.

", + "smithy.api#documentation": "

Modify the version-specific settings of a Lambda function.

\n

When you update a function, Lambda provisions an instance of the function and its supporting\n resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify\n the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason,\n and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration\n indicate when the update is complete and the function is processing events with the new configuration. For more\n information, see Lambda\n function states.

\n

These settings can vary between versions of a function and are locked when you publish a version. You can't\n modify the configuration of a published version, only the unpublished version.

\n

To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions\n to an Amazon Web Services account or Amazon Web Services service, use AddPermission.
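A hedged Soto sketch of changing version-specific settings on the unpublished version, matching the memory-size example that follows; the values are placeholders:

import SotoLambda

func raiseMemory(client: AWSClient) async throws {
    let lambda = Lambda(client: client, region: .useast2)
    let config = try await lambda.updateFunctionConfiguration(.init(
        functionName: "my-function",
        memorySize: 256,
        timeout: 30
    ))
    // Poll GetFunctionConfiguration until LastUpdateStatus is Successful.
    print("LastUpdateStatus:", config.lastUpdateStatus?.rawValue ?? "unknown")
}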

", + "smithy.api#examples": [ + { + "title": "To update a Lambda function's configuration", + "documentation": "The following example modifies the memory size to be 256 MB for the unpublished ($LATEST) version of a function named my-function.", + "input": { + "FunctionName": "my-function", + "MemorySize": 256 + }, + "output": { + "TracingConfig": { + "Mode": "PassThrough" + }, + "CodeSha256": "PFn4S+er27qk+UuZSTKEQfNKG/XNn7QJs90mJgq6oH8=", + "FunctionName": "my-function", + "CodeSize": 308, + "RevisionId": "873282ed-xmpl-4dc8-a069-d0c647e470c6", + "MemorySize": 256, + "FunctionArn": "arn:aws:lambda:us-east-2:123456789012:function:my-function", + "Version": "$LATEST", + "Role": "arn:aws:iam::123456789012:role/lambda-role", + "Timeout": 3, + "LastModified": "2019-08-14T22:26:11.234+0000", + "Handler": "index.handler", + "Runtime": "nodejs12.x", + "Description": "" + } + } + ], "smithy.api#http": { "method": "PUT", "uri": "/2015-03-31/functions/{FunctionName}/configuration", @@ -11919,6 +12963,32 @@ ], "traits": { "smithy.api#documentation": "

Updates the configuration for asynchronous invocation for a function, version, or alias.

\n

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

", + "smithy.api#examples": [ + { + "title": "To update an asynchronous invocation configuration", + "documentation": "The following example adds an on-failure destination to the existing asynchronous invocation configuration for a function named my-function.", + "input": { + "FunctionName": "my-function", + "DestinationConfig": { + "OnFailure": { + "Destination": "arn:aws:sqs:us-east-2:123456789012:destination" + } + } + }, + "output": { + "LastModified": 1.573687896493E9, + "FunctionArn": "arn:aws:lambda:us-east-2:123456789012:function:my-function:$LATEST", + "MaximumRetryAttempts": 0, + "MaximumEventAgeInSeconds": 3600, + "DestinationConfig": { + "OnSuccess": {}, + "OnFailure": { + "Destination": "arn:aws:sqs:us-east-2:123456789012:destination" + } + } + } + } + ], "smithy.api#http": { "method": "POST", "uri": "/2019-09-25/functions/{FunctionName}/event-invoke-config", diff --git a/models/mailmanager.json b/models/mailmanager.json index 39fb5641d2..80c7186610 100644 --- a/models/mailmanager.json +++ b/models/mailmanager.json @@ -613,6 +613,18 @@ "traits": { "smithy.api#enumValue": "SUBJECT" } + }, + "ENVELOPE_TO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENVELOPE_TO" + } + }, + "ENVELOPE_FROM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENVELOPE_FROM" + } } } }, @@ -1741,6 +1753,32 @@ "target": "smithy.api#String" } }, + "com.amazonaws.mailmanager#Envelope": { + "type": "structure", + "members": { + "Helo": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The HELO used by the host from which the email was received.

" + } + }, + "From": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The MAIL FROM given by the host from which the email was received.

" + } + }, + "To": { + "target": "com.amazonaws.mailmanager#StringList", + "traits": { + "smithy.api#documentation": "

All SMTP TO entries given by the host from which the email was received.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The SMTP envelope information of the email.

" + } + }, "com.amazonaws.mailmanager#ErrorMessage": { "type": "string" }, @@ -2218,6 +2256,18 @@ "traits": { "smithy.api#documentation": "

A pre-signed URL to temporarily download the full message content.

" } + }, + "Metadata": { + "target": "com.amazonaws.mailmanager#Metadata", + "traits": { + "smithy.api#documentation": "

The metadata about the email.

" + } + }, + "Envelope": { + "target": "com.amazonaws.mailmanager#Envelope", + "traits": { + "smithy.api#documentation": "

The SMTP envelope information of the email.
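To show how the new Metadata and Envelope members surface in the generated Soto MailManager client, a hedged sketch; the archived message ID is a placeholder and the member names assume Soto's usual lowerCamelCase code generation:

import SotoMailManager

func inspectArchivedMessage(client: AWSClient) async throws {
    let mailManager = MailManager(client: client, region: .useast1)
    let message = try await mailManager.getArchiveMessage(.init(archivedMessageId: "example-archived-message-id"))
    if let envelope = message.envelope {
        print("HELO:", envelope.helo ?? "", "MAIL FROM:", envelope.from ?? "", "RCPT TO:", envelope.to ?? [])
    }
    if let metadata = message.metadata {
        print("Received from", metadata.senderHostname ?? "?", "over", metadata.tlsProtocol ?? "unknown TLS")
    }
}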

" + } } }, "traits": { @@ -4415,7 +4465,7 @@ }, "aws.protocols#awsJson1_0": {}, "smithy.api#cors": {}, - "smithy.api#documentation": "AWS SES Mail Manager API\n

\n AWS SES Mail Manager API contains operations and data types\n that comprise the Mail Manager feature of Amazon Simple Email Service.

\n

Mail Manager is a set of Amazon SES email gateway features designed to help you strengthen\n your organization's email infrastructure, simplify email workflow management, and\n streamline email compliance control. To learn more, see the Mail Manager chapter in the Amazon SES Developer\n Guide.

", + "smithy.api#documentation": "Amazon SES Mail Manager API\n

The Amazon SES Mail Manager API contains operations and data types\n that comprise the Mail Manager feature of Amazon Simple Email Service (SES).

\n

Mail Manager is a set of Amazon SES email gateway features designed to help you strengthen\n your organization's email infrastructure, simplify email workflow management, and\n streamline email compliance control. To learn more, see the Mail Manager chapter in the Amazon SES Developer\n Guide.

", "smithy.api#externalDocumentation": { "API Reference": "https://w.amazon.com/bin/view/AWS/Border" }, @@ -5120,6 +5170,68 @@ "smithy.api#documentation": "

The textual body content of an email message.

" } }, + "com.amazonaws.mailmanager#Metadata": { + "type": "structure", + "members": { + "Timestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the email was received.

" + } + }, + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId", + "traits": { + "smithy.api#documentation": "

The ID of the ingress endpoint through which the email was received.

" + } + }, + "TrafficPolicyId": { + "target": "com.amazonaws.mailmanager#TrafficPolicyId", + "traits": { + "smithy.api#documentation": "

The ID of the traffic policy that was in effect when the email was received.

" + } + }, + "RuleSetId": { + "target": "com.amazonaws.mailmanager#RuleSetId", + "traits": { + "smithy.api#documentation": "

The ID of the rule set that processed the email.

" + } + }, + "SenderHostname": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the host from which the email was received.

" + } + }, + "SenderIpAddress": { + "target": "com.amazonaws.mailmanager#SenderIpAddress", + "traits": { + "smithy.api#documentation": "

The IP address of the host from which the email was received.

" + } + }, + "TlsCipherSuite": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The TLS cipher suite used to communicate with the host from which the email was received.

" + } + }, + "TlsProtocol": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The TLS protocol used to communicate with the host from which the email was received.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The metadata about the email.

" + } + }, + "com.amazonaws.mailmanager#MimeHeaderAttribute": { + "type": "string", + "traits": { + "smithy.api#pattern": "^X-[a-zA-Z0-9-]{1,256}$" + } + }, "com.amazonaws.mailmanager#NameOrArn": { "type": "string", "traits": { @@ -5641,6 +5753,30 @@ "traits": { "smithy.api#documentation": "

The priority level of the email.

" } + }, + "IngressPointId": { + "target": "com.amazonaws.mailmanager#IngressPointId", + "traits": { + "smithy.api#documentation": "

The ID of the ingress endpoint through which the email was received.

" + } + }, + "SenderHostname": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The name of the host from which the email was received.

" + } + }, + "SenderIpAddress": { + "target": "com.amazonaws.mailmanager#SenderIpAddress", + "traits": { + "smithy.api#documentation": "

The IP address of the host from which the email was received.

" + } + }, + "Envelope": { + "target": "com.amazonaws.mailmanager#Envelope", + "traits": { + "smithy.api#documentation": "

The SMTP envelope information of the email.

" + } } }, "traits": { @@ -6400,6 +6536,12 @@ "traits": { "smithy.api#documentation": "

The email attribute to evaluate in a string condition expression.

" } + }, + "MimeHeaderAttribute": { + "target": "com.amazonaws.mailmanager#MimeHeaderAttribute", + "traits": { + "smithy.api#documentation": "

The email MIME X-Header attribute to evaluate in a string condition expression.

" + } } }, "traits": { @@ -6775,6 +6917,12 @@ "smithy.api#documentation": "

Sends the email to the internet using the ses:SendRawEmail API.

" } }, + "com.amazonaws.mailmanager#SenderIpAddress": { + "type": "string", + "traits": { + "smithy.api#sensitive": {} + } + }, "com.amazonaws.mailmanager#ServiceQuotaExceededException": { "type": "structure", "members": { @@ -6870,6 +7018,12 @@ "smithy.api#documentation": "

Details on where to deliver the exported email data.

", "smithy.api#required": {} } + }, + "IncludeMetadata": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Whether to include message metadata as JSON files in the export.

" + } } }, "traits": { @@ -7083,10 +7237,19 @@ "target": "smithy.api#String" } }, + "com.amazonaws.mailmanager#StringValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, "com.amazonaws.mailmanager#StringValueList": { "type": "list", "member": { - "target": "smithy.api#String" + "target": "com.amazonaws.mailmanager#StringValue" }, "traits": { "smithy.api#length": { @@ -7658,7 +7821,7 @@ } ], "traits": { - "smithy.api#documentation": "

>Update attributes of an already provisioned rule set.

", + "smithy.api#documentation": "

Update attributes of an already provisioned rule set.

", "smithy.api#idempotent": {} } }, diff --git a/models/marketplace-reporting.json b/models/marketplace-reporting.json new file mode 100644 index 0000000000..04b79f3f80 --- /dev/null +++ b/models/marketplace-reporting.json @@ -0,0 +1,954 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.marketplacereporting#AWSMarketplaceReporting": { + "type": "service", + "version": "2018-05-10", + "resources": [ + { + "target": "com.amazonaws.marketplacereporting#Dashboard" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "Marketplace Reporting", + "cloudTrailEventSource": "reporting-marketplace.amazonaws.com", + "arnNamespace": "aws-marketplace", + "endpointPrefix": "reporting-marketplace" + }, + "aws.auth#sigv4": { + "name": "aws-marketplace" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalExposedHeaders": [ + "x-amzn-requestid", + "x-amzn-errortype", + "x-amzn-errormessage", + "date" + ] + }, + "smithy.api#documentation": "

The Amazon Web Services Marketplace GetBuyerDashboard API enables you to get a procurement insights\n dashboard programmatically. The API gets the agreement and cost analysis dashboards with\n data for all of the Amazon Web Services accounts in your Amazon Web Services Organization.
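For readers of this package, a hypothetical Soto sketch of calling GetBuyerDashboard through the new SotoMarketplaceReporting module; the dashboard ARN, the embedding domain, and the member names (dashboardIdentifier, embeddingDomains) are assumptions for illustration, not values taken from this model:

import SotoMarketplaceReporting

func fetchBuyerDashboard(client: AWSClient) async throws {
    let reporting = MarketplaceReporting(client: client, region: .useast1)
    // Assumed request members: a dashboard ARN plus the domains allowed to embed it.
    let response = try await reporting.getBuyerDashboard(.init(
        dashboardIdentifier: "arn:aws:aws-marketplace::123456789012:AWSMarketplace/ReportingData/Agreement_V1/Dashboard/AgreementSummary_v1",
        embeddingDomains: ["https://example.com"]
    ))
    // The response carries the pre-signed embed URL for the dashboard.
    print(response)
}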

\n

To use the Amazon Web Services Marketplace Reporting API, you must complete the following prerequisites:

\n
    \n
  • \n

    Enable all features for your organization. For more information, see Enabling all features for an organization with Organizations, in the\n Organizations User Guide.

    \n
  • \n
  • \n

    Call the service as the Organizations management account or an account registered\n as a delegated administrator for the procurement insights service.

    \n

    For more information about management accounts, see Tutorial:\n Creating and configuring an organization and Managing the management account with Organizations, both in the\n Organizations User Guide.

    \n

    For more information about delegated administrators, see Using\n delegated administrators, in the Amazon Web Services Marketplace Buyer\n Guide.

    \n
  • \n
  • \n

Create an IAM policy that enables the\n aws-marketplace:GetBuyerDashboard and\n organizations:DescribeOrganization permissions. In addition,\n the management account requires the\n organizations:EnableAWSServiceAccess and\n iam:CreateServiceLinkedRole permissions to create the service-linked role. For more\n information about creating the policy, see Policies and permissions in\n Identity and Access Management, in the IAM User Guide.

    \n \n

Access can be shared only by registering the desired linked account as a\n delegated administrator. That requires the\n organizations:RegisterDelegatedAdministrator,\n organizations:ListDelegatedAdministrators, and\n organizations:DeregisterDelegatedAdministrator\n permissions.

    \n
    \n
  • \n
  • \n

    Use the Amazon Web Services Marketplace console to create the\n AWSServiceRoleForProcurementInsightsPolicy service-linked role.\n The role enables Amazon Web Services Marketplace procurement visibility integration. The management\n account requires an IAM policy with the\n organizations:EnableAWSServiceAccess and\n iam:CreateServiceLinkedRole permissions to create the\n service-linked role and enable the service access. For more information, see\n Granting access to\n Organizations and Service-linked role to share procurement data in the\n Amazon Web Services Marketplace Buyer Guide.

    \n
  • \n
  • \n

    After creating the service-linked role, you must enable trusted access that\n grants Amazon Web Services Marketplace permission to access data from your Organizations. For more information,\n see Granting access to\n Organizations in the Amazon Web Services Marketplace Buyer Guide.

    \n
  • \n
", + "smithy.api#title": "AWS Marketplace Reporting Service", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://reporting-marketplace-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + 
"url": "https://reporting-marketplace-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://reporting-marketplace.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://reporting-marketplace.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + 
"Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": 
"us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://reporting-marketplace.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.marketplacereporting#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.marketplacereporting#BadRequestException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

The request is malformed, or it contains an error such as an invalid parameter. Ensure the request has all required parameters.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.marketplacereporting#Dashboard": { + "type": "resource", + "identifiers": { + "dashboardIdentifier": { + "target": "com.amazonaws.marketplacereporting#DashboardIdentifier" + } + }, + "operations": [ + { + "target": "com.amazonaws.marketplacereporting#GetBuyerDashboard" + } + ], + "traits": { + "aws.api#arn": { + "template": "{dashboardIdentifier}", + "absolute": true, + "noRegion": true + } + } + }, + "com.amazonaws.marketplacereporting#DashboardIdentifier": { + "type": "string", + "traits": { + "aws.api#arnReference": { + "service": "com.amazonaws.marketplacereporting#AWSMarketplaceReporting", + "resource": "com.amazonaws.marketplacereporting#Dashboard", + "type": "AWS::Service::Resource" + }, + "smithy.api#length": { + "min": 1, + "max": 1023 + }, + "smithy.api#pattern": "^arn:aws:aws-marketplace::[0-9]{12}:AWSMarketplace/ReportingData/(Agreement_V1/Dashboard/AgreementSummary_V1|BillingEvent_V1/Dashboard/CostAnalysis_V1)$" + } + }, + "com.amazonaws.marketplacereporting#EmbeddingDomain": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2000 + }, + "smithy.api#pattern": "^(https://[a-zA-Z\\.\\*0-9\\-_]+[\\.]{1}[a-zA-Z]{1,}[a-zA-Z0-9&?/-_=]*[a-zA-Z\\*0-9/]+|http[s]*://localhost(:[0-9]{1,5})?)$" + } + }, + "com.amazonaws.marketplacereporting#EmbeddingDomains": { + "type": "list", + "member": { + "target": "com.amazonaws.marketplacereporting#EmbeddingDomain" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, + "com.amazonaws.marketplacereporting#GetBuyerDashboard": { + "type": "operation", + "input": { + "target": "com.amazonaws.marketplacereporting#GetBuyerDashboardInput" + }, + "output": { + "target": "com.amazonaws.marketplacereporting#GetBuyerDashboardOutput" + }, + "errors": [ + { + "target": "com.amazonaws.marketplacereporting#AccessDeniedException" + }, + { + "target": "com.amazonaws.marketplacereporting#BadRequestException" + }, + { + "target": "com.amazonaws.marketplacereporting#InternalServerException" + }, + { + "target": "com.amazonaws.marketplacereporting#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Generates an embedding URL for an Amazon QuickSight dashboard for an anonymous user.

Note: This API is available only to Amazon Web Services Organization management accounts or delegated administrators registered for the procurement insights (procurement-insights.marketplace.amazonaws.com) feature.

The following rules apply to a generated URL:

  • It contains a temporary bearer token, valid for 5 minutes after it is generated. Once redeemed within that period, it cannot be reused.
  • It has a session lifetime of one hour. The 5-minute validity period runs separately from the session lifetime.
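For Soto users, a minimal Swift sketch of calling this new operation is shown below. It assumes the generated SotoMarketplaceReporting surface follows the usual Soto conventions (a MarketplaceReporting service struct, a getBuyerDashboard method, memberwise input shapes, and a Region case such as .useast1); the ARN and domain values are illustrative, and AWSClient construction varies slightly between Soto releases.

```swift
import SotoCore
import SotoMarketplaceReporting

// Sketch only: assumes the generated Soto API follows the usual conventions.
func fetchAgreementsDashboard() async throws {
    let client = AWSClient()   // client construction differs slightly between Soto releases
    let reporting = MarketplaceReporting(client: client, region: .useast1)

    let response = try await reporting.getBuyerDashboard(
        .init(
            dashboardIdentifier: "arn:aws:aws-marketplace::123456789012:AWSMarketplace/ReportingData/Agreement_V1/Dashboard/AgreementSummary_V1",
            embeddingDomains: ["https://*.amazon.com"]   // at most two domains; wildcards cover subdomains
        )
    )
    // The embed URL carries a bearer token valid for 5 minutes; the session lasts one hour.
    print(response.embedUrl)

    try await client.shutdown()
}
```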
", + "smithy.api#examples": [ + { + "title": "Getting an agreements dashboard", + "documentation": "The following example shows how to obtain a dashboard for active agreements", + "input": { + "dashboardIdentifier": "arn:aws:aws-marketplace::123456789012:AWSMarketplace/ReportingData/Agreement_V1/Dashboard/AgreementSummary_V1", + "embeddingDomains": [ + "https://*.amazon.com" + ] + }, + "output": { + "dashboardIdentifier": "arn:aws:aws-marketplace::123456789012:AWSMarketplace/ReportingData/Agreement_V1/Dashboard/AgreementSummary_V1", + "embedUrl": "https://us-east-1.quicksight.aws.amazon.com/embed/1235asdbffffbbasdf123/dashboards/8a66afa6-f316-4e71-a1ed-0a5bea07a314?code=abcdefghijklmn&identityprovider=quicksight&isauthcode=true", + "embeddingDomains": [ + "https://*.amazon.com" + ] + } + }, + { + "title": "Getting a cost-analysis dashboard", + "documentation": "The following example shows how to obtain a dashboard for cost analysis", + "input": { + "dashboardIdentifier": "arn:aws:aws-marketplace::123456789012:AWSMarketplace/ReportingData/BillingEvent_V1/Dashboard/CostAnalysis_V1", + "embeddingDomains": [ + "https://*.amazon.com" + ] + }, + "output": { + "dashboardIdentifier": "arn:aws:aws-marketplace::123456789012:AWSMarketplace/ReportingData/BillingEvent_V1/Dashboard/CostAnalysis_V1", + "embedUrl": "https://us-east-1.quicksight.aws.amazon.com/embed/1235asdbffffbbasdf123/dashboards/8a66afa6-f316-4e71-a1ed-0a5bea07a314?code=abcdefghijklmn&identityprovider=quicksight&isauthcode=true", + "embeddingDomains": [ + "https://*.amazon.com" + ] + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/getBuyerDashboard" + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "GetBuyerDashboardUnauthorized", + "params": { + "dashboardIdentifier": "arn:aws:aws-marketplace::021234567890:AWSMarketplace/ReportingData/Agreement_V1/Dashboard/AgreementSummary_V1", + "embeddingDomains": [ + "https://localhost:8080" + ] + }, + "expect": { + "failure": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.marketplacereporting#GetBuyerDashboardInput": { + "type": "structure", + "members": { + "dashboardIdentifier": { + "target": "com.amazonaws.marketplacereporting#DashboardIdentifier", + "traits": { + "smithy.api#documentation": "

The ARN of the requested dashboard.

", + "smithy.api#required": {} + } + }, + "embeddingDomains": { + "target": "com.amazonaws.marketplacereporting#EmbeddingDomains", + "traits": { + "smithy.api#documentation": "

Fully qualified domains that you add to the allow list for access to the generated URL that is then embedded. You can list up to two domains or subdomains in each API call.\n To include all subdomains under a specific domain, use *. For example, https://*.amazon.com includes all subdomains under \n https://aws.amazon.com.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.marketplacereporting#Dashboard", + "ids": { + "dashboardIdentifier": "dashboardIdentifier" + } + } + ] + } + }, + "com.amazonaws.marketplacereporting#GetBuyerDashboardOutput": { + "type": "structure", + "members": { + "embedUrl": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The dashboard's embedding URL.

", + "smithy.api#required": {} + } + }, + "dashboardIdentifier": { + "target": "com.amazonaws.marketplacereporting#DashboardIdentifier", + "traits": { + "smithy.api#documentation": "

The ARN of the returned dashboard.

", + "smithy.api#required": {} + } + }, + "embeddingDomains": { + "target": "com.amazonaws.marketplacereporting#EmbeddingDomains", + "traits": { + "smithy.api#documentation": "

The fully qualified domains specified in the request. The domains enable access to the generated URL that is then embedded. You can list up to two domains or subdomains in each API call.\n To include all subdomains under a specific domain, use *. For example, https://*.amazon.com includes all subdomains under \n https://aws.amazon.com.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.marketplacereporting#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

The operation failed due to a server error.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.marketplacereporting#UnauthorizedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

You do not have permission to perform this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 401 + } + } + } +} \ No newline at end of file diff --git a/models/mediaconvert.json b/models/mediaconvert.json index 88d0462fe0..69b3a72ff4 100644 --- a/models/mediaconvert.json +++ b/models/mediaconvert.json @@ -8780,6 +8780,28 @@ "smithy.api#documentation": "Set Embedded timecode override to Use MDPM when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata. When you do, we recommend you also set Timecode source to Embedded. Leave Embedded timecode override blank, or set to None, when your input does not contain MDPM timecode." } }, + "com.amazonaws.mediaconvert#EncryptionContractConfiguration": { + "type": "structure", + "members": { + "SpekeAudioPreset": { + "target": "com.amazonaws.mediaconvert#PresetSpeke20Audio", + "traits": { + "smithy.api#documentation": "Specify which SPEKE version 2.0 audio preset MediaConvert uses to request content keys from your SPEKE server. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/drm-content-speke-v2-presets.html To encrypt to your audio outputs, choose from the following: Audio preset 1, Audio preset 2, or Audio preset 3. To encrypt your audio outputs, using the same content key for both your audio and video outputs: Choose Shared. When you do, you must also set SPEKE v2.0 video preset to Shared. To not encrypt your audio outputs: Choose Unencrypted. When you do, to encrypt your video outputs, you must also specify a SPEKE v2.0 video preset (other than Shared or Unencrypted).", + "smithy.api#jsonName": "spekeAudioPreset" + } + }, + "SpekeVideoPreset": { + "target": "com.amazonaws.mediaconvert#PresetSpeke20Video", + "traits": { + "smithy.api#documentation": "Specify which SPEKE version 2.0 video preset MediaConvert uses to request content keys from your SPEKE server. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/drm-content-speke-v2-presets.html To encrypt to your video outputs, choose from the following: Video preset 1, Video preset 2, Video preset 3, Video preset 4, Video preset 5, Video preset 6, Video preset 7, or Video preset 8. To encrypt your video outputs, using the same content key for both your video and audio outputs: Choose Shared. When you do, you must also set SPEKE v2.0 audio preset to Shared. To not encrypt your video outputs: Choose Unencrypted. When you do, to encrypt your audio outputs, you must also specify a SPEKE v2.0 audio preset (other than Shared or Unencrypted).", + "smithy.api#jsonName": "spekeVideoPreset" + } + } + }, + "traits": { + "smithy.api#documentation": "Specify the SPEKE version, either v1.0 or v2.0, that MediaConvert uses when encrypting your output. For more information, see: https://docs.aws.amazon.com/speke/latest/documentation/speke-api-specification.html To use SPEKE v1.0: Leave blank. To use SPEKE v2.0: Specify a SPEKE v2.0 video preset and a SPEKE v2.0 audio preset." 
+ } + }, "com.amazonaws.mediaconvert#Endpoint": { "type": "structure", "members": { @@ -20965,6 +20987,112 @@ "smithy.api#documentation": "Settings for preset" } }, + "com.amazonaws.mediaconvert#PresetSpeke20Audio": { + "type": "enum", + "members": { + "PRESET_AUDIO_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_AUDIO_1" + } + }, + "PRESET_AUDIO_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_AUDIO_2" + } + }, + "PRESET_AUDIO_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_AUDIO_3" + } + }, + "SHARED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHARED" + } + }, + "UNENCRYPTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNENCRYPTED" + } + } + }, + "traits": { + "smithy.api#documentation": "Specify which SPEKE version 2.0 audio preset MediaConvert uses to request content keys from your SPEKE server. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/drm-content-speke-v2-presets.html To encrypt to your audio outputs, choose from the following: Audio preset 1, Audio preset 2, or Audio preset 3. To encrypt your audio outputs, using the same content key for both your audio and video outputs: Choose Shared. When you do, you must also set SPEKE v2.0 video preset to Shared. To not encrypt your audio outputs: Choose Unencrypted. When you do, to encrypt your video outputs, you must also specify a SPEKE v2.0 video preset (other than Shared or Unencrypted)." + } + }, + "com.amazonaws.mediaconvert#PresetSpeke20Video": { + "type": "enum", + "members": { + "PRESET_VIDEO_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_VIDEO_1" + } + }, + "PRESET_VIDEO_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_VIDEO_2" + } + }, + "PRESET_VIDEO_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_VIDEO_3" + } + }, + "PRESET_VIDEO_4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_VIDEO_4" + } + }, + "PRESET_VIDEO_5": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_VIDEO_5" + } + }, + "PRESET_VIDEO_6": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_VIDEO_6" + } + }, + "PRESET_VIDEO_7": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_VIDEO_7" + } + }, + "PRESET_VIDEO_8": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PRESET_VIDEO_8" + } + }, + "SHARED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHARED" + } + }, + "UNENCRYPTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNENCRYPTED" + } + } + }, + "traits": { + "smithy.api#documentation": "Specify which SPEKE version 2.0 video preset MediaConvert uses to request content keys from your SPEKE server. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/drm-content-speke-v2-presets.html To encrypt to your video outputs, choose from the following: Video preset 1, Video preset 2, Video preset 3, Video preset 4, Video preset 5, Video preset 6, Video preset 7, or Video preset 8. To encrypt your video outputs, using the same content key for both your video and audio outputs: Choose Shared. When you do, you must also set SPEKE v2.0 audio preset to Shared. To not encrypt your video outputs: Choose Unencrypted. 
When you do, to encrypt your audio outputs, you must also specify a SPEKE v2.0 audio preset (other than Shared or Unencrypted)." + } + }, "com.amazonaws.mediaconvert#PricingPlan": { "type": "enum", "members": { @@ -22278,6 +22406,13 @@ "smithy.api#jsonName": "certificateArn" } }, + "EncryptionContractConfiguration": { + "target": "com.amazonaws.mediaconvert#EncryptionContractConfiguration", + "traits": { + "smithy.api#documentation": "Specify the SPEKE version, either v1.0 or v2.0, that MediaConvert uses when encrypting your output. For more information, see: https://docs.aws.amazon.com/speke/latest/documentation/speke-api-specification.html To use SPEKE v1.0: Leave blank. To use SPEKE v2.0: Specify a SPEKE v2.0 video preset and a SPEKE v2.0 audio preset.", + "smithy.api#jsonName": "encryptionContractConfiguration" + } + }, "ResourceId": { "target": "com.amazonaws.mediaconvert#__string", "traits": { @@ -22321,6 +22456,13 @@ "smithy.api#jsonName": "dashSignaledSystemIds" } }, + "EncryptionContractConfiguration": { + "target": "com.amazonaws.mediaconvert#EncryptionContractConfiguration", + "traits": { + "smithy.api#documentation": "Specify the SPEKE version, either v1.0 or v2.0, that MediaConvert uses when encrypting your output. For more information, see: https://docs.aws.amazon.com/speke/latest/documentation/speke-api-specification.html To use SPEKE v1.0: Leave blank. To use SPEKE v2.0: Specify a SPEKE v2.0 video preset and a SPEKE v2.0 audio preset.", + "smithy.api#jsonName": "encryptionContractConfiguration" + } + }, "HlsSignaledSystemIds": { "target": "com.amazonaws.mediaconvert#__listOf__stringMin36Max36Pattern09aFAF809aFAF409aFAF409aFAF409aFAF12", "traits": { diff --git a/models/medialive.json b/models/medialive.json index 79f02f39af..ee2f344e0b 100644 --- a/models/medialive.json +++ b/models/medialive.json @@ -1991,6 +1991,98 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.medialive#BandwidthReductionFilterSettings": { + "type": "structure", + "members": { + "PostFilterSharpening": { + "target": "com.amazonaws.medialive#BandwidthReductionPostFilterSharpening", + "traits": { + "smithy.api#documentation": "Configures the sharpening control, which is available when the bandwidth reduction filter is enabled. This\ncontrol sharpens edges and contours, which produces a specific artistic effect that you might want.\n\nWe recommend that you test each of the values (including DISABLED) to observe the sharpening effect on the\ncontent.", + "smithy.api#jsonName": "postFilterSharpening" + } + }, + "Strength": { + "target": "com.amazonaws.medialive#BandwidthReductionFilterStrength", + "traits": { + "smithy.api#documentation": "Enables the bandwidth reduction filter. The filter strengths range from 1 to 4. 
We recommend that you always\nenable this filter and use AUTO, to let MediaLive apply the optimum filtering for the context.", + "smithy.api#jsonName": "strength" + } + } + }, + "traits": { + "smithy.api#documentation": "Bandwidth Reduction Filter Settings" + } + }, + "com.amazonaws.medialive#BandwidthReductionFilterStrength": { + "type": "enum", + "members": { + "AUTO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTO" + } + }, + "STRENGTH_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STRENGTH_1" + } + }, + "STRENGTH_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STRENGTH_2" + } + }, + "STRENGTH_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STRENGTH_3" + } + }, + "STRENGTH_4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STRENGTH_4" + } + } + }, + "traits": { + "smithy.api#documentation": "Bandwidth Reduction Filter Strength" + } + }, + "com.amazonaws.medialive#BandwidthReductionPostFilterSharpening": { + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + }, + "SHARPENING_1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHARPENING_1" + } + }, + "SHARPENING_2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHARPENING_2" + } + }, + "SHARPENING_3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SHARPENING_3" + } + } + }, + "traits": { + "smithy.api#documentation": "Bandwidth Reduction Post Filter Sharpening" + } + }, "com.amazonaws.medialive#BatchDelete": { "type": "operation", "input": { @@ -15485,6 +15577,12 @@ "traits": { "smithy.api#jsonName": "temporalFilterSettings" } + }, + "BandwidthReductionFilterSettings": { + "target": "com.amazonaws.medialive#BandwidthReductionFilterSettings", + "traits": { + "smithy.api#jsonName": "bandwidthReductionFilterSettings" + } } }, "traits": { @@ -16428,6 +16526,12 @@ "traits": { "smithy.api#jsonName": "temporalFilterSettings" } + }, + "BandwidthReductionFilterSettings": { + "target": "com.amazonaws.medialive#BandwidthReductionFilterSettings", + "traits": { + "smithy.api#jsonName": "bandwidthReductionFilterSettings" + } } }, "traits": { @@ -19967,12 +20071,6 @@ "smithy.api#enumValue": "AWS" } }, - "ON_PREMISE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ON_PREMISE" - } - }, "ON_PREMISES": { "target": "smithy.api#Unit", "traits": { @@ -19981,7 +20079,7 @@ } }, "traits": { - "smithy.api#documentation": "With the introduction of MediaLive OnPrem, a MediaLive input can now exist in two different places: AWS or\ninside an on-premise datacenter. By default all inputs will continue to be AWS inputs." + "smithy.api#documentation": "With the introduction of MediaLive Anywhere, a MediaLive input can now exist in two different places: AWS or\ninside an on-premises datacenter. By default all inputs will continue to be AWS inputs." } }, "com.amazonaws.medialive#InputPreference": { @@ -26002,6 +26100,20 @@ "smithy.api#documentation": "The multiplex object." 
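For the BandwidthReductionFilterSettings added above, a rough Swift sketch is shown below. It assumes the usual Soto codegen naming (MediaLive.BandwidthReductionFilterSettings attached to the H.264/H.265 filter settings, enum cases such as .auto and .sharpening1) and that unspecified members default to nil.

```swift
import SotoMediaLive

// Sketch: enable the bandwidth reduction filter with automatic strength and
// mild post-filter sharpening inside the H.264 encoder's filter settings.
let bandwidthFilter = MediaLive.BandwidthReductionFilterSettings(
    postFilterSharpening: .sharpening1,
    strength: .auto                         // AUTO lets MediaLive pick the optimum strength
)

let filterSettings = MediaLive.H264FilterSettings(
    bandwidthReductionFilterSettings: bandwidthFilter
)
```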
} }, + "com.amazonaws.medialive#MultiplexContainerSettings": { + "type": "structure", + "members": { + "MultiplexM2tsSettings": { + "target": "com.amazonaws.medialive#MultiplexM2tsSettings", + "traits": { + "smithy.api#jsonName": "multiplexM2tsSettings" + } + } + }, + "traits": { + "smithy.api#documentation": "Multiplex Container Settings" + } + }, "com.amazonaws.medialive#MultiplexGroupSettings": { "type": "structure", "members": {}, @@ -26009,6 +26121,112 @@ "smithy.api#documentation": "Multiplex Group Settings" } }, + "com.amazonaws.medialive#MultiplexM2tsSettings": { + "type": "structure", + "members": { + "AbsentInputAudioBehavior": { + "target": "com.amazonaws.medialive#M2tsAbsentInputAudioBehavior", + "traits": { + "smithy.api#documentation": "When set to drop, output audio streams will be removed from the program if the selected input audio stream is removed from the input. This allows the output audio configuration to dynamically change based on input configuration. If this is set to encodeSilence, all output audio streams will output encoded silence when not connected to an active input stream.", + "smithy.api#jsonName": "absentInputAudioBehavior" + } + }, + "Arib": { + "target": "com.amazonaws.medialive#M2tsArib", + "traits": { + "smithy.api#documentation": "When set to enabled, uses ARIB-compliant field muxing and removes video descriptor.", + "smithy.api#jsonName": "arib" + } + }, + "AudioBufferModel": { + "target": "com.amazonaws.medialive#M2tsAudioBufferModel", + "traits": { + "smithy.api#documentation": "When set to dvb, uses DVB buffer model for Dolby Digital audio. When set to atsc, the ATSC model is used.", + "smithy.api#jsonName": "audioBufferModel" + } + }, + "AudioFramesPerPes": { + "target": "com.amazonaws.medialive#__integerMin0", + "traits": { + "smithy.api#documentation": "The number of audio frames to insert for each PES packet.", + "smithy.api#jsonName": "audioFramesPerPes" + } + }, + "AudioStreamType": { + "target": "com.amazonaws.medialive#M2tsAudioStreamType", + "traits": { + "smithy.api#documentation": "When set to atsc, uses stream type = 0x81 for AC3 and stream type = 0x87 for EAC3. 
When set to dvb, uses stream type = 0x06.", + "smithy.api#jsonName": "audioStreamType" + } + }, + "CcDescriptor": { + "target": "com.amazonaws.medialive#M2tsCcDescriptor", + "traits": { + "smithy.api#documentation": "When set to enabled, generates captionServiceDescriptor in PMT.", + "smithy.api#jsonName": "ccDescriptor" + } + }, + "Ebif": { + "target": "com.amazonaws.medialive#M2tsEbifControl", + "traits": { + "smithy.api#documentation": "If set to passthrough, passes any EBIF data from the input source to this output.", + "smithy.api#jsonName": "ebif" + } + }, + "EsRateInPes": { + "target": "com.amazonaws.medialive#M2tsEsRateInPes", + "traits": { + "smithy.api#documentation": "Include or exclude the ES Rate field in the PES header.", + "smithy.api#jsonName": "esRateInPes" + } + }, + "Klv": { + "target": "com.amazonaws.medialive#M2tsKlv", + "traits": { + "smithy.api#documentation": "If set to passthrough, passes any KLV data from the input source to this output.", + "smithy.api#jsonName": "klv" + } + }, + "NielsenId3Behavior": { + "target": "com.amazonaws.medialive#M2tsNielsenId3Behavior", + "traits": { + "smithy.api#documentation": "If set to passthrough, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output.", + "smithy.api#jsonName": "nielsenId3Behavior" + } + }, + "PcrControl": { + "target": "com.amazonaws.medialive#M2tsPcrControl", + "traits": { + "smithy.api#documentation": "When set to pcrEveryPesPacket, a Program Clock Reference value is inserted for every Packetized Elementary Stream (PES) header. This parameter is effective only when the PCR PID is the same as the video or audio elementary stream.", + "smithy.api#jsonName": "pcrControl" + } + }, + "PcrPeriod": { + "target": "com.amazonaws.medialive#__integerMin0Max500", + "traits": { + "smithy.api#documentation": "Maximum time in milliseconds between Program Clock Reference (PCRs) inserted into the transport stream.", + "smithy.api#jsonName": "pcrPeriod" + } + }, + "Scte35Control": { + "target": "com.amazonaws.medialive#M2tsScte35Control", + "traits": { + "smithy.api#documentation": "Optionally pass SCTE-35 signals from the input source to this output.", + "smithy.api#jsonName": "scte35Control" + } + }, + "Scte35PrerollPullupMilliseconds": { + "target": "com.amazonaws.medialive#__doubleMin0Max5000", + "traits": { + "smithy.api#documentation": "Defines the amount SCTE-35 preroll will be increased (in milliseconds) on the output. Preroll is the amount of time between the presence of a SCTE-35 indication in a transport stream and the PTS of the video frame it references. Zero means don't add pullup (it doesn't mean set the preroll to zero). Negative pullup is not supported, which means that you can't make the preroll shorter. 
Be aware that latency in the output will increase by the pullup amount.", + "smithy.api#jsonName": "scte35PrerollPullupMilliseconds" + } + } + }, + "traits": { + "smithy.api#documentation": "Multiplex M2ts Settings" + } + }, "com.amazonaws.medialive#MultiplexMediaConnectOutputDestinationSettings": { "type": "structure", "members": { @@ -26050,6 +26268,12 @@ "smithy.api#jsonName": "destination", "smithy.api#required": {} } + }, + "ContainerSettings": { + "target": "com.amazonaws.medialive#MultiplexContainerSettings", + "traits": { + "smithy.api#jsonName": "containerSettings" + } } }, "traits": { diff --git a/models/mediapackagev2.json b/models/mediapackagev2.json index 86781cc506..01de86232e 100644 --- a/models/mediapackagev2.json +++ b/models/mediapackagev2.json @@ -840,6 +840,9 @@ "ScteHls": { "target": "com.amazonaws.mediapackagev2#ScteHls" }, + "StartTag": { + "target": "com.amazonaws.mediapackagev2#StartTag" + }, "ManifestWindowSeconds": { "target": "smithy.api#Integer", "traits": { @@ -852,7 +855,7 @@ "ProgramDateTimeIntervalSeconds": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, \n EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. \n The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. \n ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

\n

Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

", + "smithy.api#documentation": "

Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval,\n EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest.\n The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.\n ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

\n

Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

", "smithy.api#range": { "min": 1, "max": 1209600 @@ -892,6 +895,9 @@ "ScteHls": { "target": "com.amazonaws.mediapackagev2#ScteHls" }, + "StartTag": { + "target": "com.amazonaws.mediapackagev2#StartTag" + }, "ManifestWindowSeconds": { "target": "smithy.api#Integer", "traits": { @@ -904,7 +910,7 @@ "ProgramDateTimeIntervalSeconds": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, \n EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. \n The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. \n ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

\n

Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

", + "smithy.api#documentation": "

Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval,\n EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest.\n The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.\n ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

\n

Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

", "smithy.api#range": { "min": 1, "max": 1209600 @@ -2370,6 +2376,12 @@ "max": 1209600 } } + }, + "ClipStartTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

Optionally specify the clip start time for all of your manifest egress requests. When you include clip start time, note that you cannot use clip start time query parameters for this manifest's endpoint URL.

" + } } }, "traits": { @@ -2909,7 +2921,7 @@ "ProgramDateTimeIntervalSeconds": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, \n EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. \n The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. \n ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

\n

Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

" + "smithy.api#documentation": "

Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval,\n EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest.\n The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.\n ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

\n

Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

" } }, "ScteHls": { @@ -2917,6 +2929,9 @@ }, "FilterConfiguration": { "target": "com.amazonaws.mediapackagev2#FilterConfiguration" + }, + "StartTag": { + "target": "com.amazonaws.mediapackagev2#StartTag" } }, "traits": { @@ -2961,7 +2976,7 @@ "ProgramDateTimeIntervalSeconds": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, \n EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. \n The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. \n ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

\n

Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

" + "smithy.api#documentation": "

Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval,\n EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest.\n The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.\n ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.

\n

Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.

" } }, "ScteHls": { @@ -2969,6 +2984,9 @@ }, "FilterConfiguration": { "target": "com.amazonaws.mediapackagev2#FilterConfiguration" + }, + "StartTag": { + "target": "com.amazonaws.mediapackagev2#StartTag" } }, "traits": { @@ -4836,6 +4854,27 @@ "smithy.api#documentation": "

The parameters for the SPEKE key provider.

" } }, + "com.amazonaws.mediapackagev2#StartTag": { + "type": "structure", + "members": { + "TimeOffset": { + "target": "smithy.api#Float", + "traits": { + "smithy.api#documentation": "

Specify the value for TIME-OFFSET within your EXT-X-START tag. Enter a signed floating point value which, if positive, must be less than the configured manifest duration minus three times the configured segment target duration. If negative, the absolute value must be larger than three times the configured segment target duration, and the absolute value must be smaller than the configured manifest duration.

", + "smithy.api#required": {} + } + }, + "Precise": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specify the value for PRECISE within your EXT-X-START tag. Leave blank, or choose false, to use the default value NO. Choose yes to use the value YES.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

To insert an EXT-X-START tag in your HLS playlist, specify a StartTag configuration object with a valid TimeOffset. When you do, you can also optionally specify whether to include a PRECISE value in the EXT-X-START tag.
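With the generated SotoMediaPackageV2 types, this might look roughly as follows; the type and member names (MediaPackageV2.StartTag, precise, timeOffset) are assumed from the usual Soto codegen conventions and the offset value is illustrative only.

```swift
import SotoMediaPackageV2

// Sketch: start playback 12 seconds before the live edge and ask players to
// honour the offset exactly (PRECISE=YES); values here are illustrative only.
let startTag = MediaPackageV2.StartTag(
    precise: true,
    timeOffset: -12.0
)
// The StartTag would then be supplied on the HLS or low-latency HLS manifest
// configuration of an origin endpoint (e.g. when creating or updating it).
```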

" + } + }, "com.amazonaws.mediapackagev2#TagArn": { "type": "string" }, @@ -4876,7 +4915,7 @@ "aws:TagKeys", "aws:RequestTag/${TagKey}" ], - "smithy.api#documentation": "

Assigns one of more tags (key-value pairs) to the specified MediaPackage resource.

\n

Tags can help you organize and categorize your resources. You can also use them to scope user \n permissions, by granting a user permission to access or change only resources with certain tag values.\n You can use the TagResource operation with a resource that already has tags. If you specify a new tag \n key for the resource, this tag is appended to the list of tags associated with the resource. If you \n specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

", + "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified MediaPackage resource.

\n

Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions, by granting a user permission to access or change only resources with certain tag values.\n You can use the TagResource operation with a resource that already has tags. If you specify a new tag\n key for the resource, this tag is appended to the list of tags associated with the resource. If you\n specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

", "smithy.api#examples": [ { "title": "Add tags to a resource", @@ -6103,6 +6142,18 @@ "traits": { "smithy.api#enumValue": "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY" } + }, + "CLIP_START_TIME_WITH_START_OR_END": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CLIP_START_TIME_WITH_START_OR_END" + } + }, + "START_TAG_TIME_OFFSET_INVALID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "START_TAG_TIME_OFFSET_INVALID" + } } } }, @@ -6184,7 +6235,7 @@ ], "maxAge": 86400 }, - "smithy.api#documentation": "\n

This guide is intended for creating AWS Elemental MediaPackage resources in MediaPackage Version 2 (v2) starting from May 2023. \n To get started with MediaPackage v2, create your MediaPackage resources. There isn't an automated process to\n migrate your resources from MediaPackage v1 to MediaPackage v2.

\n

The names of the entities that you use to access this API, like URLs and ARNs, all have the versioning information \n added, like \"v2\", to distinguish from the prior version. If you used MediaPackage prior to this release, you can't use\n the MediaPackage v2 CLI or the MediaPackage v2 API to access any MediaPackage v1 resources.

\n

If you created resources in MediaPackage v1, use video on demand (VOD) workflows, and aren't looking to migrate to MediaPackage v2 yet, \n see the MediaPackage v1 Live API Reference.

\n
\n

This is the AWS Elemental MediaPackage v2 Live REST API Reference. It describes all the MediaPackage API operations for live content in detail, and provides sample requests, responses, and errors for the supported web services protocols.

\n

We assume that you have the IAM permissions that you need to use MediaPackage via the REST API. We also assume that you are familiar with the features and operations of MediaPackage, as described in the AWS Elemental MediaPackage User Guide.

", + "smithy.api#documentation": "\n

This guide is intended for creating AWS Elemental MediaPackage resources in MediaPackage Version 2 (v2) starting from May 2023.\n To get started with MediaPackage v2, create your MediaPackage resources. There isn't an automated process to\n migrate your resources from MediaPackage v1 to MediaPackage v2.

\n

The names of the entities that you use to access this API, like URLs and ARNs, all have the versioning information\n added, like \"v2\", to distinguish from the prior version. If you used MediaPackage prior to this release, you can't use\n the MediaPackage v2 CLI or the MediaPackage v2 API to access any MediaPackage v1 resources.

\n

If you created resources in MediaPackage v1, use video on demand (VOD) workflows, and aren't looking to migrate to MediaPackage v2 yet,\n see the MediaPackage v1 Live API Reference.

\n
\n

This is the AWS Elemental MediaPackage v2 Live REST API Reference. It describes all the MediaPackage API operations for live content in detail, and provides sample requests, responses, and errors for the supported web services protocols.

\n

We assume that you have the IAM permissions that you need to use MediaPackage via the REST API. We also assume that you are familiar with the features and operations of MediaPackage, as described in the AWS Elemental MediaPackage User Guide.

", "smithy.api#title": "AWS Elemental MediaPackage v2", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/memorydb.json b/models/memorydb.json index a3376ad257..431cac5658 100644 --- a/models/memorydb.json +++ b/models/memorydb.json @@ -362,7 +362,7 @@ "name": "memorydb" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

MemoryDB is a fully managed, Redis OSS-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures.\n \n MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis OSS, a popular open source data store, enabling you to leverage Redis OSS’ flexible and friendly data structures, APIs, and commands.

", + "smithy.api#documentation": "

MemoryDB for Redis is a fully managed, Redis-compatible, in-memory database that delivers ultra-fast performance and Multi-AZ durability for modern applications built using microservices architectures.\n \n MemoryDB stores the entire database in-memory, enabling low latency and high throughput data access. It is compatible with Redis, a popular open source data store, enabling you to leverage Redis’ flexible and friendly data structures, APIs, and commands.

", "smithy.api#title": "Amazon MemoryDB", "smithy.api#xmlNamespace": { "uri": "http://memorydb.amazonaws.com/doc/2021-01-01/" @@ -1461,16 +1461,22 @@ "smithy.api#documentation": "

The cluster's node type

" } }, + "Engine": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "

\n The Redis OSS or Valkey engine used by the cluster.

" + } + }, "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The Redis OSS engine version used by the cluster

" + "smithy.api#documentation": "

The Redis engine version used by the cluster

" } }, "EnginePatchVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The Redis OSS engine patch version used by the cluster

" + "smithy.api#documentation": "

The engine patch version used by the cluster

" } }, "ParameterGroupName": { @@ -1606,10 +1612,16 @@ "smithy.api#documentation": "

The node type used for the cluster

" } }, + "Engine": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "

The configuration for the Redis OSS or Valkey engine used by the cluster.

" + } + }, "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The Redis OSS engine version used by the cluster

" + "smithy.api#documentation": "

The engine version used by the cluster

" } }, "MaintenanceWindow": { @@ -2108,10 +2120,16 @@ "smithy.api#required": {} } }, + "Engine": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "

The name of the engine to be used for the nodes in this cluster. The value must be set to either Redis or Valkey.
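A hedged Soto sketch of selecting the new engine field at cluster creation follows; the member names, the memberwise initializer, and the example values ("open-access", "example-cluster", "7.2", "db.r7g.large") are assumptions based on the usual Soto codegen conventions, not taken from the model.

```swift
import SotoMemoryDB

// Sketch: create a cluster that selects the Valkey engine explicitly.
// Only a few of the request's parameters are shown; values are illustrative.
func createValkeyCluster(memoryDB: MemoryDB) async throws {
    _ = try await memoryDB.createCluster(
        .init(
            aclName: "open-access",
            clusterName: "example-cluster",
            engine: "valkey",            // omit, or pass "redis", for Redis OSS
            engineVersion: "7.2",
            nodeType: "db.r7g.large"
        )
    )
}
```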

" + } + }, "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The version number of the Redis OSS engine to be used for the cluster.

" + "smithy.api#documentation": "

The version number of the engine to be used for the cluster.

" } }, "AutoMinorVersionUpgrade": { @@ -2593,7 +2611,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a cluster. It also deletes all associated nodes and node endpoints

\n \n

\n CreateSnapshot permission is required to create a final snapshot. \n Without this permission, the API call will fail with an Access Denied exception.

\n
" + "smithy.api#documentation": "

Deletes a cluster. It also deletes all associated nodes and node endpoints

" } }, "com.amazonaws.memorydb#DeleteClusterRequest": { @@ -3027,7 +3045,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of the available Redis OSS engine versions.

", + "smithy.api#documentation": "

Returns a list of the available engine versions.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3039,10 +3057,16 @@ "com.amazonaws.memorydb#DescribeEngineVersionsRequest": { "type": "structure", "members": { + "Engine": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "

The engine version to return. Valid values are either valkey or redis.

" + } + }, "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { - "smithy.api#documentation": "

The Redis OSS engine version

" + "smithy.api#documentation": "

The engine version.

" } }, "ParameterGroupFamily": { @@ -3926,6 +3950,12 @@ "com.amazonaws.memorydb#EngineVersionInfo": { "type": "structure", "members": { + "Engine": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "

The version of the Redis OSS or Valkey engine used by the cluster.

" + } + }, "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { @@ -3946,7 +3976,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides details of the Redis OSS engine version

" + "smithy.api#documentation": "

Provides details of the engine version.

" } }, "com.amazonaws.memorydb#EngineVersionInfoList": { @@ -5370,6 +5400,12 @@ "smithy.api#documentation": "

Reflects the nature of the service update

" } }, + "Engine": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "

The MemoryDB engine to which the update applies. The values are either Redis or Valkey.

" + } + }, "NodesUpdated": { "target": "com.amazonaws.memorydb#String", "traits": { @@ -6505,6 +6541,12 @@ "smithy.api#documentation": "

A valid node type that you want to scale this cluster up or down to.

" } }, + "Engine": { + "target": "com.amazonaws.memorydb#String", + "traits": { + "smithy.api#documentation": "

The name of the engine to be used for the nodes in this cluster. The value must be set to either Redis or Valkey.

" + } + }, "EngineVersion": { "target": "com.amazonaws.memorydb#String", "traits": { diff --git a/models/neptune-graph.json b/models/neptune-graph.json index 03a6260575..428904d8ad 100644 --- a/models/neptune-graph.json +++ b/models/neptune-graph.json @@ -2195,7 +2195,7 @@ "format": { "target": "com.amazonaws.neptunegraph#Format", "traits": { - "smithy.api#documentation": "

Specifies the format of S3 data to be imported. Valid values are CSV, which identifies\n the Gremlin\n CSV format or OPENCYPHER, which identies the openCypher\n load format.

" + "smithy.api#documentation": "

Specifies the format of S3 data to be imported. Valid values are CSV, which identifies\n the Gremlin\n CSV format, OPEN_CYPHER, which identifies the openCypher\n load format, or ntriples, which identifies the\n RDF n-triples format.

" } }, "blankNodeHandling": { @@ -5238,7 +5238,7 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 32, + "min": 16, "max": 24576 } } diff --git a/models/neptune.json b/models/neptune.json index 67f5e67620..d25e0687cc 100644 --- a/models/neptune.json +++ b/models/neptune.json @@ -1442,17 +1442,6 @@ "expect": { "error": "Invalid Configuration: Missing Region" } - }, - { - "documentation": "Partition doesn't support DualStack", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } } ], "version": "1.0" @@ -6123,7 +6112,20 @@ "outputToken": "Marker", "items": "DBEngineVersions", "pageSize": "MaxRecords" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeDBEngineVersionsSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.neptune#DescribeDBEngineVersionsMessage": { @@ -6212,6 +6214,21 @@ "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], + "smithy.test#smokeTests": [ + { + "id": "DescribeDBInstancesFailure", + "params": { + "DBInstanceIdentifier": "fake-id" + }, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "failure": {} + } + } + ], "smithy.waiters#waitable": { "DBInstanceAvailable": { "acceptors": [ diff --git a/models/organizations.json b/models/organizations.json index 35aa482926..9efaec2e81 100644 --- a/models/organizations.json +++ b/models/organizations.json @@ -1902,7 +1902,7 @@ } ], "traits": { - "smithy.api#documentation": "

Attaches a policy to a root, an organizational unit (OU), or an individual account.\n How the policy affects accounts depends on the type of policy. Refer to the\n Organizations User Guide for information about each policy type:

\n \n

This operation can be called only from the organization's\nmanagement account or by a member account that is a delegated administrator for an Amazon Web Services service.

", + "smithy.api#documentation": "

Attaches a policy to a root, an organizational unit (OU), or an individual account.\n How the policy affects accounts depends on the type of policy. Refer to the\n Organizations User Guide for information about each policy type:

\n \n

This operation can be called only from the organization's\nmanagement account or by a member account that is a delegated administrator for an Amazon Web Services service.

", "smithy.api#examples": [ { "title": "To attach a policy to an account", @@ -2128,7 +2128,7 @@ } ], "traits": { - "smithy.api#documentation": "

Closes an Amazon Web Services member account within an organization. You can close an account when\n all\n features are enabled . You can't close the management account with this API.\n This is an asynchronous request that Amazon Web Services performs in the background. Because\n CloseAccount operates asynchronously, it can return a successful\n completion message even though account closure might still be in progress. You need to\n wait a few minutes before the account is fully closed. To check the status of the\n request, do one of the following:

  • Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation. While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED.
  • Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

  • You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizationsin the Organizations User Guide.
  • To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status.
  • If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide.
" + "smithy.api#documentation": "

Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled. You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following:

  • Use the AccountId that you sent in the CloseAccount request to provide as a parameter to the DescribeAccount operation. While the close account request is in progress, Account status will indicate PENDING_CLOSURE. When the close account request completes, the status will change to SUSPENDED.
  • Check the CloudTrail log for the CloseAccountResult event that gets published after the account closes successfully. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

  • You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can't close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizations in the Organizations User Guide.
  • To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status.
  • If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide.
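As a rough illustration of the status-polling flow described above, here is a minimal Soto-style Swift sketch. It assumes an already-configured `organizations` service object and that the generated member and case names (`accountId`, `account?.status`, `.suspended`) follow Soto's usual mapping of this model; treat it as illustrative, not as the definitive client code.

```swift
import SotoOrganizations

// Illustrative sketch: request closure of a member account, then poll
// DescribeAccount until the status moves from PENDING_CLOSURE to SUSPENDED.
// `organizations` is assumed to be an already-configured Organizations client.
func closeAndWait(_ organizations: Organizations, accountId: String) async throws {
    try await organizations.closeAccount(.init(accountId: accountId))

    while true {
        let response = try await organizations.describeAccount(.init(accountId: accountId))
        let status = response.account?.status
        print("Account \(accountId) status: \(String(describing: status))")
        if status == .suspended { break }                   // closure finished
        try await Task.sleep(nanoseconds: 30_000_000_000)   // wait before re-checking
    }
}
```

Checking the CloudTrail CloseAccountResult event, as the second bullet suggests, avoids polling entirely and is usually preferable for automation.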
" } }, "com.amazonaws.organizations#CloseAccountRequest": { @@ -2183,7 +2183,7 @@ } }, "traits": { - "smithy.api#documentation": "

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

  • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.
  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.
  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.
  • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.
  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit. Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts. Deleted and closed accounts still count toward your limit. If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.
  • CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator.
  • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.
  • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.
  • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.
  • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.
  • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time.
  • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.
  • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.
  • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.
  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.
  • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.
  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace.
  • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.
  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.
  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.
  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.
  • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.
  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.
  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.
  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.
  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.
  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.
  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.
  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.
  • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.
  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.
  • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.
  • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.
  • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, there is a waiting period before you can remove it from the organization. If you get an error that indicates that a wait period is required, try again in a few days.
", + "smithy.api#documentation": "

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

  • ACCOUNT_CANNOT_LEAVE_ORGANIZATION: You attempted to remove the management account from the organization. You can't remove the management account. Instead, after you remove all member accounts, delete the organization itself.
  • ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an account from the organization that doesn't yet have enough information to exist as a standalone account. This account requires you to first complete phone verification. Follow the steps at Removing a member account from your organization in the Organizations User Guide.
  • ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can create in one day.
  • ACCOUNT_CREATION_NOT_COMPLETE: Your account setup isn't complete or your account isn't fully active. You must complete the account setup before you create an organization.
  • ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the number of accounts in an organization. If you need more accounts, contact Amazon Web Services Support to request an increase in your limit. Or the number of invitations that you tried to send would cause you to exceed the limit of accounts in your organization. Send fewer invitations or contact Amazon Web Services Support to request an increase in the number of accounts. Deleted and closed accounts still count toward your limit. If you get this exception when running a command immediately after creating the organization, wait one hour and try again. After an hour, if the command continues to fail with this error, contact Amazon Web Services Support.
  • CANNOT_REGISTER_SUSPENDED_ACCOUNT_AS_DELEGATED_ADMINISTRATOR: You cannot register a suspended account as a delegated administrator.
  • CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the management account of the organization as a delegated administrator for an Amazon Web Services service integrated with Organizations. You can designate only a member account as a delegated administrator.
  • CANNOT_CLOSE_MANAGEMENT_ACCOUNT: You attempted to close the management account. To close the management account for the organization, you must first either remove or close all member accounts in the organization. Follow standard account closure process using root credentials.
  • CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an account that is registered as a delegated administrator for a service integrated with your organization. To complete this operation, you must first deregister this account as a delegated administrator.
  • CLOSE_ACCOUNT_QUOTA_EXCEEDED: You have exceeded close account quota for the past 30 days.
  • CLOSE_ACCOUNT_REQUESTS_LIMIT_EXCEEDED: You attempted to exceed the number of accounts that you can close at a time.
  • CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an organization in the specified region, you must enable all features mode.
  • DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an Amazon Web Services account as a delegated administrator for an Amazon Web Services service that already has a delegated administrator. To complete this operation, you must first deregister any existing delegated administrators for this service.
  • EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a limited period of time. You must resubmit the request and generate a new verification code.
  • HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes that you can send in one day.
  • INVALID_PAYMENT_INSTRUMENT: You cannot remove an account because no supported payment method is associated with the account. Amazon Web Services does not support cards issued by financial institutions in Russia or Belarus. For more information, see Managing your Amazon Web Services payments.
  • MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this organization, you first must migrate the organization's management account to the marketplace that corresponds to the management account's address. All accounts in an organization must be associated with the same marketplace.
  • MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the Amazon Web Services Regions in China. To create an organization, the master must have a valid business license. For more information, contact customer support.
  • MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first provide a valid contact address and phone number for the management account. Then try the operation again.
  • MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the management account must have an associated account in the Amazon Web Services GovCloud (US-West) Region. For more information, see Organizations in the Amazon Web Services GovCloud User Guide.
  • MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this management account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.
  • MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to register more delegated administrators than allowed for the service principal.
  • MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number of policies of a certain type that can be attached to an entity at one time.
  • MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this resource.
  • MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation with this member account, you first must associate a valid payment instrument, such as a credit card, with the account. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.
  • MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from an entity that would cause the entity to have fewer than the minimum number of policies of a certain type required.
  • ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation that requires the organization to be configured to support all features. An organization that supports only consolidated billing features can't perform this operation.
  • OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many levels deep.
  • OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs that you can have in an organization.
  • POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger than the maximum size.
  • POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies that you can have in an organization.
  • SERVICE_ACCESS_NOT_ENABLED: You attempted to register a delegated administrator before you enabled service access. Call the EnableAWSServiceAccess API first.
  • TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags that are not compliant with the tag policy requirements for this account.
  • WAIT_PERIOD_ACTIVE: After you create an Amazon Web Services account, you must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period.
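Because the specific reason is carried in the error message rather than in a typed field, callers typically just log it and map a few known reasons to retry or escalation behaviour. A hedged Soto-style sketch follows; the `OrganizationsErrorType` comparison pattern is an assumption about the generated error surface:

```swift
import SotoOrganizations

// Illustrative handling of ConstraintViolationException. The equality check
// against the generated error type is an assumption about Soto's error API;
// adjust to the generated code if it differs.
func removeAccount(_ organizations: Organizations, accountId: String) async {
    do {
        try await organizations.removeAccountFromOrganization(.init(accountId: accountId))
    } catch let error as OrganizationsErrorType where error == .constraintViolationException {
        // The reason string (for example ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION)
        // is part of the error's message.
        print("Constraint violated: \(error)")
    } catch {
        print("Unexpected error: \(error)")
    }
}
```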
", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -2441,7 +2441,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

  • Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.
  • Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission.

Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.

This operation can be called only from the organization's management account.

For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide.

  • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method, is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.
  • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.
  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.
  • Using CreateAccount to create multiple temporary accounts isn't recommended. You can only close an account from the Billing and Cost Management console, and you must be signed in as the root user. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools.
", + "smithy.api#documentation": "

Creates an Amazon Web Services account that is automatically a member of the organization whose credentials made the request. This is an asynchronous request that Amazon Web Services performs in the background. Because CreateAccount operates asynchronously, it can return a successful completion message even though account initialization might still be in progress. You might need to wait a few minutes before you can successfully access the account. To check the status of the request, do one of the following:

  • Use the Id value of the CreateAccountStatus response element from this operation to provide as a parameter to the DescribeCreateAccountStatus operation.
  • Check the CloudTrail log for the CreateAccountResult event. For information on using CloudTrail with Organizations, see Logging and monitoring in Organizations in the Organizations User Guide.

The user who calls the API to create an account must have the organizations:CreateAccount permission. If you enabled all features in the organization, Organizations creates the required service-linked role named AWSServiceRoleForOrganizations. For more information, see Organizations and service-linked roles in the Organizations User Guide.

If the request includes tags, then the requester must have the organizations:TagResource permission.

Organizations preconfigures the new member account with a role (named OrganizationAccountAccessRole by default) that grants users in the management account administrator permissions in the new member account. Principals in the management account can assume the role. Organizations clones the company name and address information for the new account from the organization's management account.

This operation can be called only from the organization's management account.

For more information about creating accounts, see Creating a member account in your organization in the Organizations User Guide.

  • When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such as a payment method, is not automatically collected. If you must remove an account from your organization later, you can do so only after you provide the missing information. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.
  • If you get an exception that indicates that you exceeded your account limits for the organization, contact Amazon Web Services Support.
  • If you get an exception that indicates that the operation failed because your organization is still initializing, wait one hour and then try again. If the error persists, contact Amazon Web Services Support.
  • It isn't recommended to use CreateAccount to create multiple temporary accounts, and using the CreateAccount API to close accounts is subject to a 30-day usage quota. For information on the requirements and process for closing an account, see Closing a member account in your organization in the Organizations User Guide.

When you create a member account with this operation, you can choose whether to create the account with the IAM User and Role Access to Billing Information switch enabled. If you enable it, IAM users and roles that have appropriate permissions can view billing information for the account. If you disable it, only the account root user can access billing information. For information about how to disable this switch for an account, see Granting access to your billing information and tools.
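A minimal Soto-style sketch of the status-check flow described above, assuming an already-configured `organizations` client; the account name and email are placeholders, and the member names (`createAccountStatus`, `createAccountRequestId`, `state`) are assumed to mirror the model shapes:

```swift
import SotoOrganizations

// Illustrative sketch: create a member account and poll the asynchronous
// CreateAccountStatus until it leaves IN_PROGRESS. Values are placeholders.
func createMemberAccount(_ organizations: Organizations) async throws {
    let created = try await organizations.createAccount(.init(
        accountName: "Example Member",
        email: "member@example.com"
    ))
    guard let requestId = created.createAccountStatus?.id else { return }

    var state = created.createAccountStatus?.state
    while state == .inProgress {
        try await Task.sleep(nanoseconds: 5_000_000_000)   // wait before re-checking
        let check = try await organizations.describeCreateAccountStatus(
            .init(createAccountRequestId: requestId)
        )
        state = check.createAccountStatus?.state
    }
    print("CreateAccount finished with state \(String(describing: state))")
}
```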
", "smithy.api#examples": [ { "title": "To create a new account that is automatically part of the organization", @@ -3129,7 +3129,7 @@ "Type": { "target": "com.amazonaws.organizations#PolicyType", "traits": { - "smithy.api#documentation": "

The type of policy to create. You can specify one of the following values:

\n ", + "smithy.api#documentation": "

The type of policy to create. You can specify one of the following values:

\n ", "smithy.api#required": {} } }, @@ -3837,7 +3837,7 @@ "PolicyType": { "target": "com.amazonaws.organizations#EffectivePolicyType", "traits": { - "smithy.api#documentation": "

The type of policy that you want information about. You can specify one of the\n following values:

\n ", + "smithy.api#documentation": "

The type of policy that you want information about. You can specify one of the\n following values:

\n ", "smithy.api#required": {} } }, @@ -4032,7 +4032,7 @@ "Organization": { "target": "com.amazonaws.organizations#Organization", "traits": { - "smithy.api#documentation": "

A structure that contains information about the organization.

\n \n

The AvailablePolicyTypes part of the response is deprecated, and you\n shouldn't use it in your apps. It doesn't include any policy type supported by Organizations\n other than SCPs. To determine which policy types are enabled in your organization,\n use the \n ListRoots\n operation.

\n
" + "smithy.api#documentation": "

A structure that contains information about the organization.

\n \n

The AvailablePolicyTypes part of the response is deprecated, and you\n shouldn't use it in your apps. It doesn't include any policy type supported by Organizations\n other than SCPs. In the China (Ningxia) Region, no policy type is included.\n To determine which policy types are enabled in your organization,\n use the \n ListRoots\n operation.

\n
" } } }, @@ -4073,7 +4073,7 @@ "smithy.api#examples": [ { "title": "To get information about an organizational unit", - "documentation": "The following example shows how to request details about an OU:/n/n", + "documentation": "The following example shows how to request details about an OU:", "input": { "OrganizationalUnitId": "ou-examplerootid111-exampleouid111" }, @@ -4478,7 +4478,7 @@ "PolicyType": { "target": "com.amazonaws.organizations#PolicyType", "traits": { - "smithy.api#documentation": "

The policy type that you want to disable in this root. You can specify one of the\n following values:

\n ", + "smithy.api#documentation": "

The policy type that you want to disable in this root. You can specify one of the\n following values:

\n ", "smithy.api#required": {} } } @@ -4630,6 +4630,12 @@ "traits": { "smithy.api#enumValue": "AISERVICES_OPT_OUT_POLICY" } + }, + "CHATBOT_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CHATBOT_POLICY" + } } } }, @@ -4679,7 +4685,7 @@ } ], "traits": { - "smithy.api#documentation": "

Enables the integration of an Amazon Web Services service (the service that is specified by ServicePrincipal) with Organizations. When you enable integration, you allow the specified service to create a service-linked role in all the accounts in your organization. This allows the service to perform operations on your behalf in your organization and its accounts.

We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

You can only call this operation from the organization's management account and only if the organization has enabled all features.

" + "smithy.api#documentation": "

Provides an Amazon Web Services service (the service that is specified by ServicePrincipal) with permissions to view the structure of an organization, create a service-linked role in all the accounts in the organization, and allow the service to perform operations on behalf of the organization and its accounts. Establishing these permissions can be a first step in enabling the integration of an Amazon Web Services service with Organizations.

We recommend that you enable integration between Organizations and the specified Amazon Web Services service by using the console or commands that are provided by the specified service. Doing so ensures that the service is aware that it can create the resources that are required for the integration. How the service creates those resources in the organization's accounts depends on that service. For more information, see the documentation for the other Amazon Web Services service.

For more information about enabling services to integrate with Organizations, see Using Organizations with other Amazon Web Services services in the Organizations User Guide.

You can only call this operation from the organization's management account and only if the organization has enabled all features.
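A short Soto-style sketch of enabling trusted access for a service principal and then confirming the result; the service principal string is only an example:

```swift
import SotoOrganizations

// Illustrative: allow an AWS service to create its service-linked roles in
// every account of the organization. The service principal is an example.
func enableTrustedAccess(_ organizations: Organizations) async throws {
    try await organizations.enableAWSServiceAccess(
        .init(servicePrincipal: "config.amazonaws.com")
    )

    // Optionally confirm which services are now integrated.
    let enabled = try await organizations.listAWSServiceAccessForOrganization(.init())
    for service in enabled.enabledServicePrincipals ?? [] {
        print(service.servicePrincipal ?? "unknown principal")
    }
}
```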

" } }, "com.amazonaws.organizations#EnableAWSServiceAccessRequest": { @@ -4867,7 +4873,7 @@ "PolicyType": { "target": "com.amazonaws.organizations#PolicyType", "traits": { - "smithy.api#documentation": "

The policy type that you want to enable. You can specify one of the following\n values:

\n ", + "smithy.api#documentation": "

The policy type that you want to enable. You can specify one of the following\n values:

\n ", "smithy.api#required": {} } } @@ -5753,7 +5759,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

This operation can be called only from a member account in the organization.

  • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.
  • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.
      • Choose a support plan
      • Provide and verify the required contact information
      • Provide a current payment method
    Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.
  • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.
  • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.
  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.
  • A newly created account has a waiting period before it can be removed from its organization. If you get an error that indicates that a wait period is required, then try again in a few days.
  • If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization.
", + "smithy.api#documentation": "

Removes a member account from its parent organization. This version of the operation is performed by the account that wants to leave. To remove a member account as a user in the management account, use RemoveAccountFromOrganization instead.

This operation can be called only from a member account in the organization.

  • The management account in an organization with all features enabled can set service control policies (SCPs) that can restrict what administrators of member accounts can do. This includes preventing them from successfully calling LeaveOrganization and leaving the organization.
  • You can leave an organization as a member account only if the account is configured with the information required to operate as a standalone account. When you create an account in an organization using the Organizations console, API, or CLI commands, the information required of standalone accounts is not automatically collected. For each account that you want to make standalone, you must perform the following steps. If any of the steps are already completed for this account, that step doesn't appear.
      • Choose a support plan
      • Provide and verify the required contact information
      • Provide a current payment method
    Amazon Web Services uses the payment method to charge for any billable (not free tier) Amazon Web Services activity that occurs while the account isn't attached to an organization. For more information, see Considerations before removing an account from an organization in the Organizations User Guide.
  • The account that you want to leave must not be a delegated administrator account for any Amazon Web Services service enabled for your organization. If the account is a delegated administrator, you must first change the delegated administrator account to another account that is remaining in the organization.
  • You can leave an organization only after you enable IAM user access to billing in your account. For more information, see About IAM access to the Billing and Cost Management console in the Amazon Web Services Billing and Cost Management User Guide.
  • After the account leaves the organization, all tags that were attached to the account object in the organization are deleted. Amazon Web Services accounts outside of an organization do not support tags.
  • A newly created account has a waiting period before it can be removed from its organization. You must wait until at least seven days after the account was created. Invited accounts aren't subject to this waiting period.
  • If you are using an organization principal to call LeaveOrganization across multiple accounts, you can only do this up to 5 accounts per second in a single organization.
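A hedged sketch of a member account removing itself. LeaveOrganization takes no input members in this model, so the parameterless call below is an assumption about the generated Soto signature:

```swift
import SotoOrganizations

// Illustrative: a member account leaving its organization. Expect a
// ConstraintViolationException (for example WAIT_PERIOD_ACTIVE on accounts
// younger than seven days) if the prerequisites above are not met.
func leave(_ organizations: Organizations) async {
    do {
        try await organizations.leaveOrganization()
    } catch {
        print("LeaveOrganization failed: \(error)")
    }
}
```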
", "smithy.api#examples": [ { "title": "To leave an organization as a member account", @@ -7116,7 +7122,7 @@ "Filter": { "target": "com.amazonaws.organizations#PolicyType", "traits": { - "smithy.api#documentation": "

The type of policy that you want to include in the returned list. You must specify one\n of the following values:

\n ", + "smithy.api#documentation": "

The type of policy that you want to include in the returned list. You must specify one\n of the following values:

\n ", "smithy.api#required": {} } }, @@ -7163,7 +7169,7 @@ "Filter": { "target": "com.amazonaws.organizations#PolicyType", "traits": { - "smithy.api#documentation": "

Specifies the type of policy that you want to include in the response. You must\n specify one of the following values:

\n ", + "smithy.api#documentation": "

Specifies the type of policy that you want to include in the response. You must\n specify one of the following values:

\n ", "smithy.api#required": {} } }, @@ -8107,6 +8113,12 @@ "traits": { "smithy.api#enumValue": "AISERVICES_OPT_OUT_POLICY" } + }, + "CHATBOT_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CHATBOT_POLICY" + } } } }, diff --git a/models/outposts.json b/models/outposts.json index ab20bd0f19..50c92efa94 100644 --- a/models/outposts.json +++ b/models/outposts.json @@ -3200,7 +3200,7 @@ "Status": { "target": "com.amazonaws.outposts#OrderStatus", "traits": { - "smithy.api#documentation": "

The status of the order.

  • PREPARING - Order is received and being prepared.
  • IN_PROGRESS - Order is either being built, shipped, or installed. To get more details, see the line item status.
  • COMPLETED - Order is complete.
  • CANCELLED - Order is cancelled.
  • ERROR - Customer should contact support.

The following statuses are deprecated: RECEIVED, PENDING, PROCESSING, INSTALLING, and FULFILLED.
" + "smithy.api#documentation": "

The status of the order.

  • PREPARING - Order is received and being prepared.
  • IN_PROGRESS - Order is either being built or shipped. To get more details, see the line item status.
  • DELIVERED - Order was delivered to the Outpost site.
  • COMPLETED - Order is complete.
  • CANCELLED - Order is cancelled.
  • ERROR - Customer should contact support.

The following statuses are deprecated: RECEIVED, PENDING, PROCESSING, INSTALLING, and FULFILLED.
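Callers that branch on the generated OrderStatus enum gain an extra case with this change. A sketch, assuming an `outposts` client and Soto's usual lowerCamelCase case names:

```swift
import SotoOutposts

// Illustrative: fetch an order and branch on its status, including the newly
// added DELIVERED value. Names assume Soto's usual mapping of this model.
func reportOrderStatus(_ outposts: Outposts, orderId: String) async throws {
    let output = try await outposts.getOrder(.init(orderId: orderId))
    guard let status = output.order?.status else {
        print("Order has no status")
        return
    }
    switch status {
    case .preparing, .inProgress:
        print("Order is still being built or shipped")
    case .delivered:
        print("Order was delivered to the Outpost site")
    case .completed:
        print("Order is complete")
    case .cancelled:
        print("Order is cancelled")
    case .error:
        print("Contact AWS Support")
    default:
        print("Deprecated or unknown status")
    }
}
```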
" } }, "LineItems": { @@ -3305,6 +3305,12 @@ "smithy.api#enumValue": "IN_PROGRESS" } }, + "DELIVERED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DELIVERED" + } + }, "COMPLETED": { "target": "smithy.api#Unit", "traits": { diff --git a/models/pca-connector-scep.json b/models/pca-connector-scep.json index 5e404ec6df..66dbec4d5b 100644 --- a/models/pca-connector-scep.json +++ b/models/pca-connector-scep.json @@ -1368,7 +1368,7 @@ ], "maxAge": 86400 }, - "smithy.api#documentation": "\n

Connector for SCEP (Preview) is in preview release for Amazon Web Services Private Certificate Authority and is subject to change.

Connector for SCEP (Preview) creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide.

", + "smithy.api#documentation": "

Connector for SCEP creates a connector between Amazon Web Services Private CA and your SCEP-enabled clients and devices. For more information, see Connector for SCEP in the Amazon Web Services Private CA User Guide.

", "smithy.api#title": "Private CA Connector for SCEP", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/pcs.json b/models/pcs.json index 9b954eeee9..b823ea8f27 100644 --- a/models/pcs.json +++ b/models/pcs.json @@ -1156,7 +1156,7 @@ "iamInstanceProfileArn": { "target": "com.amazonaws.pcs#InstanceProfileArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached to provision instances correctly.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have the pcs:RegisterComputeNodeGroupInstance permission. The resource identifier of the ARN must start with AWSPCS or it must have /aws-pcs/ in its path.

Examples

  • arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1
  • arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2
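The naming rule above is easy to check client-side before creating a compute node group. A small sketch of the rule as described (not an official validator, just a direct translation of the two allowed patterns):

```swift
import Foundation

// Rough check mirroring the documented rule: the instance-profile ARN must
// either have a resource name starting with "AWSPCS" or contain "/aws-pcs/"
// in its path.
func isAcceptablePcsInstanceProfileArn(_ arn: String) -> Bool {
    guard let resource = arn.split(separator: ":").last else { return false }
    // `resource` looks like "instance-profile/<optional path>/<name>".
    return resource.hasPrefix("instance-profile/AWSPCS")
        || resource.contains("/aws-pcs/")
}

assert(isAcceptablePcsInstanceProfileArn(
    "arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1"))
assert(isAcceptablePcsInstanceProfileArn(
    "arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2"))
```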
", "smithy.api#required": {} } }, @@ -1642,7 +1642,7 @@ "iamInstanceProfileArn": { "target": "com.amazonaws.pcs#InstanceProfileArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached in order to provision instances correctly. The resource identifier of the ARN must start with AWSPCS. For example, arn:aws:iam:123456789012:instance-profile/AWSPCSMyComputeNodeInstanceProfile.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have the pcs:RegisterComputeNodeGroupInstance permission. The resource identifier of the ARN must start with AWSPCS or it must have /aws-pcs/ in its path.

Examples

  • arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1
  • arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2
", "smithy.api#required": {} } }, @@ -3362,7 +3362,7 @@ "parameterName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

Amazon Web Services PCS supports configuration of the following Slurm parameters: Prolog, Epilog, and SelectTypeParameters.

", + "smithy.api#documentation": "

Amazon Web Services PCS supports configuration of the following Slurm parameters:

\n ", "smithy.api#required": {} } }, @@ -3722,7 +3722,7 @@ "iamInstanceProfileArn": { "target": "com.amazonaws.pcs#InstanceProfileArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have pcs:RegisterComputeNodeGroupInstance permissions attached to provision instances correctly.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM instance profile used to pass an IAM role when launching EC2 instances. The role contained in your instance profile must have the pcs:RegisterComputeNodeGroupInstance permission. The resource identifier of the ARN must start with AWSPCS or it must have /aws-pcs/ in its path.

Examples

  • arn:aws:iam::111122223333:instance-profile/AWSPCS-example-role-1
  • arn:aws:iam::111122223333:instance-profile/aws-pcs/example-role-2
" } }, "slurmConfiguration": { diff --git a/models/pinpoint-sms-voice-v2.json b/models/pinpoint-sms-voice-v2.json index e5717832f6..e3bb299025 100644 --- a/models/pinpoint-sms-voice-v2.json +++ b/models/pinpoint-sms-voice-v2.json @@ -204,14 +204,14 @@ "PoolId": { "target": "com.amazonaws.pinpointsmsvoicev2#PoolIdOrArn", "traits": { - "smithy.api#documentation": "

The pool to update with the new Identity. This value can be either the PoolId or\n PoolArn, and you can find these values using DescribePools.

", + "smithy.api#documentation": "

The pool to update with the new Identity. This value can be either the PoolId or PoolArn, and you can find these values using DescribePools.

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).
", "smithy.api#required": {} } }, "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneOrSenderIdOrArn", "traits": { - "smithy.api#documentation": "

The origination identity to use, such as PhoneNumberId, PhoneNumberArn, SenderId, or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn, while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

", + "smithy.api#documentation": "

The origination identity to use, such as PhoneNumberId, PhoneNumberArn, SenderId, or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).
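For shared pools and origination identities, both values must therefore be full ARNs. A hedged Soto-style sketch of the call; the service object name, member names, and the example ARNs are illustrative assumptions:

```swift
import SotoPinpointSMSVoiceV2

// Illustrative: associate a phone number with a pool that was shared from
// another account, passing full ARNs as the documentation requires.
func associateSharedIdentity(_ smsVoice: PinpointSMSVoiceV2) async throws {
    _ = try await smsVoice.associateOriginationIdentity(.init(
        isoCountryCode: "US",
        originationIdentity: "arn:aws:sms-voice:us-east-1:111122223333:phone-number/phone-1111111111111111",
        poolId: "arn:aws:sms-voice:us-east-1:111122223333:pool/pool-2222222222222222"
    ))
}
```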
", "smithy.api#required": {} } }, @@ -1149,7 +1149,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneOrSenderIdOrArn", "traits": { - "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

\n

After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity.

", + "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

\n

After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).

\n
", "smithy.api#required": {} } }, @@ -2339,7 +2339,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneOrPoolIdOrArn", "traits": { - "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, PoolId or\n PoolArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn and DescribePools to find the values\n of PoolId and PoolArn.

", + "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, PoolId or\n PoolArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn and DescribePools to find the values\n of PoolId and PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).

\n
", "smithy.api#required": {} } }, @@ -2478,7 +2478,7 @@ "OptOutListName": { "target": "com.amazonaws.pinpointsmsvoicev2#OptOutListNameOrArn", "traits": { - "smithy.api#documentation": "

The OptOutListName or OptOutListArn of the OptOutList to delete. You can use DescribeOptOutLists to find the values for OptOutListName and\n OptOutListArn.

", + "smithy.api#documentation": "

The OptOutListName or OptOutListArn of the OptOutList to delete. You can use DescribeOptOutLists to find the values for OptOutListName and\n OptOutListArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).

\n
", "smithy.api#required": {} } } @@ -2551,7 +2551,7 @@ "OptOutListName": { "target": "com.amazonaws.pinpointsmsvoicev2#OptOutListNameOrArn", "traits": { - "smithy.api#documentation": "

The OptOutListName or OptOutListArn to remove the phone number from.

", + "smithy.api#documentation": "

The OptOutListName or OptOutListArn to remove the phone number from.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).

\n
", "smithy.api#required": {} } }, @@ -2644,7 +2644,7 @@ "PoolId": { "target": "com.amazonaws.pinpointsmsvoicev2#PoolIdOrArn", "traits": { - "smithy.api#documentation": "

The PoolId or PoolArn of the pool to delete. You can use DescribePools to find the values for PoolId and PoolArn .

", + "smithy.api#documentation": "

The PoolId or PoolArn of the pool to delete. You can use DescribePools to find the values for PoolId and PoolArn .

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).

\n
", "smithy.api#required": {} } } @@ -3134,6 +3134,76 @@ "smithy.api#output": {} } }, + "com.amazonaws.pinpointsmsvoicev2#DeleteResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpointsmsvoicev2#DeleteResourcePolicyRequest" + }, + "output": { + "target": "com.amazonaws.pinpointsmsvoicev2#DeleteResourcePolicyResult" + }, + "errors": [ + { + "target": "com.amazonaws.pinpointsmsvoicev2#AccessDeniedException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#InternalServerException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ThrottlingException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the resource-based policy document attached to the AWS End User Messaging SMS and Voice resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number.
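A hedged Soto-style sketch of calling the new operation; the module name, service object, member names, and the idea of passing a full resource ARN follow the shapes added in this hunk, but are assumptions about the generated code:

```swift
import SotoPinpointSMSVoiceV2

// Illustrative: delete the resource-based policy from a shared resource by ARN.
func deleteSharingPolicy(_ smsVoice: PinpointSMSVoiceV2, resourceArn: String) async throws {
    let result = try await smsVoice.deleteResourcePolicy(.init(resourceArn: resourceArn))
    print("Removed policy created at \(String(describing: result.createdTimestamp))")
}
```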

" + } + }, + "com.amazonaws.pinpointsmsvoicev2#DeleteResourcePolicyRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.pinpointsmsvoicev2#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource you're deleting the resource-based policy from.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pinpointsmsvoicev2#DeleteResourcePolicyResult": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.pinpointsmsvoicev2#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource that the resource-based policy was deleted from.

" + } + }, + "Policy": { + "target": "com.amazonaws.pinpointsmsvoicev2#ResourcePolicy", + "traits": { + "smithy.api#documentation": "

The JSON formatted resource-based policy that was deleted.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the resource-based policy was created, in UNIX epoch time format.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.pinpointsmsvoicev2#DeleteTextMessageSpendLimitOverride": { "type": "operation", "input": { @@ -3581,7 +3651,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneOrPoolIdOrArn", "traits": { - "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

", + "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).

\n
", "smithy.api#required": {} } }, @@ -3684,7 +3754,7 @@ "OptOutListNames": { "target": "com.amazonaws.pinpointsmsvoicev2#OptOutListNameList", "traits": { - "smithy.api#documentation": "

The OptOutLists to show the details of. This is an array of strings that can be either\n the OptOutListName or OptOutListArn.

" + "smithy.api#documentation": "

The OptOutLists to show the details of. This is an array of strings that can be either\n the OptOutListName or OptOutListArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource then you must use the full Amazon Resource Name(ARN).

\n
" } }, "NextToken": { @@ -3698,6 +3768,12 @@ "traits": { "smithy.api#documentation": "

The maximum number of results to return per each request.

" } + }, + "Owner": { + "target": "com.amazonaws.pinpointsmsvoicev2#Owner", + "traits": { + "smithy.api#documentation": "

Use SELF to filter the list of Opt-Out List to ones your account owns or use SHARED to filter on Opt-Out List shared with your account. The Owner and OptOutListNames parameters can't be used at the same time.

" + } } }, "traits": { @@ -3762,7 +3838,7 @@ "OptOutListName": { "target": "com.amazonaws.pinpointsmsvoicev2#OptOutListNameOrArn", "traits": { - "smithy.api#documentation": "

The OptOutListName or OptOutListArn of the OptOutList. You can use DescribeOptOutLists to find the values for OptOutListName and\n OptOutListArn.

", + "smithy.api#documentation": "

The OptOutListName or OptOutListArn of the OptOutList. You can use DescribeOptOutLists to find the values for OptOutListName and\n OptOutListArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -3865,7 +3941,7 @@ "PhoneNumberIds": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneNumberIdList", "traits": { - "smithy.api#documentation": "

The unique identifier of phone numbers to find information about. This is an array of\n strings that can be either the PhoneNumberId or PhoneNumberArn.

" + "smithy.api#documentation": "

The unique identifier of phone numbers to find information about. This is an array of\n strings that can be either the PhoneNumberId or PhoneNumberArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
" } }, "Filters": { @@ -3885,6 +3961,12 @@ "traits": { "smithy.api#documentation": "

The maximum number of results to return per each request.

" } + }, + "Owner": { + "target": "com.amazonaws.pinpointsmsvoicev2#Owner", + "traits": { + "smithy.api#documentation": "

Use SELF to filter the list of phone numbers to ones your account owns or use SHARED to filter on phone numbers shared with your account. The Owner and PhoneNumberIds parameters can't be used at the same time.

" + } } }, "traits": { @@ -3949,7 +4031,7 @@ "PoolIds": { "target": "com.amazonaws.pinpointsmsvoicev2#PoolIdList", "traits": { - "smithy.api#documentation": "

The unique identifier of pools to find. This is an array of strings that can be either\n the PoolId or PoolArn.

" + "smithy.api#documentation": "

The unique identifier of pools to find. This is an array of strings that can be either\n the PoolId or PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
" } }, "Filters": { @@ -3969,6 +4051,12 @@ "traits": { "smithy.api#documentation": "

The maximum number of results to return per each request.

" } + }, + "Owner": { + "target": "com.amazonaws.pinpointsmsvoicev2#Owner", + "traits": { + "smithy.api#documentation": "

Use SELF to filter the list of Pools to ones your account owns or use SHARED to filter on Pools shared with your account. The Owner and PoolIds parameters can't be used at the same time.

" + } } }, "traits": { @@ -4780,7 +4868,7 @@ "SenderIds": { "target": "com.amazonaws.pinpointsmsvoicev2#SenderIdList", "traits": { - "smithy.api#documentation": "

An array of SenderIdAndCountry objects to search for.

" + "smithy.api#documentation": "

An array of SenderIdAndCountry objects to search for.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
" } }, "Filters": { @@ -4800,6 +4888,12 @@ "traits": { "smithy.api#documentation": "

The maximum number of results to return per each request.

" } + }, + "Owner": { + "target": "com.amazonaws.pinpointsmsvoicev2#Owner", + "traits": { + "smithy.api#documentation": "

Use SELF to filter the list of Sender Ids to ones your account owns or use SHARED to filter on Sender Ids shared with your account. The Owner and SenderIds parameters can't be used at the same time.

" + } } }, "traits": { @@ -5074,14 +5168,14 @@ "PoolId": { "target": "com.amazonaws.pinpointsmsvoicev2#PoolIdOrArn", "traits": { - "smithy.api#documentation": "

The unique identifier for the pool to disassociate with the origination identity. This\n value can be either the PoolId or PoolArn.

", + "smithy.api#documentation": "

The unique identifier for the pool to disassociate with the origination identity. This\n value can be either the PoolId or PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneOrSenderIdOrArn", "traits": { - "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers find the values for\n PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the\n values for SenderId and SenderIdArn.

", + "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to find the values for\n PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the\n values for SenderId and SenderIdArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -5740,6 +5834,76 @@ "smithy.api#output": {} } }, + "com.amazonaws.pinpointsmsvoicev2#GetResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpointsmsvoicev2#GetResourcePolicyRequest" + }, + "output": { + "target": "com.amazonaws.pinpointsmsvoicev2#GetResourcePolicyResult" + }, + "errors": [ + { + "target": "com.amazonaws.pinpointsmsvoicev2#AccessDeniedException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#InternalServerException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ThrottlingException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves the JSON text of the resource-based policy document attached to the AWS End User Messaging SMS and Voice resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number.

" + } + }, + "com.amazonaws.pinpointsmsvoicev2#GetResourcePolicyRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.pinpointsmsvoicev2#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource attached to the resource-based policy.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pinpointsmsvoicev2#GetResourcePolicyResult": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.pinpointsmsvoicev2#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource attached to the resource-based policy.

" + } + }, + "Policy": { + "target": "com.amazonaws.pinpointsmsvoicev2#ResourcePolicy", + "traits": { + "smithy.api#documentation": "

The JSON formatted string that contains the resource-based policy attached to the AWS End User Messaging SMS and Voice resource.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the resource-based policy was created, in UNIX epoch time format.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.pinpointsmsvoicev2#IamRoleArn": { "type": "string", "traits": { @@ -6027,7 +6191,7 @@ "PoolId": { "target": "com.amazonaws.pinpointsmsvoicev2#PoolIdOrArn", "traits": { - "smithy.api#documentation": "

The unique identifier for the pool. This value can be either the PoolId or\n PoolArn.

", + "smithy.api#documentation": "

The unique identifier for the pool. This value can be either the PoolId or\n PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -6665,6 +6829,21 @@ "target": "com.amazonaws.pinpointsmsvoicev2#OriginationIdentityMetadata" } }, + "com.amazonaws.pinpointsmsvoicev2#Owner": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SELF", + "name": "SELF" + }, + { + "value": "SHARED", + "name": "SHARED" + } + ] + } + }, "com.amazonaws.pinpointsmsvoicev2#PhoneNumber": { "type": "string", "traits": { @@ -7016,6 +7195,9 @@ { "target": "com.amazonaws.pinpointsmsvoicev2#DeleteRegistrationFieldValue" }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#DeleteResourcePolicy" + }, { "target": "com.amazonaws.pinpointsmsvoicev2#DeleteTextMessageSpendLimitOverride" }, @@ -7094,6 +7276,9 @@ { "target": "com.amazonaws.pinpointsmsvoicev2#GetProtectConfigurationCountryRuleSet" }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#GetResourcePolicy" + }, { "target": "com.amazonaws.pinpointsmsvoicev2#ListPoolOriginationIdentities" }, @@ -7112,6 +7297,9 @@ { "target": "com.amazonaws.pinpointsmsvoicev2#PutRegistrationFieldValue" }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#PutResourcePolicy" + }, { "target": "com.amazonaws.pinpointsmsvoicev2#ReleasePhoneNumber" }, @@ -8454,7 +8642,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneOrPoolIdOrArn", "traits": { - "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers get the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

", + "smithy.api#documentation": "

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or\n SenderIdArn. You can use DescribePhoneNumbers to get the values for\n PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used\n to get the values for SenderId and SenderIdArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -8556,7 +8744,7 @@ "OptOutListName": { "target": "com.amazonaws.pinpointsmsvoicev2#OptOutListNameOrArn", "traits": { - "smithy.api#documentation": "

The OptOutListName or OptOutListArn to add the phone number to.

", + "smithy.api#documentation": "

The OptOutListName or OptOutListArn to add the phone number to.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -8737,6 +8925,83 @@ "smithy.api#output": {} } }, + "com.amazonaws.pinpointsmsvoicev2#PutResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpointsmsvoicev2#PutResourcePolicyRequest" + }, + "output": { + "target": "com.amazonaws.pinpointsmsvoicev2#PutResourcePolicyResult" + }, + "errors": [ + { + "target": "com.amazonaws.pinpointsmsvoicev2#AccessDeniedException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#InternalServerException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ThrottlingException" + }, + { + "target": "com.amazonaws.pinpointsmsvoicev2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Attaches a resource-based policy to an AWS End User Messaging SMS and Voice resource (phone number, sender Id, phone pool, or opt-out list) that is used for\n sharing the resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. For more information about\n resource-based policies, see Working with shared resources in the AWS End User Messaging SMS User Guide.

" + } + }, + "com.amazonaws.pinpointsmsvoicev2#PutResourcePolicyRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.pinpointsmsvoicev2#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource to attach the resource-based policy to.

", + "smithy.api#required": {} + } + }, + "Policy": { + "target": "com.amazonaws.pinpointsmsvoicev2#ResourcePolicy", + "traits": { + "smithy.api#documentation": "

The JSON formatted resource-based policy to attach.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.pinpointsmsvoicev2#PutResourcePolicyResult": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.pinpointsmsvoicev2#AmazonResourceName", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AWS End User Messaging SMS and Voice resource attached to the resource-based policy.

" + } + }, + "Policy": { + "target": "com.amazonaws.pinpointsmsvoicev2#ResourcePolicy", + "traits": { + "smithy.api#documentation": "

The JSON formatted Resource Policy.

" + } + }, + "CreatedTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time when the resource-based policy was created, in UNIX epoch time format.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.pinpointsmsvoicev2#RegistrationAssociationBehavior": { "type": "string", "traits": { @@ -9824,7 +10089,7 @@ "PhoneNumberId": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneNumberIdOrArn", "traits": { - "smithy.api#documentation": "

The PhoneNumberId or PhoneNumberArn of the phone number to release. You can use DescribePhoneNumbers to get the values for PhoneNumberId and\n PhoneNumberArn.

", + "smithy.api#documentation": "

The PhoneNumberId or PhoneNumberArn of the phone number to release. You can use DescribePhoneNumbers to get the values for PhoneNumberId and\n PhoneNumberArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } } @@ -10119,13 +10384,13 @@ "OptOutListName": { "target": "com.amazonaws.pinpointsmsvoicev2#OptOutListNameOrArn", "traits": { - "smithy.api#documentation": "

The name of the OptOutList to associate with the phone number. You can use the\n OptOutListName or OptOutListArn.

" + "smithy.api#documentation": "

The name of the OptOutList to associate with the phone number. You can use the\n OptOutListName or OptOutListArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
" } }, "PoolId": { "target": "com.amazonaws.pinpointsmsvoicev2#PoolIdOrArn", "traits": { - "smithy.api#documentation": "

The pool to associated with the phone number. You can use the PoolId or PoolArn.

" + "smithy.api#documentation": "

The pool to associate with the phone number. You can use the PoolId or PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
" } }, "RegistrationId": { @@ -10484,6 +10749,15 @@ "smithy.api#error": "client" } }, + "com.amazonaws.pinpointsmsvoicev2#ResourcePolicy": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10000 + } + } + }, "com.amazonaws.pinpointsmsvoicev2#ResourceType": { "type": "string", "traits": { @@ -10539,6 +10813,10 @@ { "value": "protect-configuration", "name": "PROTECT_CONFIGURATION" + }, + { + "value": "policy", + "name": "POLICY" } ] } @@ -10707,7 +10985,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#VerificationMessageOriginationIdentity", "traits": { - "smithy.api#documentation": "

The origination identity of the message. This can be either the PhoneNumber,\n PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

" + "smithy.api#documentation": "

The origination identity of the message. This can be either the PhoneNumber,\n PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
" } }, "ConfigurationSetName": { @@ -10796,7 +11074,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#MediaMessageOriginationIdentity", "traits": { - "smithy.api#documentation": "

The origination identity of the message. This can be either the PhoneNumber,\n PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

", + "smithy.api#documentation": "

The origination identity of the message. This can be either the PhoneNumber,\n PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -10916,7 +11194,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#TextMessageOriginationIdentity", "traits": { - "smithy.api#documentation": "

The origination identity of the message. This can be either the PhoneNumber,\n PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

" + "smithy.api#documentation": "

The origination identity of the message. This can be either the PhoneNumber,\n PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
" } }, "MessageBody": { @@ -11047,7 +11325,7 @@ "OriginationIdentity": { "target": "com.amazonaws.pinpointsmsvoicev2#VoiceMessageOriginationIdentity", "traits": { - "smithy.api#documentation": "

The origination identity to use for the voice call. This can be the PhoneNumber,\n PhoneNumberId, PhoneNumberArn, PoolId, or PoolArn.

", + "smithy.api#documentation": "

The origination identity to use for the voice call. This can be the PhoneNumber,\n PhoneNumberId, PhoneNumberArn, PoolId, or PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -12469,7 +12747,7 @@ "PhoneNumberId": { "target": "com.amazonaws.pinpointsmsvoicev2#PhoneNumberIdOrArn", "traits": { - "smithy.api#documentation": "

The unique identifier of the phone number. Valid values for this field can be either\n the PhoneNumberId or PhoneNumberArn.

", + "smithy.api#documentation": "

The unique identifier of the phone number. Valid values for this field can be either\n the PhoneNumberId or PhoneNumberArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -12665,7 +12943,7 @@ "PoolId": { "target": "com.amazonaws.pinpointsmsvoicev2#PoolIdOrArn", "traits": { - "smithy.api#documentation": "

The unique identifier of the pool to update. Valid values are either the PoolId or\n PoolArn.

", + "smithy.api#documentation": "

The unique identifier of the pool to update. Valid values are either the PoolId or\n PoolArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
", "smithy.api#required": {} } }, @@ -12696,7 +12974,7 @@ "OptOutListName": { "target": "com.amazonaws.pinpointsmsvoicev2#OptOutListNameOrArn", "traits": { - "smithy.api#documentation": "

The OptOutList to associate with the pool. Valid values are either OptOutListName or\n OptOutListArn.

" + "smithy.api#documentation": "

The OptOutList to associate with the pool. Valid values are either OptOutListName or\n OptOutListArn.

\n \n

If you are using a shared AWS End User Messaging SMS and Voice resource, then you must use the full Amazon Resource Name (ARN).

\n
" } }, "SharedRoutesEnabled": { diff --git a/models/pricing.json b/models/pricing.json index ded5814c1c..4f6cdf0b49 100644 --- a/models/pricing.json +++ b/models/pricing.json @@ -33,7 +33,7 @@ "name": "pricing" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

The Amazon Web Services Price List API is a centralized and convenient way to programmatically\n query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location,\n Storage Class, and Operating System, and provides prices at\n the SKU level. You can use the Amazon Web Services Price List to do the following:

  • Build cost control and scenario planning tools
  • Reconcile billing data
  • Forecast future spend for budgeting purposes
  • Provide cost benefit analysis that compare your internal workloads with Amazon Web Services

Use GetServices without a service code to retrieve the service codes for\n all Amazon Web Services, then GetServices with a service code to\n retrieve the attribute names for that service. After you have the service code and\n attribute names, you can use GetAttributeValues to see what values are\n available for an attribute. With the service code and an attribute name and value, you can\n use GetProducts to find specific products that you're interested in, such as\n an AmazonEC2 instance, with a Provisioned IOPS\n volumeType.

\n

For more information, see Using the\n Amazon Web Services Price List API in the Billing User\n Guide.

", + "smithy.api#documentation": "

The Amazon Web Services Price List API is a centralized and convenient way to programmatically\n query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location,\n Storage Class, and Operating System, and provides prices at\n the SKU level. You can use the Amazon Web Services Price List to do the following:

  • Build cost control and scenario planning tools
  • Reconcile billing data
  • Forecast future spend for budgeting purposes
  • Provide cost benefit analysis that compares your internal workloads with Amazon Web Services

Use GetServices without a service code to retrieve the service codes for\n all Amazon Web Services services, then GetServices with a service code to\n retrieve the attribute names for that service. After you have the service code and\n attribute names, you can use GetAttributeValues to see what values are\n available for an attribute. With the service code and an attribute name and value, you can\n use GetProducts to find specific products that you're interested in, such as\n an AmazonEC2 instance, with a Provisioned IOPS\n volumeType.

\n

For more information, see Using the\n Amazon Web Services Price List API in the Billing User\n Guide.

", "smithy.api#title": "AWS Price List Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -788,6 +788,33 @@ ], "traits": { "smithy.api#documentation": "

Returns the metadata for one service or a list of the metadata for all services. Use\n this without a service code to get the service codes for all services.\n Use it with a service code, such as AmazonEC2, to get information specific to \n that service, such as the attribute \n names available for that service. For example, some of the attribute names available for EC2 are \n volumeType, maxIopsVolume, operation,\n locationType, and instanceCapacity10xlarge.

", + "smithy.api#examples": [ + { + "title": "To retrieve a list of services and service codes", + "documentation": "Retrieves the service for the given Service Code.", + "input": { + "ServiceCode": "AmazonEC2", + "FormatVersion": "aws_v1", + "MaxResults": 1 + }, + "output": { + "FormatVersion": "aws_v1", + "NextToken": "abcdefg123", + "Services": [ + { + "AttributeNames": [ + "volumeType", + "maxIopsvolume", + "instanceCapacity10xlarge", + "locationType", + "operation" + ], + "ServiceCode": "AmazonEC2" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -962,6 +989,28 @@ ], "traits": { "smithy.api#documentation": "

Returns a list of attribute values. Attributes are similar to the details \n in a Price List API offer file. For a list of available attributes, see \n Offer File Definitions\n in the Billing and Cost Management User Guide.

", + "smithy.api#examples": [ + { + "title": "To retrieve a list of attribute values", + "documentation": "This operation returns a list of values available for the given attribute.", + "input": { + "ServiceCode": "AmazonEC2", + "AttributeName": "volumeType", + "MaxResults": 2 + }, + "output": { + "NextToken": "GpgauEXAMPLEezucl5LV0w==:7GzYJ0nw0DBTJ2J66EoTIIynE6O1uXwQtTRqioJzQadBnDVgHPzI1en4BUQnPCLpzeBk9RQQAWaFieA4+DapFAGLgk+Z/9/cTw9GldnPOHN98+FdmJP7wKU3QQpQ8MQr5KOeBkIsAqvAQYdL0DkL7tHwPtE5iCEByAmg9gcC/yBU1vAOsf7R3VaNN4M5jMDv3woSWqASSIlBVB6tgW78YL22KhssoItM/jWW+aP6Jqtq4mldxp/ct6DWAl+xLFwHU/CbketimPPXyqHF3/UXDw==", + "AttributeValues": [ + { + "Value": "Throughput Optimized HDD" + }, + { + "Value": "Provisioned IOPS" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", diff --git a/models/qbusiness.json b/models/qbusiness.json index 8565234f25..0316556226 100644 --- a/models/qbusiness.json +++ b/models/qbusiness.json @@ -1312,7 +1312,7 @@ "userGroups": { "target": "com.amazonaws.qbusiness#UserGroups", "traits": { - "smithy.api#documentation": "

The groups that a user associated with the chat input belongs to.

", + "smithy.api#documentation": "

The group names that a user associated with the chat input belongs to.

", "smithy.api#httpQuery": "userGroups" } }, @@ -1546,7 +1546,7 @@ "userGroups": { "target": "com.amazonaws.qbusiness#UserGroups", "traits": { - "smithy.api#documentation": "

The groups that a user associated with the chat input belongs to.

", + "smithy.api#documentation": "

The group names that a user associated with the chat input belongs to.

", "smithy.api#httpQuery": "userGroups" } }, @@ -1958,7 +1958,7 @@ "roleArn": { "target": "com.amazonaws.qbusiness#RoleArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon\n CloudWatch logs and metrics.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon\n CloudWatch logs and metrics. If this property is not specified, Amazon Q Business will create a service linked role (SLR) and use it as the\n application's role.

" } }, "identityType": { @@ -2748,6 +2748,12 @@ "smithy.api#documentation": "

Determines whether sample prompts are enabled in the web experience for an end\n user.

" } }, + "origins": { + "target": "com.amazonaws.qbusiness#WebExperienceOrigins", + "traits": { + "smithy.api#documentation": "

Sets the website domain origins that \n are allowed to embed the Amazon Q Business web experience.\n \n The domain origin refers to the \n base URL for accessing a website including the protocol \n (http/https), the domain name, and the port number (if specified).\n

\n \n

You must only submit a base URL and \n not a full path. For example, https://docs.aws.amazon.com.

\n
" + } + }, "roleArn": { "target": "com.amazonaws.qbusiness#RoleArn", "traits": { @@ -6376,6 +6382,12 @@ "smithy.api#documentation": "

Determines whether sample prompts are enabled in the web experience for an end\n user.

" } }, + "origins": { + "target": "com.amazonaws.qbusiness#WebExperienceOrigins", + "traits": { + "smithy.api#documentation": "

Gets the website domain origins that \n are allowed to embed the Amazon Q Business web experience.\n \n The domain origin refers to the \n base URL for accessing a website including the protocol \n (http/https), the domain name, and the port number (if specified).\n

" + } + }, "roleArn": { "target": "com.amazonaws.qbusiness#RoleArn", "traits": { @@ -8713,6 +8725,16 @@ "smithy.api#documentation": "

Information about the OIDC-compliant identity provider (IdP) used to authenticate end\n users of an Amazon Q Business web experience.

" } }, + "com.amazonaws.qbusiness#Origin": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^(http://|https://)[a-zA-Z0-9-_.]+(?::[0-9]{1,5})?$" + } + }, "com.amazonaws.qbusiness#Payload": { "type": "string", "traits": { @@ -9290,13 +9312,13 @@ "qAppsControlMode": { "target": "com.amazonaws.qbusiness#QAppsControlMode", "traits": { - "smithy.api#documentation": "

Status information about whether end users can create and use Amazon Q Apps in the web experience.

", + "smithy.api#documentation": "

Status information about whether end users can create and use Amazon Q Apps in the web\n experience.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Configuration information about Amazon Q Apps. (preview feature)

" + "smithy.api#documentation": "

Configuration information about Amazon Q Apps.

" } }, "com.amazonaws.qbusiness#QAppsControlMode": { @@ -11439,6 +11461,12 @@ "traits": { "smithy.api#documentation": "

Information about the identity provider (IdP) used to authenticate end users of an\n Amazon Q Business web experience.

" } + }, + "origins": { + "target": "com.amazonaws.qbusiness#WebExperienceOrigins", + "traits": { + "smithy.api#documentation": "

Updates the website domain origins that \n are allowed to embed the Amazon Q Business web experience.\n \n The domain origin refers to the \n base URL for accessing a website including the protocol \n (http/https), the domain name, and the port number (if specified).

  • Any values except null submitted as part of this update will replace all previous values.
  • You must only submit a base URL and not a full path. For example, https://docs.aws.amazon.com.
" + } } }, "traits": { @@ -11538,12 +11566,12 @@ "userGroups": { "target": "com.amazonaws.qbusiness#UserGroups", "traits": { - "smithy.api#documentation": "

The user groups associated with a topic control rule.

" + "smithy.api#documentation": "

The user group names associated with a topic control rule.

" } } }, "traits": { - "smithy.api#documentation": "

Provides information about users and groups associated with a topic control\n rule.

" + "smithy.api#documentation": "

Provides information about users and group names associated with a topic control\n rule.

" } }, "com.amazonaws.qbusiness#ValidationException": { @@ -11696,6 +11724,18 @@ "smithy.api#pattern": "^[a-zA-Z0-9][a-zA-Z0-9-]*$" } }, + "com.amazonaws.qbusiness#WebExperienceOrigins": { + "type": "list", + "member": { + "target": "com.amazonaws.qbusiness#Origin" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, "com.amazonaws.qbusiness#WebExperienceResource": { "type": "resource", "identifiers": { diff --git a/models/qconnect.json b/models/qconnect.json index 363e641eca..a468eff5e3 100644 --- a/models/qconnect.json +++ b/models/qconnect.json @@ -1,190 +1,172 @@ { "smithy": "2.0", "shapes": { - "com.amazonaws.qconnect#AccessDeniedException": { - "type": "structure", - "members": { - "message": { - "target": "smithy.api#String" - } - }, - "traits": { - "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", - "smithy.api#error": "client", - "smithy.api#httpError": 403 - } - }, - "com.amazonaws.qconnect#AmazonConnectGuideAssociationData": { - "type": "structure", - "members": { - "flowId": { - "target": "com.amazonaws.qconnect#GenericArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Amazon Connect flow. Step-by-step guides are a type of flow.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

Content association data for a step-by-step\n guide.

" - } - }, - "com.amazonaws.qconnect#AndConditions": { - "type": "list", - "member": { - "target": "com.amazonaws.qconnect#TagCondition" - } - }, - "com.amazonaws.qconnect#AppIntegrationsConfiguration": { - "type": "structure", - "members": { - "appIntegrationArn": { - "target": "com.amazonaws.qconnect#GenericArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.

  • For Salesforce, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted as source fields.
  • For ServiceNow, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least number, short_description, sys_mod_count, workflow_state, and active as source fields.
  • For Zendesk, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least id, title, updated_at, and draft as source fields.
  • For SharePoint, your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among docx, pdf, html, htm, and txt.
  • For Amazon S3, the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. The SourceURI of your DataIntegration must use the following format: s3://your_s3_bucket_name. The bucket policy of the corresponding S3 bucket must allow the Amazon Web Services principal app-integrations.amazonaws.com to perform s3:ListBucket, s3:GetObject, and s3:GetBucketLocation against the bucket.
", - "smithy.api#required": {} - } - }, - "objectFields": { - "target": "com.amazonaws.qconnect#ObjectFieldsList", - "traits": { - "smithy.api#documentation": "

The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if\n ObjectConfiguration is included in the provided DataIntegration.

  • For Salesforce, you must include at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted.
  • For ServiceNow, you must include at least number, short_description, sys_mod_count, workflow_state, and active.
  • For Zendesk, you must include at least id, title, updated_at, and draft.

Make sure to include additional fields. These fields are indexed and used to source\n recommendations.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

Configuration information for Amazon AppIntegrations to automatically ingest content.

" - } - }, - "com.amazonaws.qconnect#Arn": { - "type": "string", - "traits": { - "smithy.api#pattern": "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" - } - }, - "com.amazonaws.qconnect#Assistant": { + "com.amazonaws.qconnect#AIAgent": { "type": "resource", "identifiers": { "assistantId": { "target": "com.amazonaws.qconnect#UuidOrArn" + }, + "aiAgentId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier" } }, "create": { - "target": "com.amazonaws.qconnect#CreateAssistant" + "target": "com.amazonaws.qconnect#CreateAIAgent" }, "read": { - "target": "com.amazonaws.qconnect#GetAssistant" + "target": "com.amazonaws.qconnect#GetAIAgent" + }, + "update": { + "target": "com.amazonaws.qconnect#UpdateAIAgent" }, "delete": { - "target": "com.amazonaws.qconnect#DeleteAssistant" + "target": "com.amazonaws.qconnect#DeleteAIAgent" }, "list": { - "target": "com.amazonaws.qconnect#ListAssistants" + "target": "com.amazonaws.qconnect#ListAIAgents" }, "operations": [ { - "target": "com.amazonaws.qconnect#GetRecommendations" - }, - { - "target": "com.amazonaws.qconnect#NotifyRecommendationsReceived" - }, - { - "target": "com.amazonaws.qconnect#PutFeedback" - }, - { - "target": "com.amazonaws.qconnect#QueryAssistant" + "target": "com.amazonaws.qconnect#CreateAIAgentVersion" }, { - "target": "com.amazonaws.qconnect#SearchSessions" - } - ], - "resources": [ - { - "target": "com.amazonaws.qconnect#AssistantAssociation" + "target": "com.amazonaws.qconnect#DeleteAIAgentVersion" }, { - "target": "com.amazonaws.qconnect#Session" + "target": "com.amazonaws.qconnect#ListAIAgentVersions" } ], "traits": { "aws.api#arn": { - "template": "assistant/{assistantId}" + "template": "ai-agent/{assistantId}/{aiAgentId}" }, - "aws.cloudformation#cfnResource": {}, "aws.iam#disableConditionKeyInference": {} } }, - "com.amazonaws.qconnect#AssistantAssociation": { - "type": "resource", - "identifiers": { - "assistantId": { - "target": "com.amazonaws.qconnect#UuidOrArn" + "com.amazonaws.qconnect#AIAgentAssociationConfigurationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "KNOWLEDGE_BASE", + "name": "KNOWLEDGE_BASE" + } + ] + } + }, + "com.amazonaws.qconnect#AIAgentConfiguration": { + "type": "union", + "members": { + "manualSearchAIAgentConfiguration": { + "target": "com.amazonaws.qconnect#ManualSearchAIAgentConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for AI Agents of type MANUAL_SEARCH.

" + } }, - "assistantAssociationId": { - "target": "com.amazonaws.qconnect#UuidOrArn" + "answerRecommendationAIAgentConfiguration": { + "target": "com.amazonaws.qconnect#AnswerRecommendationAIAgentConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for AI Agents of type ANSWER_RECOMMENDATION.

" + } } }, - "create": { - "target": "com.amazonaws.qconnect#CreateAssistantAssociation" - }, - "read": { - "target": "com.amazonaws.qconnect#GetAssistantAssociation" - }, - "delete": { - "target": "com.amazonaws.qconnect#DeleteAssistantAssociation" - }, - "list": { - "target": "com.amazonaws.qconnect#ListAssistantAssociations" + "traits": { + "smithy.api#documentation": "

A typed union that specifies the configuration based on the type of AI Agent.

" + } + }, + "com.amazonaws.qconnect#AIAgentConfigurationData": { + "type": "structure", + "members": { + "aiAgentId": { + "target": "com.amazonaws.qconnect#UuidWithQualifier", + "traits": { + "smithy.api#documentation": "

The ID of the AI Agent to be configured.

", + "smithy.api#required": {} + } + } }, "traits": { - "aws.api#arn": { - "template": "association/{assistantId}/{assistantAssociationId}" - }, - "aws.cloudformation#cfnResource": {}, - "aws.iam#disableConditionKeyInference": {} + "smithy.api#documentation": "

A type that specifies the AI Agent ID configuration data when mapping AI Agents to be\n used for an AI Agent type on a session or assistant.

" } }, - "com.amazonaws.qconnect#AssistantAssociationData": { + "com.amazonaws.qconnect#AIAgentConfigurationMap": { + "type": "map", + "key": { + "target": "com.amazonaws.qconnect#AIAgentType" + }, + "value": { + "target": "com.amazonaws.qconnect#AIAgentConfigurationData" + } + }, + "com.amazonaws.qconnect#AIAgentData": { "type": "structure", "members": { - "assistantAssociationId": { + "assistantId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the assistant association.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", "smithy.api#required": {} } }, - "assistantAssociationArn": { + "assistantArn": { "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the assistant association.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, - "assistantId": { + "aiAgentId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant.

", + "smithy.api#documentation": "

The identifier of the AI Agent.

", "smithy.api#required": {} } }, - "assistantArn": { + "aiAgentArn": { "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AI agent.

", "smithy.api#required": {} } }, - "associationType": { - "target": "com.amazonaws.qconnect#AssociationType", + "name": { + "target": "com.amazonaws.qconnect#Name", "traits": { - "smithy.api#documentation": "

The type of association.

", + "smithy.api#documentation": "

The name of the AI Agent.

", "smithy.api#required": {} } }, - "associationData": { - "target": "com.amazonaws.qconnect#AssistantAssociationOutputData", + "type": { + "target": "com.amazonaws.qconnect#AIAgentType", "traits": { - "smithy.api#documentation": "

A union type that currently has a single argument, the knowledge base ID.

", + "smithy.api#documentation": "

The type of the AI Agent.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.qconnect#AIAgentConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration for the AI Agent.

", + "smithy.api#required": {} + } + }, + "modifiedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the AI Agent was last modified.

" + } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description of the AI Agent.

" + } + }, + "visibilityStatus": { + "target": "com.amazonaws.qconnect#VisibilityStatus", + "traits": { + "smithy.api#documentation": "

The visibility status of the AI Agent.

", "smithy.api#required": {} } }, @@ -193,148 +175,239 @@ "traits": { "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" } + }, + "origin": { + "target": "com.amazonaws.qconnect#Origin", + "traits": { + "smithy.api#documentation": "

Specifies the origin of the AI Agent. SYSTEM for a default AI Agent created\n by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent creation\n APIs.

" + } + }, + "status": { + "target": "com.amazonaws.qconnect#Status", + "traits": { + "smithy.api#documentation": "

The status of the AI Agent.

" + } } }, "traits": { - "smithy.api#documentation": "

Information about the assistant association.

", + "smithy.api#documentation": "

The data for the AI Agent.

", "smithy.api#references": [ { - "resource": "com.amazonaws.qconnect#AssistantAssociation" + "resource": "com.amazonaws.qconnect#AIAgent" } ] } }, - "com.amazonaws.qconnect#AssistantAssociationInputData": { - "type": "union", - "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.qconnect#Uuid", - "traits": { - "smithy.api#documentation": "

The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

The data that is input into Amazon Q in Connect as a result of the assistant association.

" - } - }, - "com.amazonaws.qconnect#AssistantAssociationOutputData": { - "type": "union", + "com.amazonaws.qconnect#AIAgentSummary": { + "type": "structure", "members": { - "knowledgeBaseAssociation": { - "target": "com.amazonaws.qconnect#KnowledgeBaseAssociationData", + "name": { + "target": "com.amazonaws.qconnect#Name", "traits": { - "smithy.api#documentation": "

The knowledge base where output data is sent.

" + "smithy.api#documentation": "

The name of the AI Agent.

", + "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

The data that is output as a result of the assistant association.

" - } - }, - "com.amazonaws.qconnect#AssistantAssociationSummary": { - "type": "structure", - "members": { - "assistantAssociationId": { + }, + "assistantId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the assistant association.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", "smithy.api#required": {} } }, - "assistantAssociationArn": { + "assistantArn": { "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the assistant association.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, - "assistantId": { + "aiAgentId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant.

", + "smithy.api#documentation": "

The identifier of the AI Agent.

", "smithy.api#required": {} } }, - "assistantArn": { - "target": "com.amazonaws.qconnect#Arn", + "type": { + "target": "com.amazonaws.qconnect#AIAgentType", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant.

", + "smithy.api#documentation": "

The type of the AI Agent.

", "smithy.api#required": {} } }, - "associationType": { - "target": "com.amazonaws.qconnect#AssociationType", + "aiAgentArn": { + "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The type of association.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AI agent.

", "smithy.api#required": {} } }, - "associationData": { - "target": "com.amazonaws.qconnect#AssistantAssociationOutputData", + "modifiedTime": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The association data.

", + "smithy.api#documentation": "

The time the AI Agent was last modified.

" + } + }, + "visibilityStatus": { + "target": "com.amazonaws.qconnect#VisibilityStatus", + "traits": { + "smithy.api#documentation": "

The visibility status of the AI Agent.

", "smithy.api#required": {} } }, - "tags": { - "target": "com.amazonaws.qconnect#Tags", + "configuration": { + "target": "com.amazonaws.qconnect#AIAgentConfiguration", "traits": { - "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + "smithy.api#documentation": "

The configuration for the AI Agent.

" } - } - }, - "traits": { - "smithy.api#documentation": "

Summary information about the assistant association.

", - "smithy.api#references": [ - { - "resource": "com.amazonaws.qconnect#AssistantAssociation" + }, + "origin": { + "target": "com.amazonaws.qconnect#Origin", + "traits": { + "smithy.api#documentation": "

The origin of the AI Agent. SYSTEM for a default AI Agent created by Q in\n Connect or CUSTOMER for an AI Agent created by calling AI Agent creation\n APIs.

" + } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description of the AI Agent.

" + } + }, + "status": { + "target": "com.amazonaws.qconnect#Status", + "traits": { + "smithy.api#documentation": "

The status of the AI Agent.

" + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of the AI Agent.

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.qconnect#AIAgent" } ] } }, - "com.amazonaws.qconnect#AssistantAssociationSummaryList": { + "com.amazonaws.qconnect#AIAgentSummaryList": { "type": "list", "member": { - "target": "com.amazonaws.qconnect#AssistantAssociationSummary" + "target": "com.amazonaws.qconnect#AIAgentSummary" } }, - "com.amazonaws.qconnect#AssistantCapabilityConfiguration": { + "com.amazonaws.qconnect#AIAgentType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "MANUAL_SEARCH", + "name": "MANUAL_SEARCH" + }, + { + "value": "ANSWER_RECOMMENDATION", + "name": "ANSWER_RECOMMENDATION" + } + ] + } + }, + "com.amazonaws.qconnect#AIAgentVersionSummariesList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#AIAgentVersionSummary" + } + }, + "com.amazonaws.qconnect#AIAgentVersionSummary": { "type": "structure", "members": { - "type": { - "target": "com.amazonaws.qconnect#AssistantCapabilityType", + "aiAgentSummary": { + "target": "com.amazonaws.qconnect#AIAgentSummary", "traits": { - "smithy.api#documentation": "

The type of Amazon Q in Connect assistant capability.

" + "smithy.api#documentation": "

The data for the summary of the AI Agent version.

" + } + }, + "versionNumber": { + "target": "com.amazonaws.qconnect#Version", + "traits": { + "smithy.api#documentation": "

The version number for this AI Agent version.

" } } }, "traits": { - "smithy.api#documentation": "

The capability configuration for an Amazon Q in Connect assistant.

" + "smithy.api#documentation": "

The summary of the AI Agent version.

" } }, - "com.amazonaws.qconnect#AssistantCapabilityType": { + "com.amazonaws.qconnect#AIPrompt": { + "type": "resource", + "identifiers": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn" + }, + "aiPromptId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier" + } + }, + "create": { + "target": "com.amazonaws.qconnect#CreateAIPrompt" + }, + "read": { + "target": "com.amazonaws.qconnect#GetAIPrompt" + }, + "update": { + "target": "com.amazonaws.qconnect#UpdateAIPrompt" + }, + "delete": { + "target": "com.amazonaws.qconnect#DeleteAIPrompt" + }, + "list": { + "target": "com.amazonaws.qconnect#ListAIPrompts" + }, + "operations": [ + { + "target": "com.amazonaws.qconnect#CreateAIPromptVersion" + }, + { + "target": "com.amazonaws.qconnect#DeleteAIPromptVersion" + }, + { + "target": "com.amazonaws.qconnect#ListAIPromptVersions" + } + ], + "traits": { + "aws.api#arn": { + "template": "ai-prompt/{assistantId}/{aiPromptId}" + }, + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.qconnect#AIPromptAPIFormat": { "type": "string", "traits": { "smithy.api#enum": [ { - "value": "V1", - "name": "V1" + "value": "ANTHROPIC_CLAUDE_MESSAGES", + "name": "ANTHROPIC_CLAUDE_MESSAGES" }, { - "value": "V2", - "name": "V2" + "value": "ANTHROPIC_CLAUDE_TEXT_COMPLETIONS", + "name": "ANTHROPIC_CLAUDE_TEXT_COMPLETIONS" } ] } }, - "com.amazonaws.qconnect#AssistantData": { + "com.amazonaws.qconnect#AIPromptData": { "type": "structure", "members": { "assistantId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", "smithy.api#required": {} } }, @@ -345,120 +418,132 @@ "smithy.api#required": {} } }, + "aiPromptId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI prompt.

", + "smithy.api#required": {} + } + }, + "aiPromptArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AI Prompt.

", + "smithy.api#required": {} + } + }, "name": { "target": "com.amazonaws.qconnect#Name", "traits": { - "smithy.api#documentation": "

The name.

", + "smithy.api#documentation": "

The name of the AI Prompt.

", "smithy.api#required": {} } }, "type": { - "target": "com.amazonaws.qconnect#AssistantType", + "target": "com.amazonaws.qconnect#AIPromptType", "traits": { - "smithy.api#documentation": "

The type of assistant.

", + "smithy.api#documentation": "

The type of this AI Prompt.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.qconnect#AssistantStatus", + "templateType": { + "target": "com.amazonaws.qconnect#AIPromptTemplateType", "traits": { - "smithy.api#documentation": "

The status of the assistant.

", + "smithy.api#documentation": "

The type of the prompt template for this AI Prompt.

", + "smithy.api#required": {} + } + }, + "modelId": { + "target": "com.amazonaws.qconnect#AIPromptModelIdentifier", + "traits": { + "smithy.api#documentation": "

The identifier of the model used for this AI Prompt. Model Ids supported are:\n CLAUDE_3_HAIKU_20240307_V1.

", + "smithy.api#required": {} + } + }, + "apiFormat": { + "target": "com.amazonaws.qconnect#AIPromptAPIFormat", + "traits": { + "smithy.api#documentation": "

The API format used for this AI Prompt.

", + "smithy.api#required": {} + } + }, + "templateConfiguration": { + "target": "com.amazonaws.qconnect#AIPromptTemplateConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the prompt template for this AI Prompt.

", "smithy.api#required": {} } }, + "modifiedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the AI Prompt was last modified.

" + } + }, "description": { "target": "com.amazonaws.qconnect#Description", "traits": { - "smithy.api#documentation": "

The description.

" + "smithy.api#documentation": "

The description of the AI Prompt.

" } }, - "tags": { - "target": "com.amazonaws.qconnect#Tags", + "visibilityStatus": { + "target": "com.amazonaws.qconnect#VisibilityStatus", "traits": { - "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + "smithy.api#documentation": "

The visibility status of the AI Prompt.

", + "smithy.api#required": {} } }, - "serverSideEncryptionConfiguration": { - "target": "com.amazonaws.qconnect#ServerSideEncryptionConfiguration", + "tags": { + "target": "com.amazonaws.qconnect#Tags", "traits": { - "smithy.api#documentation": "

The configuration information for the customer managed key used for encryption.

\n

This KMS key must have a policy that allows kms:CreateGrant,\n kms:DescribeKey, kms:Decrypt, and\n kms:GenerateDataKey* permissions to the IAM identity using the\n key to invoke Amazon Q in Connect. To use Amazon Q in Connect with chat, the key policy must also allow\n kms:Decrypt, kms:GenerateDataKey*, and\n kms:DescribeKey permissions to the connect.amazonaws.com service\n principal.

\n

For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for\n your instance.

" + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" } }, - "integrationConfiguration": { - "target": "com.amazonaws.qconnect#AssistantIntegrationConfiguration", + "origin": { + "target": "com.amazonaws.qconnect#Origin", "traits": { - "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant integration.

" + "smithy.api#documentation": "

The origin of the AI Prompt. SYSTEM for a default AI Prompt created by Q in\n Connect or CUSTOMER for an AI Prompt created by calling AI Prompt creation APIs.\n

" } }, - "capabilityConfiguration": { - "target": "com.amazonaws.qconnect#AssistantCapabilityConfiguration", + "status": { + "target": "com.amazonaws.qconnect#Status", "traits": { - "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant capability.

" + "smithy.api#documentation": "

The status of the AI Prompt.

" } } }, "traits": { - "smithy.api#documentation": "

The assistant data.

" - } - }, - "com.amazonaws.qconnect#AssistantIntegrationConfiguration": { - "type": "structure", - "members": { - "topicIntegrationArn": { - "target": "com.amazonaws.qconnect#GenericArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for streaming chat messages.

" + "smithy.api#documentation": "

The data for the AI Prompt

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.qconnect#AIPrompt" } - } - }, - "traits": { - "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant integration.

" - } - }, - "com.amazonaws.qconnect#AssistantList": { - "type": "list", - "member": { - "target": "com.amazonaws.qconnect#AssistantSummary" + ] } }, - "com.amazonaws.qconnect#AssistantStatus": { + "com.amazonaws.qconnect#AIPromptModelIdentifier": { "type": "string", "traits": { - "smithy.api#enum": [ - { - "value": "CREATE_IN_PROGRESS", - "name": "CREATE_IN_PROGRESS" - }, - { - "value": "CREATE_FAILED", - "name": "CREATE_FAILED" - }, - { - "value": "ACTIVE", - "name": "ACTIVE" - }, - { - "value": "DELETE_IN_PROGRESS", - "name": "DELETE_IN_PROGRESS" - }, - { - "value": "DELETE_FAILED", - "name": "DELETE_FAILED" - }, - { - "value": "DELETED", - "name": "DELETED" - } - ] + "smithy.api#length": { + "min": 1, + "max": 2048 + } } }, - "com.amazonaws.qconnect#AssistantSummary": { + "com.amazonaws.qconnect#AIPromptSummary": { "type": "structure", "members": { + "name": { + "target": "com.amazonaws.qconnect#Name", + "traits": { + "smithy.api#documentation": "

The name of the AI Prompt.

", + "smithy.api#required": {} + } + }, "assistantId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", "smithy.api#required": {} } }, @@ -469,127 +554,172 @@ "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.qconnect#Name", + "aiPromptId": { + "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The name of the assistant.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI prompt.

", "smithy.api#required": {} } }, "type": { - "target": "com.amazonaws.qconnect#AssistantType", + "target": "com.amazonaws.qconnect#AIPromptType", "traits": { - "smithy.api#documentation": "

The type of the assistant.

", + "smithy.api#documentation": "

The type of this AI Prompt.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.qconnect#AssistantStatus", + "aiPromptArn": { + "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The status of the assistant.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AI Prompt.

", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.qconnect#Description", + "modifiedTime": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The description of the assistant.

" + "smithy.api#documentation": "

The time the AI Prompt was last modified.

" } }, - "tags": { - "target": "com.amazonaws.qconnect#Tags", + "templateType": { + "target": "com.amazonaws.qconnect#AIPromptTemplateType", "traits": { - "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + "smithy.api#documentation": "

The type of the prompt template for this AI Prompt.

", + "smithy.api#required": {} } }, - "serverSideEncryptionConfiguration": { - "target": "com.amazonaws.qconnect#ServerSideEncryptionConfiguration", + "modelId": { + "target": "com.amazonaws.qconnect#AIPromptModelIdentifier", "traits": { - "smithy.api#documentation": "

The configuration information for the customer managed key used for encryption.

\n

This KMS key must have a policy that allows kms:CreateGrant,\n kms:DescribeKey, kms:Decrypt, and\n kms:GenerateDataKey* permissions to the IAM identity using the\n key to invoke Amazon Q in Connect. To use Amazon Q in Connect with chat, the key policy must also allow\n kms:Decrypt, kms:GenerateDataKey*, and\n kms:DescribeKey permissions to the connect.amazonaws.com service\n principal.

\n

For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for\n your instance.

" + "smithy.api#documentation": "

The identifier of the model used for this AI Prompt. Model Ids supported are:\n CLAUDE_3_HAIKU_20240307_V1.

", + "smithy.api#required": {} } }, - "integrationConfiguration": { - "target": "com.amazonaws.qconnect#AssistantIntegrationConfiguration", + "apiFormat": { + "target": "com.amazonaws.qconnect#AIPromptAPIFormat", "traits": { - "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant integration.

" + "smithy.api#documentation": "

The API format used for this AI Prompt.

", + "smithy.api#required": {} } }, - "capabilityConfiguration": { - "target": "com.amazonaws.qconnect#AssistantCapabilityConfiguration", + "visibilityStatus": { + "target": "com.amazonaws.qconnect#VisibilityStatus", "traits": { - "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant capability.

" + "smithy.api#documentation": "

The visibility status of the AI Prompt.

", + "smithy.api#required": {} + } + }, + "origin": { + "target": "com.amazonaws.qconnect#Origin", + "traits": { + "smithy.api#documentation": "

The origin of the AI Prompt. SYSTEM for a default AI Prompt created by Q in\n Connect or CUSTOMER for an AI Prompt created by calling AI Prompt creation APIs.\n

" + } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description of the AI Prompt.

" + } + }, + "status": { + "target": "com.amazonaws.qconnect#Status", + "traits": { + "smithy.api#documentation": "

The status of the AI Prompt.

" + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" } } }, "traits": { - "smithy.api#documentation": "

Summary information about the assistant.

" + "smithy.api#documentation": "

The summary of the AI Prompt.

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.qconnect#AIPrompt" + } + ] } }, - "com.amazonaws.qconnect#AssistantType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "AGENT", - "name": "AGENT" + "com.amazonaws.qconnect#AIPromptSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#AIPromptSummary" + } + }, + "com.amazonaws.qconnect#AIPromptTemplateConfiguration": { + "type": "union", + "members": { + "textFullAIPromptEditTemplateConfiguration": { + "target": "com.amazonaws.qconnect#TextFullAIPromptEditTemplateConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for a prompt template that supports full textual prompt configuration\n using a YAML prompt.

" } - ] + } + }, + "traits": { + "smithy.api#documentation": "

A typed union that specifies the configuration for a prompt template based on its\n type.

" } }, - "com.amazonaws.qconnect#AssociationType": { + "com.amazonaws.qconnect#AIPromptTemplateType": { "type": "string", "traits": { "smithy.api#enum": [ { - "value": "KNOWLEDGE_BASE", - "name": "KNOWLEDGE_BASE" + "value": "TEXT", + "name": "TEXT" } ] } }, - "com.amazonaws.qconnect#Channel": { + "com.amazonaws.qconnect#AIPromptType": { "type": "string", "traits": { - "smithy.api#length": { - "min": 1, - "max": 10 - }, - "smithy.api#sensitive": {} + "smithy.api#enum": [ + { + "value": "ANSWER_GENERATION", + "name": "ANSWER_GENERATION" + }, + { + "value": "INTENT_LABELING_GENERATION", + "name": "INTENT_LABELING_GENERATION" + }, + { + "value": "QUERY_REFORMULATION", + "name": "QUERY_REFORMULATION" + } + ] } }, - "com.amazonaws.qconnect#Channels": { + "com.amazonaws.qconnect#AIPromptVersionSummariesList": { "type": "list", "member": { - "target": "com.amazonaws.qconnect#Channel" - }, - "traits": { - "smithy.api#uniqueItems": {} - } - }, - "com.amazonaws.qconnect#ClientToken": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 4096 - } + "target": "com.amazonaws.qconnect#AIPromptVersionSummary" } }, - "com.amazonaws.qconnect#Configuration": { - "type": "union", + "com.amazonaws.qconnect#AIPromptVersionSummary": { + "type": "structure", "members": { - "connectConfiguration": { - "target": "com.amazonaws.qconnect#ConnectConfiguration", + "aiPromptSummary": { + "target": "com.amazonaws.qconnect#AIPromptSummary", "traits": { - "smithy.api#documentation": "

The configuration information of the Amazon Connect data source.

" + "smithy.api#documentation": "

The data for the summary of the AI Prompt version.

" + } + }, + "versionNumber": { + "target": "com.amazonaws.qconnect#Version", + "traits": { + "smithy.api#documentation": "

The version number for this AI Prompt version.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration information of the external data source.

" + "smithy.api#documentation": "

The summary of the AI Prompt version.

" } }, - "com.amazonaws.qconnect#ConflictException": { + "com.amazonaws.qconnect#AccessDeniedException": { "type": "structure", "members": { "message": { @@ -597,197 +727,226 @@ } }, "traits": { - "smithy.api#documentation": "

The request could not be processed because of a conflict in the current state of the\n resource. For example, if you're using a Create API (such as\n CreateAssistant) that accepts a name, a conflicting resource (usually with the\n same name) is being created or mutated.

", + "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", "smithy.api#error": "client", - "smithy.api#httpError": 409 + "smithy.api#httpError": 403 } }, - "com.amazonaws.qconnect#ConnectConfiguration": { + "com.amazonaws.qconnect#AmazonConnectGuideAssociationData": { "type": "structure", "members": { - "instanceId": { - "target": "com.amazonaws.qconnect#NonEmptyString", + "flowId": { + "target": "com.amazonaws.qconnect#GenericArn", "traits": { - "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an Amazon Connect flow. Step-by-step guides are a type of flow.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration information of the Amazon Connect data source.

" + "smithy.api#documentation": "

Content association data for a step-by-step\n guide.

" } }, - "com.amazonaws.qconnect#ContactAttributeKey": { - "type": "string" - }, - "com.amazonaws.qconnect#ContactAttributeKeys": { + "com.amazonaws.qconnect#AndConditions": { "type": "list", "member": { - "target": "com.amazonaws.qconnect#ContactAttributeKey" + "target": "com.amazonaws.qconnect#TagCondition" + } + }, + "com.amazonaws.qconnect#AnswerRecommendationAIAgentConfiguration": { + "type": "structure", + "members": { + "intentLabelingGenerationAIPromptId": { + "target": "com.amazonaws.qconnect#UuidWithQualifier", + "traits": { + "smithy.api#documentation": "

The AI Prompt identifier for the Intent Labeling prompt used by the\n ANSWER_RECOMMENDATION AI Agent.

" + } + }, + "queryReformulationAIPromptId": { + "target": "com.amazonaws.qconnect#UuidWithQualifier", + "traits": { + "smithy.api#documentation": "

The AI Prompt identifier for the Query Reformulation prompt used by the\n ANSWER_RECOMMENDATION AI Agent.

" + } + }, + "answerGenerationAIPromptId": { + "target": "com.amazonaws.qconnect#UuidWithQualifier", + "traits": { + "smithy.api#documentation": "

The AI Prompt identifier for the Answer Generation prompt used by the\n ANSWER_RECOMMENDATION AI Agent.

" + } + }, + "associationConfigurations": { + "target": "com.amazonaws.qconnect#AssociationConfigurationList", + "traits": { + "smithy.api#documentation": "

The association configurations for overriding behavior on this AI Agent.

" + } + } }, "traits": { - "smithy.api#sensitive": {}, - "smithy.api#uniqueItems": {} + "smithy.api#documentation": "

The configuration for the ANSWER_RECOMMENDATION AI Agent type.

" } }, - "com.amazonaws.qconnect#ContactAttributeValue": { - "type": "string" - }, - "com.amazonaws.qconnect#ContactAttributes": { - "type": "map", - "key": { - "target": "com.amazonaws.qconnect#ContactAttributeKey" - }, - "value": { - "target": "com.amazonaws.qconnect#ContactAttributeValue" + "com.amazonaws.qconnect#AppIntegrationsConfiguration": { + "type": "structure", + "members": { + "appIntegrationArn": { + "target": "com.amazonaws.qconnect#GenericArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.

  • For Salesforce, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted as source fields.
  • For ServiceNow, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least number, short_description, sys_mod_count, workflow_state, and active as source fields.
  • For Zendesk, your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least id, title, updated_at, and draft as source fields.
  • For SharePoint, your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among docx, pdf, html, htm, and txt.
  • For Amazon S3, the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. The SourceURI of your DataIntegration must use the following format: s3://your_s3_bucket_name. The bucket policy of the corresponding S3 bucket must allow the Amazon Web Services principal app-integrations.amazonaws.com to perform s3:ListBucket, s3:GetObject, and s3:GetBucketLocation against the bucket.

", + "smithy.api#required": {} + } + }, + "objectFields": { + "target": "com.amazonaws.qconnect#ObjectFieldsList", + "traits": { + "smithy.api#documentation": "

The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if ObjectConfiguration is included in the provided DataIntegration.

  • For Salesforce, you must include at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted.
  • For ServiceNow, you must include at least number, short_description, sys_mod_count, workflow_state, and active.
  • For Zendesk, you must include at least id, title, updated_at, and draft.

Make sure to include additional fields. These fields are indexed and used to source recommendations.

" + } + } }, "traits": { - "smithy.api#sensitive": {} + "smithy.api#documentation": "

Configuration information for Amazon AppIntegrations to automatically ingest content.

" } }, - "com.amazonaws.qconnect#Content": { + "com.amazonaws.qconnect#Arn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" + } + }, + "com.amazonaws.qconnect#Assistant": { "type": "resource", "identifiers": { - "knowledgeBaseId": { - "target": "com.amazonaws.qconnect#UuidOrArn" - }, - "contentId": { + "assistantId": { "target": "com.amazonaws.qconnect#UuidOrArn" } }, "create": { - "target": "com.amazonaws.qconnect#CreateContent" + "target": "com.amazonaws.qconnect#CreateAssistant" }, "read": { - "target": "com.amazonaws.qconnect#GetContent" - }, - "update": { - "target": "com.amazonaws.qconnect#UpdateContent" + "target": "com.amazonaws.qconnect#GetAssistant" }, "delete": { - "target": "com.amazonaws.qconnect#DeleteContent" + "target": "com.amazonaws.qconnect#DeleteAssistant" }, "list": { - "target": "com.amazonaws.qconnect#ListContents" + "target": "com.amazonaws.qconnect#ListAssistants" }, "operations": [ { - "target": "com.amazonaws.qconnect#GetContentSummary" + "target": "com.amazonaws.qconnect#GetRecommendations" + }, + { + "target": "com.amazonaws.qconnect#NotifyRecommendationsReceived" + }, + { + "target": "com.amazonaws.qconnect#PutFeedback" + }, + { + "target": "com.amazonaws.qconnect#QueryAssistant" + }, + { + "target": "com.amazonaws.qconnect#RemoveAssistantAIAgent" + }, + { + "target": "com.amazonaws.qconnect#SearchSessions" + }, + { + "target": "com.amazonaws.qconnect#UpdateAssistantAIAgent" } ], "resources": [ { - "target": "com.amazonaws.qconnect#ContentAssociation" + "target": "com.amazonaws.qconnect#AIAgent" + }, + { + "target": "com.amazonaws.qconnect#AIPrompt" + }, + { + "target": "com.amazonaws.qconnect#AssistantAssociation" + }, + { + "target": "com.amazonaws.qconnect#Session" } ], "traits": { "aws.api#arn": { - "template": "content/{knowledgeBaseId}/{contentId}" + "template": "assistant/{assistantId}" }, + "aws.cloudformation#cfnResource": {}, "aws.iam#disableConditionKeyInference": {} } }, - "com.amazonaws.qconnect#ContentAssociation": { + "com.amazonaws.qconnect#AssistantAssociation": { "type": "resource", "identifiers": { - "knowledgeBaseId": { - "target": "com.amazonaws.qconnect#UuidOrArn" - }, - "contentId": { + "assistantId": { "target": "com.amazonaws.qconnect#UuidOrArn" }, - "contentAssociationId": { + "assistantAssociationId": { "target": "com.amazonaws.qconnect#UuidOrArn" } }, "create": { - "target": "com.amazonaws.qconnect#CreateContentAssociation" + "target": "com.amazonaws.qconnect#CreateAssistantAssociation" }, "read": { - "target": "com.amazonaws.qconnect#GetContentAssociation" + "target": "com.amazonaws.qconnect#GetAssistantAssociation" }, "delete": { - "target": "com.amazonaws.qconnect#DeleteContentAssociation" + "target": "com.amazonaws.qconnect#DeleteAssistantAssociation" }, "list": { - "target": "com.amazonaws.qconnect#ListContentAssociations" + "target": "com.amazonaws.qconnect#ListAssistantAssociations" }, "traits": { "aws.api#arn": { - "template": "content-association/{knowledgeBaseId}/{contentId}/{contentAssociationId}" + "template": "association/{assistantId}/{assistantAssociationId}" }, + "aws.cloudformation#cfnResource": {}, "aws.iam#disableConditionKeyInference": {} } }, - "com.amazonaws.qconnect#ContentAssociationContents": { - "type": "union", - "members": { - "amazonConnectGuideAssociation": { - "target": 
"com.amazonaws.qconnect#AmazonConnectGuideAssociationData", - "traits": { - "smithy.api#documentation": "

The data of the step-by-step guide association.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

The contents of a content association.

" - } - }, - "com.amazonaws.qconnect#ContentAssociationData": { + "com.amazonaws.qconnect#AssistantAssociationData": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.qconnect#Uuid", - "traits": { - "smithy.api#documentation": "

The identifier of the knowledge base.

", - "smithy.api#required": {} - } - }, - "knowledgeBaseArn": { - "target": "com.amazonaws.qconnect#Arn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", - "smithy.api#required": {} - } - }, - "contentId": { + "assistantAssociationId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the content.

", + "smithy.api#documentation": "

The identifier of the assistant association.

", "smithy.api#required": {} } }, - "contentArn": { + "assistantAssociationArn": { "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the assistant association.

", "smithy.api#required": {} } }, - "contentAssociationId": { + "assistantId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the content association. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, - "contentAssociationArn": { + "assistantArn": { "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content association.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, "associationType": { - "target": "com.amazonaws.qconnect#ContentAssociationType", + "target": "com.amazonaws.qconnect#AssociationType", "traits": { "smithy.api#documentation": "

The type of association.

", "smithy.api#required": {} } }, "associationData": { - "target": "com.amazonaws.qconnect#ContentAssociationContents", + "target": "com.amazonaws.qconnect#AssistantAssociationOutputData", "traits": { - "smithy.api#documentation": "

The content association.

", + "smithy.api#documentation": "

A union type that currently has a single argument, the knowledge base ID.

", "smithy.api#required": {} } }, @@ -799,70 +958,84 @@ } }, "traits": { - "smithy.api#documentation": "

Information about the content association.

", + "smithy.api#documentation": "

Information about the assistant association.

", "smithy.api#references": [ { - "resource": "com.amazonaws.qconnect#ContentAssociation" + "resource": "com.amazonaws.qconnect#AssistantAssociation" } ] } }, - "com.amazonaws.qconnect#ContentAssociationSummary": { - "type": "structure", - "members": { + "com.amazonaws.qconnect#AssistantAssociationInputData": { + "type": "union", + "members": { "knowledgeBaseId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the knowledge base.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base.

" } - }, - "knowledgeBaseArn": { - "target": "com.amazonaws.qconnect#Arn", + } + }, + "traits": { + "smithy.api#documentation": "

The data that is input into Amazon Q in Connect as a result of the assistant association.

" + } + }, + "com.amazonaws.qconnect#AssistantAssociationOutputData": { + "type": "union", + "members": { + "knowledgeBaseAssociation": { + "target": "com.amazonaws.qconnect#KnowledgeBaseAssociationData", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The knowledge base where output data is sent.

" } - }, - "contentId": { + } + }, + "traits": { + "smithy.api#documentation": "

The data that is output as a result of the assistant association.

" + } + }, + "com.amazonaws.qconnect#AssistantAssociationSummary": { + "type": "structure", + "members": { + "assistantAssociationId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the content.

", + "smithy.api#documentation": "

The identifier of the assistant association.

", "smithy.api#required": {} } }, - "contentArn": { + "assistantAssociationArn": { "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the assistant association.

", "smithy.api#required": {} } }, - "contentAssociationId": { + "assistantId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the content association. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, - "contentAssociationArn": { + "assistantArn": { "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content association.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, "associationType": { - "target": "com.amazonaws.qconnect#ContentAssociationType", + "target": "com.amazonaws.qconnect#AssociationType", "traits": { "smithy.api#documentation": "

The type of association.

", "smithy.api#required": {} } }, "associationData": { - "target": "com.amazonaws.qconnect#ContentAssociationContents", + "target": "com.amazonaws.qconnect#AssistantAssociationOutputData", "traits": { - "smithy.api#documentation": "

The content association.

", + "smithy.api#documentation": "

The association data.

", "smithy.api#required": {} } }, @@ -874,332 +1047,1458 @@ } }, "traits": { - "smithy.api#documentation": "

Summary information about a content association.

", + "smithy.api#documentation": "

Summary information about the assistant association.

", "smithy.api#references": [ { - "resource": "com.amazonaws.qconnect#ContentAssociation" + "resource": "com.amazonaws.qconnect#AssistantAssociation" } ] } }, - "com.amazonaws.qconnect#ContentAssociationSummaryList": { + "com.amazonaws.qconnect#AssistantAssociationSummaryList": { "type": "list", "member": { - "target": "com.amazonaws.qconnect#ContentAssociationSummary" + "target": "com.amazonaws.qconnect#AssistantAssociationSummary" } }, - "com.amazonaws.qconnect#ContentAssociationType": { + "com.amazonaws.qconnect#AssistantCapabilityConfiguration": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.qconnect#AssistantCapabilityType", + "traits": { + "smithy.api#documentation": "

The type of Amazon Q in Connect assistant capability.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The capability configuration for an Amazon Q in Connect assistant.

" + } + }, + "com.amazonaws.qconnect#AssistantCapabilityType": { "type": "string", "traits": { "smithy.api#enum": [ { - "value": "AMAZON_CONNECT_GUIDE", - "name": "AMAZON_CONNECT_GUIDE" + "value": "V1", + "name": "V1" + }, + { + "value": "V2", + "name": "V2" } ] } }, - "com.amazonaws.qconnect#ContentData": { + "com.amazonaws.qconnect#AssistantData": { "type": "structure", "members": { - "contentArn": { - "target": "com.amazonaws.qconnect#Arn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

", - "smithy.api#required": {} - } - }, - "contentId": { + "assistantId": { "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

The identifier of the content.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, - "knowledgeBaseArn": { + "assistantArn": { "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", - "smithy.api#required": {} - } - }, - "knowledgeBaseId": { - "target": "com.amazonaws.qconnect#Uuid", - "traits": { - "smithy.api#documentation": "

The identifier of the knowledge base.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, "name": { "target": "com.amazonaws.qconnect#Name", "traits": { - "smithy.api#documentation": "

The name of the content.

", + "smithy.api#documentation": "

The name.

", "smithy.api#required": {} } }, - "revisionId": { - "target": "com.amazonaws.qconnect#NonEmptyString", + "type": { + "target": "com.amazonaws.qconnect#AssistantType", "traits": { - "smithy.api#documentation": "

The identifier of the content revision.

", + "smithy.api#documentation": "

The type of assistant.

", "smithy.api#required": {} } }, - "title": { - "target": "com.amazonaws.qconnect#ContentTitle", + "status": { + "target": "com.amazonaws.qconnect#AssistantStatus", "traits": { - "smithy.api#documentation": "

The title of the content.

", + "smithy.api#documentation": "

The status of the assistant.

", "smithy.api#required": {} } }, - "contentType": { - "target": "com.amazonaws.qconnect#ContentType", + "description": { + "target": "com.amazonaws.qconnect#Description", "traits": { - "smithy.api#documentation": "

The media type of the content.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description.

" } }, - "status": { - "target": "com.amazonaws.qconnect#ContentStatus", + "tags": { + "target": "com.amazonaws.qconnect#Tags", "traits": { - "smithy.api#documentation": "

The status of the content.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" } }, - "metadata": { - "target": "com.amazonaws.qconnect#ContentMetadata", + "serverSideEncryptionConfiguration": { + "target": "com.amazonaws.qconnect#ServerSideEncryptionConfiguration", "traits": { - "smithy.api#documentation": "

A key/value map to store attributes without affecting tagging or recommendations. \nFor example, when synchronizing data between an external system and Amazon Q in Connect, you can store an external version identifier as metadata to utilize for determining drift.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The configuration information for the customer managed key used for encryption.

\n

This KMS key must have a policy that allows kms:CreateGrant,\n kms:DescribeKey, kms:Decrypt, and\n kms:GenerateDataKey* permissions to the IAM identity using the\n key to invoke Amazon Q in Connect. To use Amazon Q in Connect with chat, the key policy must also allow\n kms:Decrypt, kms:GenerateDataKey*, and\n kms:DescribeKey permissions to the connect.amazonaws.com service\n principal.

\n

For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for\n your instance.

" } }, - "tags": { - "target": "com.amazonaws.qconnect#Tags", + "integrationConfiguration": { + "target": "com.amazonaws.qconnect#AssistantIntegrationConfiguration", "traits": { - "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant integration.

" } }, - "linkOutUri": { - "target": "com.amazonaws.qconnect#Uri", + "capabilityConfiguration": { + "target": "com.amazonaws.qconnect#AssistantCapabilityConfiguration", "traits": { - "smithy.api#documentation": "

The URI of the content.

" + "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant capability.

" } }, - "url": { - "target": "com.amazonaws.qconnect#Url", + "aiAgentConfiguration": { + "target": "com.amazonaws.qconnect#AIAgentConfigurationMap", "traits": { - "smithy.api#documentation": "

The URL of the content.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that is\n set on the Amazon Q in Connect Assistant.

" } - }, - "urlExpiry": { - "target": "smithy.api#Timestamp", + } + }, + "traits": { + "smithy.api#documentation": "

The assistant data.

" + } + }, + "com.amazonaws.qconnect#AssistantIntegrationConfiguration": { + "type": "structure", + "members": { + "topicIntegrationArn": { + "target": "com.amazonaws.qconnect#GenericArn", "traits": { - "smithy.api#documentation": "

The expiration time of the URL as an epoch timestamp.

", - "smithy.api#required": {}, - "smithy.api#timestampFormat": "epoch-seconds" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the integrated Amazon SNS topic used for streaming chat messages.

" } } }, "traits": { - "smithy.api#documentation": "

Information about the content.

", - "smithy.api#references": [ + "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant integration.

" + } + }, + "com.amazonaws.qconnect#AssistantList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#AssistantSummary" + } + }, + "com.amazonaws.qconnect#AssistantStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ { - "resource": "com.amazonaws.qconnect#Content" + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" }, { - "resource": "com.amazonaws.qconnect#KnowledgeBase" + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" + }, + { + "value": "DELETED", + "name": "DELETED" } ] } }, - "com.amazonaws.qconnect#ContentDataDetails": { + "com.amazonaws.qconnect#AssistantSummary": { "type": "structure", "members": { - "textData": { - "target": "com.amazonaws.qconnect#TextData", + "assistantId": { + "target": "com.amazonaws.qconnect#Uuid", "traits": { - "smithy.api#documentation": "

Details about the content text data.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } }, - "rankingData": { - "target": "com.amazonaws.qconnect#RankingData", + "assistantArn": { + "target": "com.amazonaws.qconnect#Arn", "traits": { - "smithy.api#documentation": "

Details about the content ranking data.

", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

Details about the content data.

" - } - }, - "com.amazonaws.qconnect#ContentFeedbackData": { - "type": "union", - "members": { - "generativeContentFeedbackData": { - "target": "com.amazonaws.qconnect#GenerativeContentFeedbackData", + }, + "name": { + "target": "com.amazonaws.qconnect#Name", "traits": { - "smithy.api#documentation": "

Information about the feedback for a generative target type.

" + "smithy.api#documentation": "

The name of the assistant.

", + "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

Information about the feedback.

" - } - }, - "com.amazonaws.qconnect#ContentMetadata": { - "type": "map", + }, + "type": { + "target": "com.amazonaws.qconnect#AssistantType", + "traits": { + "smithy.api#documentation": "

The type of the assistant.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qconnect#AssistantStatus", + "traits": { + "smithy.api#documentation": "

The status of the assistant.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description of the assistant.

" + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + } + }, + "serverSideEncryptionConfiguration": { + "target": "com.amazonaws.qconnect#ServerSideEncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information for the customer managed key used for encryption.

\n

This KMS key must have a policy that allows kms:CreateGrant,\n kms:DescribeKey, kms:Decrypt, and\n kms:GenerateDataKey* permissions to the IAM identity using the\n key to invoke Amazon Q in Connect. To use Amazon Q in Connect with chat, the key policy must also allow\n kms:Decrypt, kms:GenerateDataKey*, and\n kms:DescribeKey permissions to the connect.amazonaws.com service\n principal.

\n

For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for\n your instance.

" + } + }, + "integrationConfiguration": { + "target": "com.amazonaws.qconnect#AssistantIntegrationConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant integration.

" + } + }, + "capabilityConfiguration": { + "target": "com.amazonaws.qconnect#AssistantCapabilityConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information for the Amazon Q in Connect assistant capability.

" + } + }, + "aiAgentConfiguration": { + "target": "com.amazonaws.qconnect#AIAgentConfigurationMap", + "traits": { + "smithy.api#documentation": "

The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that is\n set on the Amazon Q in Connect Assistant.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary information about the assistant.

" + } + }, + "com.amazonaws.qconnect#AssistantType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AGENT", + "name": "AGENT" + } + ] + } + }, + "com.amazonaws.qconnect#AssociationConfiguration": { + "type": "structure", + "members": { + "associationId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the association for this Association Configuration.

" + } + }, + "associationType": { + "target": "com.amazonaws.qconnect#AIAgentAssociationConfigurationType", + "traits": { + "smithy.api#documentation": "

The type of the association for this Association Configuration.

" + } + }, + "associationConfigurationData": { + "target": "com.amazonaws.qconnect#AssociationConfigurationData", + "traits": { + "smithy.api#documentation": "

The data of the configuration for an Amazon Q in Connect Assistant Association.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for an Amazon Q in Connect Assistant Association.

" + } + }, + "com.amazonaws.qconnect#AssociationConfigurationData": { + "type": "union", + "members": { + "knowledgeBaseAssociationConfigurationData": { + "target": "com.amazonaws.qconnect#KnowledgeBaseAssociationConfigurationData", + "traits": { + "smithy.api#documentation": "

The data of the configuration for a KNOWLEDGE_BASE type Amazon Q in Connect Assistant\n Association.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A typed union of the data of the configuration for an Amazon Q in Connect Assistant\n Association.

" + } + }, + "com.amazonaws.qconnect#AssociationConfigurationList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#AssociationConfiguration" + } + }, + "com.amazonaws.qconnect#AssociationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "KNOWLEDGE_BASE", + "name": "KNOWLEDGE_BASE" + } + ] + } + }, + "com.amazonaws.qconnect#BedrockFoundationModelConfigurationForParsing": { + "type": "structure", + "members": { + "modelArn": { + "target": "com.amazonaws.qconnect#BedrockModelArnForParsing", + "traits": { + "smithy.api#documentation": "

The ARN of the foundation model.

", + "smithy.api#required": {} + } + }, + "parsingPrompt": { + "target": "com.amazonaws.qconnect#ParsingPrompt", + "traits": { + "smithy.api#documentation": "

Instructions for interpreting the contents of a document.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for a foundation model used to parse documents for a data source.

" + } + }, + "com.amazonaws.qconnect#BedrockModelArnForParsing": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}::foundation-model/anthropic.claude-3-haiku-20240307-v1:0$" + } + }, + "com.amazonaws.qconnect#Channel": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.qconnect#Channels": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#Channel" + }, + "traits": { + "smithy.api#uniqueItems": {} + } + }, + "com.amazonaws.qconnect#ChunkingConfiguration": { + "type": "structure", + "members": { + "chunkingStrategy": { + "target": "com.amazonaws.qconnect#ChunkingStrategy", + "traits": { + "smithy.api#documentation": "

A knowledge base can split your source data into chunks. A chunk refers to an excerpt from a\n data source that is returned when the knowledge base that it belongs to is queried. You have\n the following options for chunking your data. If you opt for NONE, then you may\n want to pre-process your files by splitting them up such that each file corresponds to a\n chunk.

", + "smithy.api#required": {} + } + }, + "fixedSizeChunkingConfiguration": { + "target": "com.amazonaws.qconnect#FixedSizeChunkingConfiguration", + "traits": { + "smithy.api#documentation": "

Configurations for when you choose fixed-size chunking. If you set the\n chunkingStrategy as NONE, exclude this field.

" + } + }, + "hierarchicalChunkingConfiguration": { + "target": "com.amazonaws.qconnect#HierarchicalChunkingConfiguration", + "traits": { + "smithy.api#documentation": "

Settings for hierarchical document chunking for a data source. Hierarchical chunking\n splits documents into layers of chunks where the first layer contains large chunks, and the\n second layer contains smaller chunks derived from the first layer.

" + } + }, + "semanticChunkingConfiguration": { + "target": "com.amazonaws.qconnect#SemanticChunkingConfiguration", + "traits": { + "smithy.api#documentation": "

Settings for semantic document chunking for a data source. Semantic chunking splits a\n document into smaller documents based on groups of similar content derived from the text with\n natural language processing.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about how to chunk the documents in the data source. A chunk refers to an excerpt\n from a data source that is returned when the knowledge base that it belongs to is\n queried.

" + } + }, + "com.amazonaws.qconnect#ChunkingStrategy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FIXED_SIZE", + "name": "FIXED_SIZE" + }, + { + "value": "NONE", + "name": "NONE" + }, + { + "value": "HIERARCHICAL", + "name": "HIERARCHICAL" + }, + { + "value": "SEMANTIC", + "name": "SEMANTIC" + } + ] + } + }, + "com.amazonaws.qconnect#CitationSpan": { + "type": "structure", + "members": { + "beginOffsetInclusive": { + "target": "com.amazonaws.qconnect#CitationSpanOffset", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

Where the text with a citation starts in the generated output.

" + } + }, + "endOffsetExclusive": { + "target": "com.amazonaws.qconnect#CitationSpanOffset", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

Where the text with a citation ends in the generated output.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information about where the text with a citation begins and ends in the generated\n output.

" + } + }, + "com.amazonaws.qconnect#CitationSpanOffset": { + "type": "integer", + "traits": { + "smithy.api#default": 0 + } + }, + "com.amazonaws.qconnect#ClientToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + } + } + }, + "com.amazonaws.qconnect#Configuration": { + "type": "union", + "members": { + "connectConfiguration": { + "target": "com.amazonaws.qconnect#ConnectConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information of the Amazon Connect data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration information of the external data source.

" + } + }, + "com.amazonaws.qconnect#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

The request could not be processed because of a conflict in the current state of the\n resource. For example, if you're using a Create API (such as\n CreateAssistant) that accepts a name, a conflicting resource (usually with the\n same name) is being created or mutated.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.qconnect#ConnectConfiguration": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.qconnect#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Connect instance. You can find the instanceId in the ARN of the instance.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration information of the Amazon Connect data source.

" + } + }, + "com.amazonaws.qconnect#ContactAttributeKey": { + "type": "string" + }, + "com.amazonaws.qconnect#ContactAttributeKeys": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#ContactAttributeKey" + }, + "traits": { + "smithy.api#sensitive": {}, + "smithy.api#uniqueItems": {} + } + }, + "com.amazonaws.qconnect#ContactAttributeValue": { + "type": "string" + }, + "com.amazonaws.qconnect#ContactAttributes": { + "type": "map", + "key": { + "target": "com.amazonaws.qconnect#ContactAttributeKey" + }, + "value": { + "target": "com.amazonaws.qconnect#ContactAttributeValue" + }, + "traits": { + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.qconnect#Content": { + "type": "resource", + "identifiers": { + "knowledgeBaseId": { + "target": "com.amazonaws.qconnect#UuidOrArn" + }, + "contentId": { + "target": "com.amazonaws.qconnect#UuidOrArn" + } + }, + "create": { + "target": "com.amazonaws.qconnect#CreateContent" + }, + "read": { + "target": "com.amazonaws.qconnect#GetContent" + }, + "update": { + "target": "com.amazonaws.qconnect#UpdateContent" + }, + "delete": { + "target": "com.amazonaws.qconnect#DeleteContent" + }, + "list": { + "target": "com.amazonaws.qconnect#ListContents" + }, + "operations": [ + { + "target": "com.amazonaws.qconnect#GetContentSummary" + } + ], + "resources": [ + { + "target": "com.amazonaws.qconnect#ContentAssociation" + } + ], + "traits": { + "aws.api#arn": { + "template": "content/{knowledgeBaseId}/{contentId}" + }, + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.qconnect#ContentAssociation": { + "type": "resource", + "identifiers": { + "knowledgeBaseId": { + "target": "com.amazonaws.qconnect#UuidOrArn" + }, + "contentId": { + "target": "com.amazonaws.qconnect#UuidOrArn" + }, + "contentAssociationId": { + "target": "com.amazonaws.qconnect#UuidOrArn" + } + }, + "create": { + "target": "com.amazonaws.qconnect#CreateContentAssociation" + }, + "read": { + "target": "com.amazonaws.qconnect#GetContentAssociation" + }, + "delete": { + "target": "com.amazonaws.qconnect#DeleteContentAssociation" + }, + "list": { + "target": "com.amazonaws.qconnect#ListContentAssociations" + }, + "traits": { + "aws.api#arn": { + "template": "content-association/{knowledgeBaseId}/{contentId}/{contentAssociationId}" + }, + "aws.iam#disableConditionKeyInference": {} + } + }, + "com.amazonaws.qconnect#ContentAssociationContents": { + "type": "union", + "members": { + "amazonConnectGuideAssociation": { + "target": "com.amazonaws.qconnect#AmazonConnectGuideAssociationData", + "traits": { + "smithy.api#documentation": "

The data of the step-by-step guide association.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The contents of a content association.

" + } + }, + "com.amazonaws.qconnect#ContentAssociationData": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the knowledge base.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", + "smithy.api#required": {} + } + }, + "contentId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the content.

", + "smithy.api#required": {} + } + }, + "contentArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

", + "smithy.api#required": {} + } + }, + "contentAssociationId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the content association. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "smithy.api#required": {} + } + }, + "contentAssociationArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content association.

", + "smithy.api#required": {} + } + }, + "associationType": { + "target": "com.amazonaws.qconnect#ContentAssociationType", + "traits": { + "smithy.api#documentation": "

The type of association.

", + "smithy.api#required": {} + } + }, + "associationData": { + "target": "com.amazonaws.qconnect#ContentAssociationContents", + "traits": { + "smithy.api#documentation": "

The content association.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the content association.

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.qconnect#ContentAssociation" + } + ] + } + }, + "com.amazonaws.qconnect#ContentAssociationSummary": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the knowledge base.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", + "smithy.api#required": {} + } + }, + "contentId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the content.

", + "smithy.api#required": {} + } + }, + "contentArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

", + "smithy.api#required": {} + } + }, + "contentAssociationId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the content association. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "smithy.api#required": {} + } + }, + "contentAssociationArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content association.

", + "smithy.api#required": {} + } + }, + "associationType": { + "target": "com.amazonaws.qconnect#ContentAssociationType", + "traits": { + "smithy.api#documentation": "

The type of association.

", + "smithy.api#required": {} + } + }, + "associationData": { + "target": "com.amazonaws.qconnect#ContentAssociationContents", + "traits": { + "smithy.api#documentation": "

The content association.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary information about a content association.

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.qconnect#ContentAssociation" + } + ] + } + }, + "com.amazonaws.qconnect#ContentAssociationSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#ContentAssociationSummary" + } + }, + "com.amazonaws.qconnect#ContentAssociationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AMAZON_CONNECT_GUIDE", + "name": "AMAZON_CONNECT_GUIDE" + } + ] + } + }, + "com.amazonaws.qconnect#ContentData": { + "type": "structure", + "members": { + "contentArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

", + "smithy.api#required": {} + } + }, + "contentId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the content.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the knowledge base.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.qconnect#Name", + "traits": { + "smithy.api#documentation": "

The name of the content.

", + "smithy.api#required": {} + } + }, + "revisionId": { + "target": "com.amazonaws.qconnect#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The identifier of the content revision.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qconnect#ContentTitle", + "traits": { + "smithy.api#documentation": "

The title of the content.

", + "smithy.api#required": {} + } + }, + "contentType": { + "target": "com.amazonaws.qconnect#ContentType", + "traits": { + "smithy.api#documentation": "

The media type of the content.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qconnect#ContentStatus", + "traits": { + "smithy.api#documentation": "

The status of the content.

", + "smithy.api#required": {} + } + }, + "metadata": { + "target": "com.amazonaws.qconnect#ContentMetadata", + "traits": { + "smithy.api#documentation": "

A key/value map to store attributes without affecting tagging or recommendations. \nFor example, when synchronizing data between an external system and Amazon Q in Connect, you can store an external version identifier as metadata to utilize for determining drift.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + } + }, + "linkOutUri": { + "target": "com.amazonaws.qconnect#Uri", + "traits": { + "smithy.api#documentation": "

The URI of the content.

" + } + }, + "url": { + "target": "com.amazonaws.qconnect#Url", + "traits": { + "smithy.api#documentation": "

The URL of the content.

", + "smithy.api#required": {} + } + }, + "urlExpiry": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The expiration time of the URL as an epoch timestamp.

", + "smithy.api#required": {}, + "smithy.api#timestampFormat": "epoch-seconds" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the content.

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.qconnect#Content" + }, + { + "resource": "com.amazonaws.qconnect#KnowledgeBase" + } + ] + } + }, + "com.amazonaws.qconnect#ContentDataDetails": { + "type": "structure", + "members": { + "textData": { + "target": "com.amazonaws.qconnect#TextData", + "traits": { + "smithy.api#documentation": "

Details about the content text data.

", + "smithy.api#required": {} + } + }, + "rankingData": { + "target": "com.amazonaws.qconnect#RankingData", + "traits": { + "smithy.api#documentation": "

Details about the content ranking data.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the content data.

" + } + }, + "com.amazonaws.qconnect#ContentFeedbackData": { + "type": "union", + "members": { + "generativeContentFeedbackData": { + "target": "com.amazonaws.qconnect#GenerativeContentFeedbackData", + "traits": { + "smithy.api#documentation": "

Information about the feedback for a generative target type.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the feedback.

" + } + }, + "com.amazonaws.qconnect#ContentMetadata": { + "type": "map", "key": { "target": "com.amazonaws.qconnect#NonEmptyString" }, - "value": { - "target": "com.amazonaws.qconnect#NonEmptyString" + "value": { + "target": "com.amazonaws.qconnect#NonEmptyString" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.qconnect#ContentReference": { + "type": "structure", + "members": { + "knowledgeBaseArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

" + } + }, + "knowledgeBaseId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base.

" + } + }, + "contentArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

" + } + }, + "contentId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the content.

" + } + }, + "sourceURL": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The web URL of the source content.

" + } + }, + "referenceType": { + "target": "com.amazonaws.qconnect#ReferenceType", + "traits": { + "smithy.api#documentation": "

The type of reference content.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Reference information about the content.

" + } + }, + "com.amazonaws.qconnect#ContentStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "UPDATE_FAILED", + "name": "UPDATE_FAILED" + } + ] + } + }, + "com.amazonaws.qconnect#ContentSummary": { + "type": "structure", + "members": { + "contentArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

", + "smithy.api#required": {} + } + }, + "contentId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the content.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.qconnect#Name", + "traits": { + "smithy.api#documentation": "

The name of the content.

", + "smithy.api#required": {} + } + }, + "revisionId": { + "target": "com.amazonaws.qconnect#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The identifier of the revision of the content.

", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.qconnect#ContentTitle", + "traits": { + "smithy.api#documentation": "

The title of the content.

", + "smithy.api#required": {} + } + }, + "contentType": { + "target": "com.amazonaws.qconnect#ContentType", + "traits": { + "smithy.api#documentation": "

The media type of the content.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qconnect#ContentStatus", + "traits": { + "smithy.api#documentation": "

The status of the content.

", + "smithy.api#required": {} + } + }, + "metadata": { + "target": "com.amazonaws.qconnect#ContentMetadata", + "traits": { + "smithy.api#documentation": "

A key/value map to store attributes without affecting tagging or recommendations. \nFor example, when synchronizing data between an external system and Amazon Q in Connect, you can store an external version identifier as metadata to utilize for determining drift.
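For illustration, a minimal Swift sketch of that drift check, assuming a Soto-generated `QConnect.ContentSummary` whose `metadata` member is the map described here (module, type, and key names are assumptions, not taken from this diff):

```swift
import SotoQConnect  // assumed generated module name

// Sketch: compare an external system's revision id with the one stored in the
// content's metadata map (the key "externalVersionId" is a made-up example).
func hasDrifted(_ summary: QConnect.ContentSummary, externalVersionId: String) -> Bool {
    // `metadata` is the key/value map described above (at most 10 entries).
    summary.metadata["externalVersionId"] != externalVersionId
}
```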

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary information about the content.

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.qconnect#Content" + }, + { + "resource": "com.amazonaws.qconnect#KnowledgeBase" + } + ] + } + }, + "com.amazonaws.qconnect#ContentSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#ContentSummary" + } + }, + "com.amazonaws.qconnect#ContentTitle": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.qconnect#ContentType": { + "type": "string", + "traits": { + "smithy.api#pattern": "^(text/(plain|html|csv))|(application/(pdf|vnd\\.openxmlformats-officedocument\\.wordprocessingml\\.document))|(application/x\\.wisdom-json;source=(salesforce|servicenow|zendesk))$" + } + }, + "com.amazonaws.qconnect#CreateAIAgent": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#CreateAIAgentRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#CreateAIAgentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ConflictException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon Q in Connect AI Agent.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagents", + "method": "POST" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#CreateAIAgentRequest": { + "type": "structure", + "members": { + "clientToken": { + "target": "com.amazonaws.qconnect#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the AWS SDK populates this field. For more information about\n idempotency, see Making retries safe with idempotent APIs.

", + "smithy.api#idempotencyToken": {} + } + }, + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.qconnect#Name", + "traits": { + "smithy.api#documentation": "

The name of the AI Agent.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.qconnect#AIAgentType", + "traits": { + "smithy.api#documentation": "

The type of the AI Agent.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.qconnect#AIAgentConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the AI Agent.

", + "smithy.api#required": {} + } + }, + "visibilityStatus": { + "target": "com.amazonaws.qconnect#VisibilityStatus", + "traits": { + "smithy.api#documentation": "

The visibility status of the AI Agent.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description of the AI Agent.
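Taken together, the request members above map onto a Soto call roughly like the following. This is only a sketch: the `QConnect` client, member-wise initializers, and enum case names are assumed, and the `AIAgentConfiguration` union (defined elsewhere in the model) is passed in rather than constructed.

```swift
import SotoQConnect  // assumed generated module name

// Sketch: create an AI Agent. Enum cases (.answerRecommendation, .published)
// are assumed renderings of the model's enum values.
func createAnswerAgent(
    _ qconnect: QConnect,
    assistantId: String,
    configuration: QConnect.AIAgentConfiguration
) async throws -> QConnect.AIAgentData? {
    let request = QConnect.CreateAIAgentRequest(
        assistantId: assistantId,          // ID or ARN of the assistant
        // clientToken omitted: the SDK populates it to keep the request idempotent
        configuration: configuration,
        description: "Answers common customer questions",
        name: "answer-recommendation-agent",
        type: .answerRecommendation,
        visibilityStatus: .published
    )
    return try await qconnect.createAIAgent(request).aiAgent
}
```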

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#CreateAIAgentResponse": { + "type": "structure", + "members": { + "aiAgent": { + "target": "com.amazonaws.qconnect#AIAgentData", + "traits": { + "smithy.api#documentation": "

The data of the created AI Agent.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#CreateAIAgentVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#CreateAIAgentVersionRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#CreateAIAgentVersionResponse" }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 10 + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ConflictException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon Q in Connect AI Agent version.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagents/{aiAgentId}/versions", + "method": "POST" + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.qconnect#ContentReference": { + "com.amazonaws.qconnect#CreateAIAgentVersionRequest": { "type": "structure", "members": { - "knowledgeBaseArn": { - "target": "com.amazonaws.qconnect#Arn", + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

" + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "knowledgeBaseId": { - "target": "com.amazonaws.qconnect#Uuid", + "aiAgentId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", "traits": { - "smithy.api#documentation": "

The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base.

" + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI Agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "contentArn": { - "target": "com.amazonaws.qconnect#Arn", + "modifiedTime": { + "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

" + "smithy.api#documentation": "

The modification time of the AI Agent to be tracked for version creation. This field\n should be specified to avoid creating a version while simultaneous updates to the underlying AI\n Agent are possible. The value should be the modifiedTime returned from the request that created\n or updated the AI Agent, so that version creation fails if the AI Agent was updated after the\n specified modification time.
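In other words, modifiedTime acts as an optimistic-concurrency check on version creation. A hedged sketch of how that could look with the Soto-generated client (names assumed from this model, not verified against the generated code):

```swift
import Foundation
import SotoQConnect  // assumed generated module name

// Sketch: only snapshot a version if the agent has not changed since we last saw it.
func snapshotAgentVersion(
    _ qconnect: QConnect,
    assistantId: String,
    aiAgentId: String,
    lastSeenModifiedTime: Date
) async throws -> QConnect.CreateAIAgentVersionResponse {
    let request = QConnect.CreateAIAgentVersionRequest(
        aiAgentId: aiAgentId,
        assistantId: assistantId,
        modifiedTime: lastSeenModifiedTime  // a later concurrent update makes this call fail
    )
    return try await qconnect.createAIAgentVersion(request)
}
```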

" } }, - "contentId": { - "target": "com.amazonaws.qconnect#Uuid", + "clientToken": { + "target": "com.amazonaws.qconnect#ClientToken", "traits": { - "smithy.api#documentation": "

The identifier of the content.

" + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the AWS SDK populates this field. For more information about\n idempotency, see Making retries safe with idempotent APIs.

", + "smithy.api#idempotencyToken": {} } } }, "traits": { - "smithy.api#documentation": "

Reference information about the content.

" - } - }, - "com.amazonaws.qconnect#ContentStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "CREATE_IN_PROGRESS", - "name": "CREATE_IN_PROGRESS" - }, - { - "value": "CREATE_FAILED", - "name": "CREATE_FAILED" - }, - { - "value": "ACTIVE", - "name": "ACTIVE" - }, - { - "value": "DELETE_IN_PROGRESS", - "name": "DELETE_IN_PROGRESS" - }, - { - "value": "DELETE_FAILED", - "name": "DELETE_FAILED" - }, - { - "value": "DELETED", - "name": "DELETED" - }, - { - "value": "UPDATE_FAILED", - "name": "UPDATE_FAILED" - } - ] + "smithy.api#input": {} } }, - "com.amazonaws.qconnect#ContentSummary": { + "com.amazonaws.qconnect#CreateAIAgentVersionResponse": { "type": "structure", "members": { - "contentArn": { - "target": "com.amazonaws.qconnect#Arn", + "aiAgent": { + "target": "com.amazonaws.qconnect#AIAgentData", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the content.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The data of the AI Agent version.

" } }, - "contentId": { - "target": "com.amazonaws.qconnect#Uuid", + "versionNumber": { + "target": "com.amazonaws.qconnect#Version", "traits": { - "smithy.api#documentation": "

The identifier of the content.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The version number of the AI Agent version.

" } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#CreateAIPrompt": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#CreateAIPromptRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#CreateAIPromptResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" }, - "knowledgeBaseArn": { - "target": "com.amazonaws.qconnect#Arn", + { + "target": "com.amazonaws.qconnect#ConflictException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon Q in Connect AI Prompt.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiprompts", + "method": "POST" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#CreateAIPromptRequest": { + "type": "structure", + "members": { + "clientToken": { + "target": "com.amazonaws.qconnect#ClientToken", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the AWS SDK populates this field. For more information about\n idempotency, see Making retries safe with idempotent APIs.

", + "smithy.api#idempotencyToken": {} } }, - "knowledgeBaseId": { - "target": "com.amazonaws.qconnect#Uuid", + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", "traits": { - "smithy.api#documentation": "

The identifier of the knowledge base. This should not be a QUICK_RESPONSES type knowledge base.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, "name": { "target": "com.amazonaws.qconnect#Name", "traits": { - "smithy.api#documentation": "

The name of the content.

", + "smithy.api#documentation": "

The name of the AI Prompt.

", "smithy.api#required": {} } }, - "revisionId": { - "target": "com.amazonaws.qconnect#NonEmptyString", + "type": { + "target": "com.amazonaws.qconnect#AIPromptType", "traits": { - "smithy.api#documentation": "

The identifier of the revision of the content.

", + "smithy.api#documentation": "

The type of this AI Prompt.

", "smithy.api#required": {} } }, - "title": { - "target": "com.amazonaws.qconnect#ContentTitle", + "templateConfiguration": { + "target": "com.amazonaws.qconnect#AIPromptTemplateConfiguration", "traits": { - "smithy.api#documentation": "

The title of the content.

", + "smithy.api#documentation": "

The configuration of the prompt template for this AI Prompt.

", "smithy.api#required": {} } }, - "contentType": { - "target": "com.amazonaws.qconnect#ContentType", + "visibilityStatus": { + "target": "com.amazonaws.qconnect#VisibilityStatus", "traits": { - "smithy.api#documentation": "

The media type of the content.

", + "smithy.api#documentation": "

The visibility status of the AI Prompt.

", "smithy.api#required": {} } }, - "status": { - "target": "com.amazonaws.qconnect#ContentStatus", + "templateType": { + "target": "com.amazonaws.qconnect#AIPromptTemplateType", "traits": { - "smithy.api#documentation": "

The status of the content.

", + "smithy.api#documentation": "

The type of the prompt template for this AI Prompt.

", "smithy.api#required": {} } }, - "metadata": { - "target": "com.amazonaws.qconnect#ContentMetadata", + "modelId": { + "target": "com.amazonaws.qconnect#AIPromptModelIdentifier", "traits": { - "smithy.api#documentation": "

A key/value map to store attributes without affecting tagging or recommendations. \nFor example, when synchronizing data between an external system and Amazon Q in Connect, you can store an external version identifier as metadata to utilize for determining drift.

", + "smithy.api#documentation": "

The identifier of the model used for this AI Prompt. Model Ids supported are:\n CLAUDE_3_HAIKU_20240307_V1\n

", + "smithy.api#required": {} + } + }, + "apiFormat": { + "target": "com.amazonaws.qconnect#AIPromptAPIFormat", + "traits": { + "smithy.api#documentation": "

The API Format of the AI Prompt.

", "smithy.api#required": {} } }, @@ -1208,39 +2507,124 @@ "traits": { "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description of the AI Prompt.
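The request above mirrors CreateAIAgentRequest; a speculative sketch follows, passing the template configuration in (its union cases are defined elsewhere in the model) and guessing the Swift enum case names from the model's enum values.

```swift
import SotoQConnect  // assumed generated module name

// Sketch: create an AI Prompt against the Claude 3 Haiku model id listed above.
// Enum cases (.anthropicClaudeMessages, .text, .answerGeneration, .published)
// are assumed renderings of the model's values.
func createAnswerPrompt(
    _ qconnect: QConnect,
    assistantId: String,
    template: QConnect.AIPromptTemplateConfiguration
) async throws -> QConnect.AIPromptData? {
    let request = QConnect.CreateAIPromptRequest(
        apiFormat: .anthropicClaudeMessages,
        assistantId: assistantId,
        modelId: "CLAUDE_3_HAIKU_20240307_V1",  // the only model id listed in this revision
        name: "answer-generation-prompt",
        templateConfiguration: template,
        templateType: .text,
        type: .answerGeneration,
        visibilityStatus: .published
    )
    return try await qconnect.createAIPrompt(request).aiPrompt
}
```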

" + } } }, "traits": { - "smithy.api#documentation": "

Summary information about the content.

", - "smithy.api#references": [ - { - "resource": "com.amazonaws.qconnect#Content" - }, - { - "resource": "com.amazonaws.qconnect#KnowledgeBase" + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#CreateAIPromptResponse": { + "type": "structure", + "members": { + "aiPrompt": { + "target": "com.amazonaws.qconnect#AIPromptData", + "traits": { + "smithy.api#documentation": "

The data of the AI Prompt.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#CreateAIPromptVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#CreateAIPromptVersionRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#CreateAIPromptVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ConflictException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon Q in Connect AI Prompt version.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiprompts/{aiPromptId}/versions", + "method": "POST" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#CreateAIPromptVersionRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aiPromptId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI prompt.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "modifiedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The time the AI Prompt was last modified.

" + } + }, + "clientToken": { + "target": "com.amazonaws.qconnect#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the AWS SDK populates this field. For more information about\n idempotency, see Making retries safe with idempotent APIs.

", + "smithy.api#idempotencyToken": {} } - ] - } - }, - "com.amazonaws.qconnect#ContentSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.qconnect#ContentSummary" - } - }, - "com.amazonaws.qconnect#ContentTitle": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 255 } + }, + "traits": { + "smithy.api#input": {} } }, - "com.amazonaws.qconnect#ContentType": { - "type": "string", + "com.amazonaws.qconnect#CreateAIPromptVersionResponse": { + "type": "structure", + "members": { + "aiPrompt": { + "target": "com.amazonaws.qconnect#AIPromptData", + "traits": { + "smithy.api#documentation": "

The data of the AI Prompt version.

" + } + }, + "versionNumber": { + "target": "com.amazonaws.qconnect#Version", + "traits": { + "smithy.api#documentation": "

The version number of the AI Prompt version.

" + } + } + }, "traits": { - "smithy.api#pattern": "^(text/(plain|html|csv))|(application/(pdf|vnd\\.openxmlformats-officedocument\\.wordprocessingml\\.document))|(application/x\\.wisdom-json;source=(salesforce|servicenow|zendesk))$" + "smithy.api#output": {} } }, "com.amazonaws.qconnect#CreateAssistant": { @@ -1477,7 +2861,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an association between a content resource in a knowledge base and step-by-step\n guides. Step-by-step guides offer instructions to agents for resolving common\n customer issues. You create a content association to integrate Amazon Q in Connect and step-by-step\n guides.

\n

After you integrate Amazon Q and step-by-step guides, when Amazon Q provides a\n recommendation to an agent based on the intent that it's detected, it also provides them with\n the option to start the step-by-step guide that you have associated with the content.

\n

Note the following limitations:

  • You can create only one content association for each\n content resource in a knowledge base.
  • You can associate a step-by-step guide with multiple content resources.

For more information, see Integrate Amazon Q in Connect with\n step-by-step guides in the Amazon Connect Administrator\n Guide.

", + "smithy.api#documentation": "

Creates an association between a content resource in a knowledge base and step-by-step guides. Step-by-step guides offer instructions to agents for resolving\n common customer issues. You create a content association to integrate Amazon Q in Connect and\n step-by-step guides.

\n

After you integrate Amazon Q and step-by-step guides, when Amazon Q provides a\n recommendation to an agent based on the intent that it's detected, it also provides them with\n the option to start the step-by-step guide that you have associated with the content.

\n

Note the following limitations:

  • You can create only one content association for each content resource in a knowledge\n base.
  • You can associate a step-by-step guide with multiple content resources.

For more information, see Integrate Amazon Q in Connect with\n step-by-step guides in the Amazon Connect Administrator\n Guide.

", "smithy.api#http": { "uri": "/knowledgeBases/{knowledgeBaseId}/contents/{contentId}/associations", "method": "POST" @@ -1685,6 +3069,12 @@ "smithy.api#documentation": "

Information about how to render the content.

" } }, + "vectorIngestionConfiguration": { + "target": "com.amazonaws.qconnect#VectorIngestionConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about how to ingest the documents in a data source.

" + } + }, "serverSideEncryptionConfiguration": { "target": "com.amazonaws.qconnect#ServerSideEncryptionConfiguration", "traits": { @@ -1917,94 +3307,380 @@ "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" } }, - "tagFilter": { - "target": "com.amazonaws.qconnect#TagFilter", - "traits": { - "smithy.api#documentation": "

An object that can be used to specify Tag conditions.

" - } + "tagFilter": { + "target": "com.amazonaws.qconnect#TagFilter", + "traits": { + "smithy.api#documentation": "

An object that can be used to specify Tag conditions.

" + } + }, + "aiAgentConfiguration": { + "target": "com.amazonaws.qconnect#AIAgentConfigurationMap", + "traits": { + "smithy.api#documentation": "

The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that\n should be used by Amazon Q in Connect for this Session.

" + } + } + } + }, + "com.amazonaws.qconnect#CreateSessionResponse": { + "type": "structure", + "members": { + "session": { + "target": "com.amazonaws.qconnect#SessionData", + "traits": { + "smithy.api#documentation": "

The session.

" + } + } + } + }, + "com.amazonaws.qconnect#DataDetails": { + "type": "union", + "members": { + "contentData": { + "target": "com.amazonaws.qconnect#ContentDataDetails", + "traits": { + "smithy.api#documentation": "

Details about the content data.

" + } + }, + "generativeData": { + "target": "com.amazonaws.qconnect#GenerativeDataDetails", + "traits": { + "smithy.api#documentation": "

Details about the generative data.

" + } + }, + "intentDetectedData": { + "target": "com.amazonaws.qconnect#IntentDetectedDataDetails", + "traits": { + "smithy.api#documentation": "

Details about the intent data.

" + } + }, + "sourceContentData": { + "target": "com.amazonaws.qconnect#SourceContentDataDetails", + "traits": { + "smithy.api#documentation": "

Details about the source content data.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the data.

" + } + }, + "com.amazonaws.qconnect#DataReference": { + "type": "union", + "members": { + "contentReference": { + "target": "com.amazonaws.qconnect#ContentReference" + }, + "generativeReference": { + "target": "com.amazonaws.qconnect#GenerativeReference", + "traits": { + "smithy.api#documentation": "

Reference information about the generative content.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Reference data.

" + } + }, + "com.amazonaws.qconnect#DataSummary": { + "type": "structure", + "members": { + "reference": { + "target": "com.amazonaws.qconnect#DataReference", + "traits": { + "smithy.api#documentation": "

Reference information about the content.

", + "smithy.api#required": {} + } + }, + "details": { + "target": "com.amazonaws.qconnect#DataDetails", + "traits": { + "smithy.api#documentation": "

Details about the data.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary of the data.
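DataDetails is a Smithy union, which Soto typically renders as a Swift enum with one case per member; a speculative sketch of inspecting a DataSummary (case names assumed from the members listed above, not verified against the generated code):

```swift
import SotoQConnect  // assumed generated module name

// Sketch: label a DataSummary by which DataDetails variant it carries.
// Case names are assumed; check the generated enum for the real ones.
func kind(of summary: QConnect.DataSummary) -> String {
    switch summary.details {
    case .contentData:        return "knowledge base content"
    case .generativeData:     return "LLM-generated answer"
    case .intentDetectedData: return "detected intent"
    case .sourceContentData:  return "source content"
    default:                  return "unknown data detail"
    }
}
```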

" + } + }, + "com.amazonaws.qconnect#DataSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#DataSummary" + } + }, + "com.amazonaws.qconnect#DeleteAIAgent": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#DeleteAIAgentRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#DeleteAIAgentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Amazon Q in Connect AI Agent.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagents/{aiAgentId}", + "method": "DELETE", + "code": 204 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#DeleteAIAgentRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aiAgentId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI Agent. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#DeleteAIAgentResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#DeleteAIAgentVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#DeleteAIAgentVersionRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#DeleteAIAgentVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ConflictException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Amazon Q in Connect AI Agent Version.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagents/{aiAgentId}/versions/{versionNumber}", + "method": "DELETE", + "code": 204 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#DeleteAIAgentVersionRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aiAgentId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI Agent. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "versionNumber": { + "target": "com.amazonaws.qconnect#Version", + "traits": { + "smithy.api#documentation": "

The version number of the AI Agent version.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#DeleteAIAgentVersionResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#DeleteAIPrompt": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#DeleteAIPromptRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#DeleteAIPromptResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Amazon Q in Connect AI Prompt.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiprompts/{aiPromptId}", + "method": "DELETE", + "code": 204 + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.qconnect#CreateSessionResponse": { + "com.amazonaws.qconnect#DeleteAIPromptRequest": { "type": "structure", "members": { - "session": { - "target": "com.amazonaws.qconnect#SessionData", - "traits": { - "smithy.api#documentation": "

The session.

" - } - } - } - }, - "com.amazonaws.qconnect#DataDetails": { - "type": "union", - "members": { - "contentData": { - "target": "com.amazonaws.qconnect#ContentDataDetails", - "traits": { - "smithy.api#documentation": "

Details about the content data.

" - } - }, - "generativeData": { - "target": "com.amazonaws.qconnect#GenerativeDataDetails", + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", "traits": { - "smithy.api#documentation": "

Details about the generative data.

" + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "sourceContentData": { - "target": "com.amazonaws.qconnect#SourceContentDataDetails", + "aiPromptId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", "traits": { - "smithy.api#documentation": "

Details about the content data.

" + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI prompt. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Details about the data.

" + "smithy.api#input": {} } }, - "com.amazonaws.qconnect#DataReference": { - "type": "union", - "members": { - "contentReference": { - "target": "com.amazonaws.qconnect#ContentReference" + "com.amazonaws.qconnect#DeleteAIPromptResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#DeleteAIPromptVersion": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#DeleteAIPromptVersionRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#DeleteAIPromptVersionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" }, - "generativeReference": { - "target": "com.amazonaws.qconnect#GenerativeReference", - "traits": { - "smithy.api#documentation": "

Reference information about the generative content.

" - } + { + "target": "com.amazonaws.qconnect#ConflictException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" } - }, + ], "traits": { - "smithy.api#documentation": "

Reference data.

" + "smithy.api#documentation": "

Deletes an Amazon Q in Connect AI Prompt version.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiprompts/{aiPromptId}/versions/{versionNumber}", + "method": "DELETE", + "code": 204 + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.qconnect#DataSummary": { + "com.amazonaws.qconnect#DeleteAIPromptVersionRequest": { "type": "structure", "members": { - "reference": { - "target": "com.amazonaws.qconnect#DataReference", + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", "traits": { - "smithy.api#documentation": "

Reference information about the content.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "details": { - "target": "com.amazonaws.qconnect#DataDetails", + "aiPromptId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", "traits": { - "smithy.api#documentation": "

Details about the data.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI prompt.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "versionNumber": { + "target": "com.amazonaws.qconnect#Version", + "traits": { + "smithy.api#documentation": "

The version number of the AI Prompt version to be deleted.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Summary of the data.

" + "smithy.api#input": {} } }, - "com.amazonaws.qconnect#DataSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.qconnect#DataSummary" + "com.amazonaws.qconnect#DeleteAIPromptVersionResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.qconnect#DeleteAssistant": { @@ -2491,6 +4167,12 @@ "smithy.api#documentation": "

The configuration information of the external data source.

" } }, + "com.amazonaws.qconnect#FailureReason": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#NonEmptyString" + } + }, "com.amazonaws.qconnect#Filter": { "type": "structure", "members": { @@ -2548,6 +4230,35 @@ ] } }, + "com.amazonaws.qconnect#FixedSizeChunkingConfiguration": { + "type": "structure", + "members": { + "maxTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens to include in a chunk.

", + "smithy.api#range": { + "min": 1 + }, + "smithy.api#required": {} + } + }, + "overlapPercentage": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The percentage of overlap between adjacent chunks of a data source.

", + "smithy.api#range": { + "min": 1, + "max": 99 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Configurations for when you choose fixed-size chunking. If you set the\n chunkingStrategy as NONE, exclude this field.
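As a concrete example, the fixed-size strategy above could be expressed through the Soto-generated types roughly as follows (member-wise initializer assumed; values chosen to satisfy the ranges shown):

```swift
import SotoQConnect  // assumed generated module name

// Sketch: 512-token chunks with 20% overlap between adjacent chunks.
// maxTokens must be >= 1; overlapPercentage must be between 1 and 99.
let fixedSizeChunking = QConnect.FixedSizeChunkingConfiguration(
    maxTokens: 512,
    overlapPercentage: 20
)
```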

" + } + }, "com.amazonaws.qconnect#GenerativeContentFeedbackData": { "type": "structure", "members": { @@ -2560,66 +4271,216 @@ } }, "traits": { - "smithy.api#documentation": "

The feedback information for a generative target type.

" + "smithy.api#documentation": "

The feedback information for a generative target type.

" + } + }, + "com.amazonaws.qconnect#GenerativeDataDetails": { + "type": "structure", + "members": { + "completion": { + "target": "com.amazonaws.qconnect#SensitiveString", + "traits": { + "smithy.api#documentation": "

The LLM response.

", + "smithy.api#required": {} + } + }, + "references": { + "target": "com.amazonaws.qconnect#DataSummaryList", + "traits": { + "smithy.api#documentation": "

The references used to generate the LLM response.

", + "smithy.api#required": {} + } + }, + "rankingData": { + "target": "com.amazonaws.qconnect#RankingData", + "traits": { + "smithy.api#documentation": "

Details about the generative content ranking data.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about generative data.

" + } + }, + "com.amazonaws.qconnect#GenerativeReference": { + "type": "structure", + "members": { + "modelId": { + "target": "com.amazonaws.qconnect#LlmModelId", + "traits": { + "smithy.api#documentation": "

The identifier of the LLM model.

" + } + }, + "generationId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the generation produced by the LLM model.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Reference information about generative content.

" + } + }, + "com.amazonaws.qconnect#GenericArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^arn:[a-z-]+?:[a-z-]+?:[a-z0-9-]*?:([0-9]{12})?:[a-zA-Z0-9-:/]+$" + } + }, + "com.amazonaws.qconnect#GetAIAgent": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#GetAIAgentRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#GetAIAgentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon Q in Connect AI Agent.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagents/{aiAgentId}", + "method": "GET" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qconnect#GetAIAgentRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aiAgentId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI Agent (with or without a version qualifier).\n Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#GetAIAgentResponse": { + "type": "structure", + "members": { + "aiAgent": { + "target": "com.amazonaws.qconnect#AIAgentData", + "traits": { + "smithy.api#documentation": "

The data of the AI Agent.

" + } + }, + "versionNumber": { + "target": "com.amazonaws.qconnect#Version", + "traits": { + "smithy.api#documentation": "

The version number of the AI Agent version (returned if an AI Agent version was specified\n via use of a qualifier for the aiAgentId on the request).

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#GetAIPrompt": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#GetAIPromptRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#GetAIPromptResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets an Amazon Q in Connect AI Prompt.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiprompts/{aiPromptId}", + "method": "GET" + }, + "smithy.api#readonly": {} } }, - "com.amazonaws.qconnect#GenerativeDataDetails": { + "com.amazonaws.qconnect#GetAIPromptRequest": { "type": "structure", "members": { - "completion": { - "target": "com.amazonaws.qconnect#SensitiveString", - "traits": { - "smithy.api#documentation": "

The LLM response.

", - "smithy.api#required": {} - } - }, - "references": { - "target": "com.amazonaws.qconnect#DataSummaryList", + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", "traits": { - "smithy.api#documentation": "

The references used to generative the LLM response.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "rankingData": { - "target": "com.amazonaws.qconnect#RankingData", + "aiPromptId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", "traits": { - "smithy.api#documentation": "

Details about the generative content ranking data.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI prompt.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

Details about generative data.

" + "smithy.api#input": {} } }, - "com.amazonaws.qconnect#GenerativeReference": { + "com.amazonaws.qconnect#GetAIPromptResponse": { "type": "structure", "members": { - "modelId": { - "target": "com.amazonaws.qconnect#LlmModelId", + "aiPrompt": { + "target": "com.amazonaws.qconnect#AIPromptData", "traits": { - "smithy.api#documentation": "

The identifier of the LLM model.

" + "smithy.api#documentation": "

The data of the AI Prompt.

" } }, - "generationId": { - "target": "com.amazonaws.qconnect#Uuid", + "versionNumber": { + "target": "com.amazonaws.qconnect#Version", "traits": { - "smithy.api#documentation": "

The identifier of the LLM model.

" + "smithy.api#documentation": "

The version number of the AI Prompt version (returned if an AI Prompt version was\n specified via use of a qualifier for the aiPromptId on the request).

" } } }, "traits": { - "smithy.api#documentation": "

Reference information about generative content.

" - } - }, - "com.amazonaws.qconnect#GenericArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 2048 - }, - "smithy.api#pattern": "^arn:[a-z-]+?:[a-z-]+?:[a-z0-9-]*?:([0-9]{12})?:[a-zA-Z0-9-:/]+$" + "smithy.api#output": {} } }, "com.amazonaws.qconnect#GetAssistant": { @@ -3315,6 +5176,62 @@ "target": "com.amazonaws.qconnect#NonEmptyString" } }, + "com.amazonaws.qconnect#HierarchicalChunkingConfiguration": { + "type": "structure", + "members": { + "levelConfigurations": { + "target": "com.amazonaws.qconnect#HierarchicalChunkingLevelConfigurations", + "traits": { + "smithy.api#documentation": "

Token settings for each layer.

", + "smithy.api#required": {} + } + }, + "overlapTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The number of tokens to repeat across chunks in the same layer.

", + "smithy.api#range": { + "min": 1 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for hierarchical document chunking for a data source. Hierarchical chunking\n splits documents into layers of chunks where the first layer contains large chunks, and the\n second layer contains smaller chunks derived from the first layer.

" + } + }, + "com.amazonaws.qconnect#HierarchicalChunkingLevelConfiguration": { + "type": "structure", + "members": { + "maxTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens that a chunk can contain in this layer.

", + "smithy.api#range": { + "min": 1, + "max": 8192 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Token settings for each layer.
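Putting the two shapes together: the level list must hold exactly two entries (per the length trait of 2 below), one for the parent layer and one for the child layer. A sketch with assumed Soto-generated initializers:

```swift
import SotoQConnect  // assumed generated module name

// Sketch: parent chunks of up to 1500 tokens, child chunks of up to 300 tokens,
// repeating 60 tokens across chunks in the same layer.
let hierarchicalChunking = QConnect.HierarchicalChunkingConfiguration(
    levelConfigurations: [
        QConnect.HierarchicalChunkingLevelConfiguration(maxTokens: 1500),  // first (parent) layer
        QConnect.HierarchicalChunkingLevelConfiguration(maxTokens: 300),   // second (child) layer
    ],
    overlapTokens: 60
)
```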

" + } + }, + "com.amazonaws.qconnect#HierarchicalChunkingLevelConfigurations": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#HierarchicalChunkingLevelConfiguration" + }, + "traits": { + "smithy.api#length": { + "min": 2, + "max": 2 + } + } + }, "com.amazonaws.qconnect#Highlight": { "type": "structure", "members": { @@ -3571,6 +5488,43 @@ ] } }, + "com.amazonaws.qconnect#IntentDetectedDataDetails": { + "type": "structure", + "members": { + "intent": { + "target": "com.amazonaws.qconnect#SensitiveString", + "traits": { + "smithy.api#documentation": "

The detected intent.

", + "smithy.api#required": {} + } + }, + "intentId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the detected intent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the detected intent.

" + } + }, + "com.amazonaws.qconnect#IntentInputData": { + "type": "structure", + "members": { + "intentId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q intent.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the Amazon Q intent.

" + } + }, "com.amazonaws.qconnect#KnowledgeBase": { "type": "resource", "identifiers": { @@ -3635,6 +5589,29 @@ "aws.iam#disableConditionKeyInference": {} } }, + "com.amazonaws.qconnect#KnowledgeBaseAssociationConfigurationData": { + "type": "structure", + "members": { + "contentTagFilter": { + "target": "com.amazonaws.qconnect#TagFilter" + }, + "maxResults": { + "target": "com.amazonaws.qconnect#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page.

" + } + }, + "overrideKnowledgeBaseSearchType": { + "target": "com.amazonaws.qconnect#KnowledgeBaseSearchType", + "traits": { + "smithy.api#documentation": "

The search type to be used against the Knowledge Base for this request. The values can be\n SEMANTIC, which uses vector embeddings, or HYBRID, which uses vector\n embeddings and raw text.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The data of the configuration for a KNOWLEDGE_BASE type Amazon Q in Connect\n Assistant Association.
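A short sketch of that association configuration through the generated types, assuming the HYBRID enum value is rendered as .hybrid (names assumed, not verified):

```swift
import SotoQConnect  // assumed generated module name

// Sketch: cap retrieval at 5 results and force hybrid (vector + raw text) search
// for this knowledge base association; the content tag filter is left unset.
let kbAssociationConfig = QConnect.KnowledgeBaseAssociationConfigurationData(
    maxResults: 5,
    overrideKnowledgeBaseSearchType: .hybrid
)
```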

" + } + }, "com.amazonaws.qconnect#KnowledgeBaseAssociationData": { "type": "structure", "members": { @@ -3649,13 +5626,168 @@ "traits": { "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

" } - } - }, + } + }, + "traits": { + "smithy.api#documentation": "

Association information about the knowledge base.

" + } + }, + "com.amazonaws.qconnect#KnowledgeBaseData": { + "type": "structure", + "members": { + "knowledgeBaseId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the knowledge base.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.qconnect#Name", + "traits": { + "smithy.api#documentation": "

The name of the knowledge base.

", + "smithy.api#required": {} + } + }, + "knowledgeBaseType": { + "target": "com.amazonaws.qconnect#KnowledgeBaseType", + "traits": { + "smithy.api#documentation": "

The type of knowledge base.

", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.qconnect#KnowledgeBaseStatus", + "traits": { + "smithy.api#documentation": "

The status of the knowledge base.

", + "smithy.api#required": {} + } + }, + "lastContentModificationTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

An epoch timestamp indicating the most recent content modification inside the knowledge\n base. If no content exists in a knowledge base, this value is unset.

", + "smithy.api#timestampFormat": "epoch-seconds" + } + }, + "vectorIngestionConfiguration": { + "target": "com.amazonaws.qconnect#VectorIngestionConfiguration", + "traits": { + "smithy.api#documentation": "

Contains details about how to ingest the documents in a data source.

" + } + }, + "sourceConfiguration": { + "target": "com.amazonaws.qconnect#SourceConfiguration", + "traits": { + "smithy.api#documentation": "

Source configuration information about the knowledge base.

" + } + }, + "renderingConfiguration": { + "target": "com.amazonaws.qconnect#RenderingConfiguration", + "traits": { + "smithy.api#documentation": "

Information about how to render the content.

" + } + }, + "serverSideEncryptionConfiguration": { + "target": "com.amazonaws.qconnect#ServerSideEncryptionConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information for the customer managed key used for encryption.

\n

This KMS key must have a policy that allows kms:CreateGrant,\n kms:DescribeKey, kms:Decrypt, and\n kms:GenerateDataKey* permissions to the IAM identity using the\n key to invoke Amazon Q in Connect.

\n

For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for\n your instance.

" + } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description.

" + } + }, + "tags": { + "target": "com.amazonaws.qconnect#Tags", + "traits": { + "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + } + }, + "ingestionStatus": { + "target": "com.amazonaws.qconnect#SyncStatus", + "traits": { + "smithy.api#documentation": "

The status of ingestion for the data source.

" + } + }, + "ingestionFailureReasons": { + "target": "com.amazonaws.qconnect#FailureReason", + "traits": { + "smithy.api#documentation": "

A list of ingestion failure reasons, per file.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the knowledge base.

", + "smithy.api#references": [ + { + "resource": "com.amazonaws.qconnect#KnowledgeBase" + } + ] + } + }, + "com.amazonaws.qconnect#KnowledgeBaseList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#KnowledgeBaseSummary" + } + }, + "com.amazonaws.qconnect#KnowledgeBaseSearchType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "HYBRID", + "name": "HYBRID" + }, + { + "value": "SEMANTIC", + "name": "SEMANTIC" + } + ] + } + }, + "com.amazonaws.qconnect#KnowledgeBaseStatus": { + "type": "string", "traits": { - "smithy.api#documentation": "

Association information about the knowledge base.

" + "smithy.api#enum": [ + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" + }, + { + "value": "DELETED", + "name": "DELETED" + } + ] } }, - "com.amazonaws.qconnect#KnowledgeBaseData": { + "com.amazonaws.qconnect#KnowledgeBaseSummary": { "type": "structure", "members": { "knowledgeBaseId": { @@ -3689,21 +5821,20 @@ "status": { "target": "com.amazonaws.qconnect#KnowledgeBaseStatus", "traits": { - "smithy.api#documentation": "

The status of the knowledge base.

", + "smithy.api#documentation": "

The status of the knowledge base summary.

", "smithy.api#required": {} } }, - "lastContentModificationTime": { - "target": "smithy.api#Timestamp", + "sourceConfiguration": { + "target": "com.amazonaws.qconnect#SourceConfiguration", "traits": { - "smithy.api#documentation": "

An epoch timestamp indicating the most recent content modification inside the knowledge\n base. If no content exists in a knowledge base, this value is unset.

", - "smithy.api#timestampFormat": "epoch-seconds" + "smithy.api#documentation": "

Configuration information about the external data source.

" } }, - "sourceConfiguration": { - "target": "com.amazonaws.qconnect#SourceConfiguration", + "vectorIngestionConfiguration": { + "target": "com.amazonaws.qconnect#VectorIngestionConfiguration", "traits": { - "smithy.api#documentation": "

Source configuration information about the knowledge base.

" + "smithy.api#documentation": "

Contains details about how to ingest the documents in a data source.

" } }, "renderingConfiguration": { @@ -3721,7 +5852,7 @@ "description": { "target": "com.amazonaws.qconnect#Description", "traits": { - "smithy.api#documentation": "

The description.

" + "smithy.api#documentation": "

The description of the knowledge base.

" } }, "tags": { @@ -3732,7 +5863,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about the knowledge base.

", + "smithy.api#documentation": "

Summary information about the knowledge base.

", "smithy.api#references": [ { "resource": "com.amazonaws.qconnect#KnowledgeBase" @@ -3740,147 +5871,436 @@ ] } }, - "com.amazonaws.qconnect#KnowledgeBaseList": { - "type": "list", - "member": { - "target": "com.amazonaws.qconnect#KnowledgeBaseSummary" - } - }, - "com.amazonaws.qconnect#KnowledgeBaseStatus": { + "com.amazonaws.qconnect#KnowledgeBaseType": { "type": "string", "traits": { "smithy.api#enum": [ { - "value": "CREATE_IN_PROGRESS", - "name": "CREATE_IN_PROGRESS" - }, - { - "value": "CREATE_FAILED", - "name": "CREATE_FAILED" + "value": "EXTERNAL", + "name": "EXTERNAL" }, { - "value": "ACTIVE", - "name": "ACTIVE" + "value": "CUSTOM", + "name": "CUSTOM" }, { - "value": "DELETE_IN_PROGRESS", - "name": "DELETE_IN_PROGRESS" + "value": "QUICK_RESPONSES", + "name": "QUICK_RESPONSES" }, { - "value": "DELETE_FAILED", - "name": "DELETE_FAILED" + "value": "MESSAGE_TEMPLATES", + "name": "MESSAGE_TEMPLATES" }, { - "value": "DELETED", - "name": "DELETED" + "value": "MANAGED", + "name": "MANAGED" } ] } }, - "com.amazonaws.qconnect#KnowledgeBaseSummary": { + "com.amazonaws.qconnect#LanguageCode": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 5 + } + } + }, + "com.amazonaws.qconnect#ListAIAgentVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#ListAIAgentVersionsRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#ListAIAgentVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists AI Agent versions.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagents/{aiAgentId}/versions", + "method": "GET" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "aiAgentVersionSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qconnect#ListAIAgentVersionsRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aiAgentId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI Agent for which versions are to be\n listed.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.qconnect#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.qconnect#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "origin": { + "target": "com.amazonaws.qconnect#Origin", + "traits": { + "smithy.api#documentation": "

The origin of the AI Agent versions to be listed. SYSTEM for a default AI\n Agent created by Q in Connect or CUSTOMER for an AI Agent created by calling AI\n Agent creation APIs.

", + "smithy.api#httpQuery": "origin" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#ListAIAgentVersionsResponse": { + "type": "structure", + "members": { + "aiAgentVersionSummaries": { + "target": "com.amazonaws.qconnect#AIAgentVersionSummariesList", + "traits": { + "smithy.api#documentation": "

The summaries of AI Agent versions.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.qconnect#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#ListAIAgents": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#ListAIAgentsRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#ListAIAgentsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists AI Agents.
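Once this model is regenerated into the SotoQConnect target, calling the new operation could look roughly like the sketch below. The client setup, the listAIAgents method name, the initializer labels, and the enum case spellings are assumptions based on Soto's usual code-generation conventions, not confirmed generated API.

import SotoQConnect

// Hypothetical usage sketch of the new ListAIAgents operation. Type, method, and
// enum case names are assumed from the Smithy shapes in this diff and from Soto's
// code-generation conventions (including the Soto 7 AWSClient() initializer).
func printCustomerAIAgents() async throws {
    let awsClient = AWSClient()
    let qConnect = QConnect(client: awsClient, region: .useast1)
    do {
        // Only AI Agents created through the creation APIs (origin CUSTOMER).
        let response = try await qConnect.listAIAgents(
            QConnect.ListAIAgentsRequest(
                assistantId: "<assistant-id-or-arn>",
                maxResults: 25,
                origin: .customer
            )
        )
        for summary in response.aiAgentSummaries {
            print(summary)
        }
    } catch {
        print("ListAIAgents failed: \(error)")
    }
    try await awsClient.shutdown()
}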

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagents", + "method": "GET" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "aiAgentSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qconnect#ListAIAgentsRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.qconnect#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.qconnect#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return per page.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "origin": { + "target": "com.amazonaws.qconnect#Origin", + "traits": { + "smithy.api#documentation": "

The origin of the AI Agents to be listed. SYSTEM for a default AI Agent\n created by Q in Connect or CUSTOMER for an AI Agent created by calling AI Agent\n creation APIs.

", + "smithy.api#httpQuery": "origin" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#ListAIAgentsResponse": { + "type": "structure", + "members": { + "aiAgentSummaries": { + "target": "com.amazonaws.qconnect#AIAgentSummaryList", + "traits": { + "smithy.api#documentation": "

The summaries of AI Agents.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.qconnect#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#ListAIPromptVersions": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#ListAIPromptVersionsRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#ListAIPromptVersionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists AI Prompt versions.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiprompts/{aiPromptId}/versions", + "method": "GET" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "aiPromptVersionSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qconnect#ListAIPromptVersionsRequest": { "type": "structure", "members": { - "knowledgeBaseId": { - "target": "com.amazonaws.qconnect#Uuid", + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", "traits": { - "smithy.api#documentation": "

The identifier of the knowledge base.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "knowledgeBaseArn": { - "target": "com.amazonaws.qconnect#Arn", + "aiPromptId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the knowledge base.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI prompt for which versions are to be\n listed.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "name": { - "target": "com.amazonaws.qconnect#Name", + "nextToken": { + "target": "com.amazonaws.qconnect#NextToken", "traits": { - "smithy.api#documentation": "

The name of the knowledge base.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

", + "smithy.api#httpQuery": "nextToken" } }, - "knowledgeBaseType": { - "target": "com.amazonaws.qconnect#KnowledgeBaseType", + "maxResults": { + "target": "com.amazonaws.qconnect#MaxResults", "traits": { - "smithy.api#documentation": "

The type of knowledge base.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The maximum number of results to return per page.

", + "smithy.api#httpQuery": "maxResults" } }, - "status": { - "target": "com.amazonaws.qconnect#KnowledgeBaseStatus", + "origin": { + "target": "com.amazonaws.qconnect#Origin", "traits": { - "smithy.api#documentation": "

The status of the knowledge base summary.

", + "smithy.api#documentation": "

The origin of the AI Prompt versions to be listed. SYSTEM for a default AI\n Prompt created by Q in Connect or CUSTOMER for an AI Prompt created by calling AI\n Prompt creation APIs.

", + "smithy.api#httpQuery": "origin" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#ListAIPromptVersionsResponse": { + "type": "structure", + "members": { + "aiPromptVersionSummaries": { + "target": "com.amazonaws.qconnect#AIPromptVersionSummariesList", + "traits": { + "smithy.api#documentation": "

The summaries of the AI Prompt versions.

", "smithy.api#required": {} } }, - "sourceConfiguration": { - "target": "com.amazonaws.qconnect#SourceConfiguration", + "nextToken": { + "target": "com.amazonaws.qconnect#NextToken", "traits": { - "smithy.api#documentation": "

Configuration information about the external data source.

" + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#ListAIPrompts": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#ListAIPromptsRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#ListAIPromptsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" }, - "renderingConfiguration": { - "target": "com.amazonaws.qconnect#RenderingConfiguration", + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the AI Prompts available on the Amazon Q in Connect assistant.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiprompts", + "method": "GET" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "aiPromptSummaries" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.qconnect#ListAIPromptsRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", "traits": { - "smithy.api#documentation": "

Information about how to render the content.

" + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "serverSideEncryptionConfiguration": { - "target": "com.amazonaws.qconnect#ServerSideEncryptionConfiguration", + "nextToken": { + "target": "com.amazonaws.qconnect#NextToken", "traits": { - "smithy.api#documentation": "

The configuration information for the customer managed key used for encryption.

\n

This KMS key must have a policy that allows kms:CreateGrant,\n kms:DescribeKey, kms:Decrypt, and\n kms:GenerateDataKey* permissions to the IAM identity using the\n key to invoke Amazon Q in Connect.

\n

For more information about setting up a customer managed key for Amazon Q in Connect, see Enable Amazon Q in Connect for\n your instance.

" + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

", + "smithy.api#httpQuery": "nextToken" } }, - "description": { - "target": "com.amazonaws.qconnect#Description", + "maxResults": { + "target": "com.amazonaws.qconnect#MaxResults", "traits": { - "smithy.api#documentation": "

The description of the knowledge base.

" + "smithy.api#documentation": "

The maximum number of results to return per page.

", + "smithy.api#httpQuery": "maxResults" } }, - "tags": { - "target": "com.amazonaws.qconnect#Tags", + "origin": { + "target": "com.amazonaws.qconnect#Origin", "traits": { - "smithy.api#documentation": "

The tags used to organize, track, or control access for this resource.

" + "smithy.api#documentation": "

The origin of the AI Prompts to be listed. SYSTEM for a default AI Prompt\n created by Q in Connect or CUSTOMER for an AI Prompt created by calling AI Prompt\n creation APIs.

", + "smithy.api#httpQuery": "origin" } } }, "traits": { - "smithy.api#documentation": "

Summary information about the knowledge base.

", - "smithy.api#references": [ - { - "resource": "com.amazonaws.qconnect#KnowledgeBase" - } - ] + "smithy.api#input": {} } }, - "com.amazonaws.qconnect#KnowledgeBaseType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "EXTERNAL", - "name": "EXTERNAL" - }, - { - "value": "CUSTOM", - "name": "CUSTOM" - }, - { - "value": "QUICK_RESPONSES", - "name": "QUICK_RESPONSES" + "com.amazonaws.qconnect#ListAIPromptsResponse": { + "type": "structure", + "members": { + "aiPromptSummaries": { + "target": "com.amazonaws.qconnect#AIPromptSummaryList", + "traits": { + "smithy.api#documentation": "

The summaries of the AI Prompts.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.qconnect#NextToken", + "traits": { + "smithy.api#documentation": "

The token for the next set of results. Use the value returned in the previous response in\n the next request to retrieve the next set of results.

" } - ] - } - }, - "com.amazonaws.qconnect#LanguageCode": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 2, - "max": 5 } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.qconnect#ListAssistantAssociations": { @@ -4492,6 +6912,40 @@ } } }, + "com.amazonaws.qconnect#ManagedSourceConfiguration": { + "type": "union", + "members": { + "webCrawlerConfiguration": { + "target": "com.amazonaws.qconnect#WebCrawlerConfiguration", + "traits": { + "smithy.api#documentation": "

Configuration data for web crawler data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Source configuration for managed resources.

" + } + }, + "com.amazonaws.qconnect#ManualSearchAIAgentConfiguration": { + "type": "structure", + "members": { + "answerGenerationAIPromptId": { + "target": "com.amazonaws.qconnect#UuidWithQualifier", + "traits": { + "smithy.api#documentation": "

The AI Prompt identifier for the Answer Generation prompt used by the MANUAL_SEARCH AI\n Agent.

" + } + }, + "associationConfigurations": { + "target": "com.amazonaws.qconnect#AssociationConfigurationList", + "traits": { + "smithy.api#documentation": "

The association configurations for overriding behavior on this AI Agent.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for the MANUAL_SEARCH AI Agent type.

" + } + }, "com.amazonaws.qconnect#MaxResults": { "type": "integer", "traits": { @@ -4520,6 +6974,16 @@ } } }, + "com.amazonaws.qconnect#NonEmptySensitiveString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.qconnect#NonEmptyString": { "type": "string", "traits": { @@ -4660,26 +7124,97 @@ } }, "traits": { - "smithy.api#documentation": "

A list of conditions which would be applied together with an OR\n condition.

" + "smithy.api#documentation": "

A list of conditions which would be applied together with an OR\n condition.

" + } + }, + "com.amazonaws.qconnect#OrConditions": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#OrCondition" + } + }, + "com.amazonaws.qconnect#Order": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ASC", + "name": "ASC" + }, + { + "value": "DESC", + "name": "DESC" + } + ] + } + }, + "com.amazonaws.qconnect#Origin": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SYSTEM", + "name": "SYSTEM" + }, + { + "value": "CUSTOMER", + "name": "CUSTOMER" + } + ] + } + }, + "com.amazonaws.qconnect#ParsingConfiguration": { + "type": "structure", + "members": { + "parsingStrategy": { + "target": "com.amazonaws.qconnect#ParsingStrategy", + "traits": { + "smithy.api#documentation": "

The parsing strategy for the data source.

", + "smithy.api#required": {} + } + }, + "bedrockFoundationModelConfiguration": { + "target": "com.amazonaws.qconnect#BedrockFoundationModelConfigurationForParsing", + "traits": { + "smithy.api#documentation": "

Settings for a foundation model used to parse documents for a data source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for parsing document contents. By default, the service converts the contents of\n each document into text before splitting it into chunks. To improve processing of PDF files\n with tables and images, you can configure the data source to convert the pages of text into\n images and use a model to describe the contents of each page.

" + } + }, + "com.amazonaws.qconnect#ParsingPrompt": { + "type": "structure", + "members": { + "parsingPromptText": { + "target": "com.amazonaws.qconnect#ParsingPromptText", + "traits": { + "smithy.api#documentation": "

Instructions for interpreting the contents of a document.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Instructions for interpreting the contents of a document.

" } }, - "com.amazonaws.qconnect#OrConditions": { - "type": "list", - "member": { - "target": "com.amazonaws.qconnect#OrCondition" + "com.amazonaws.qconnect#ParsingPromptText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10000 + } } }, - "com.amazonaws.qconnect#Order": { + "com.amazonaws.qconnect#ParsingStrategy": { "type": "string", "traits": { "smithy.api#enum": [ { - "value": "ASC", - "name": "ASC" - }, - { - "value": "DESC", - "name": "DESC" + "value": "BEDROCK_FOUNDATION_MODEL", + "name": "BEDROCK_FOUNDATION_MODEL" } ] } @@ -4881,8 +7416,7 @@ "queryText": { "target": "com.amazonaws.qconnect#QueryText", "traits": { - "smithy.api#documentation": "

The text to search for.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The text to search for.

" } }, "nextToken": { @@ -4908,6 +7442,18 @@ "traits": { "smithy.api#documentation": "

Information about how to query content.

" } + }, + "queryInputData": { + "target": "com.amazonaws.qconnect#QueryInputData", + "traits": { + "smithy.api#documentation": "

Information about the query.

" + } + }, + "overrideKnowledgeBaseSearchType": { + "target": "com.amazonaws.qconnect#KnowledgeBaseSearchType", + "traits": { + "smithy.api#documentation": "

The search type to be used against the Knowledge Base for this request. The values can be\n SEMANTIC which uses vector embeddings or HYBRID which use vector\n embeddings and raw text.

" + } } } }, @@ -5006,6 +7552,26 @@ "smithy.api#documentation": "

The condition for the query.

" } }, + "com.amazonaws.qconnect#QueryInputData": { + "type": "union", + "members": { + "queryTextInputData": { + "target": "com.amazonaws.qconnect#QueryTextInputData", + "traits": { + "smithy.api#documentation": "

Input information for the query.

" + } + }, + "intentInputData": { + "target": "com.amazonaws.qconnect#IntentInputData", + "traits": { + "smithy.api#documentation": "

Input information for the intent.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Input information for the query.
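A hedged sketch of how this new union and the overrideKnowledgeBaseSearchType member might be used from the generated Soto client; the initializer labels, the .hybrid case, and the results member of the response are assumptions derived from the shapes above.

import SotoQConnect

// Hypothetical sketch; shape, case, and member names are assumed from this model.
func askAssistant(qConnect: QConnect) async throws {
    let response = try await qConnect.queryAssistant(
        QConnect.QueryAssistantRequest(
            assistantId: "<assistant-id-or-arn>",
            // Use HYBRID retrieval (vector embeddings plus raw text) for this request only.
            overrideKnowledgeBaseSearchType: .hybrid,
            // Free-text query; .intentInputData is the other union case.
            queryInputData: .queryTextInputData(.init(text: "How do I reset my router?")),
            sessionId: "<session-id-or-arn>"
        )
    )
    print("Received \(response.results.count) result(s)")
}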

" + } + }, "com.amazonaws.qconnect#QueryRecommendationTriggerData": { "type": "structure", "members": { @@ -5028,6 +7594,10 @@ "value": "KNOWLEDGE_CONTENT", "name": "KNOWLEDGE_CONTENT" }, + { + "value": "INTENT_ANSWER", + "name": "INTENT_ANSWER" + }, { "value": "GENERATIVE_ANSWER", "name": "GENERATIVE_ANSWER" @@ -5045,11 +7615,26 @@ "type": "string", "traits": { "smithy.api#length": { - "max": 1024 + "max": 512 }, "smithy.api#sensitive": {} } }, + "com.amazonaws.qconnect#QueryTextInputData": { + "type": "structure", + "members": { + "text": { + "target": "com.amazonaws.qconnect#QueryText", + "traits": { + "smithy.api#documentation": "

The text to search for.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the text to search for.

" + } + }, "com.amazonaws.qconnect#QuickResponse": { "type": "resource", "identifiers": { @@ -6030,6 +8615,25 @@ { "value": "GENERATIVE_ANSWER", "name": "GENERATIVE_ANSWER" + }, + { + "value": "DETECTED_INTENT", + "name": "DETECTED_INTENT" + } + ] + } + }, + "com.amazonaws.qconnect#ReferenceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "WEB_CRAWLER", + "name": "WEB_CRAWLER" + }, + { + "value": "KNOWLEDGE_BASE", + "name": "KNOWLEDGE_BASE" } ] } @@ -6077,6 +8681,69 @@ } } }, + "com.amazonaws.qconnect#RemoveAssistantAIAgent": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#RemoveAssistantAIAgentRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#RemoveAssistantAIAgentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes the AI Agent that is set for use by default on an Amazon Q in Connect\n Assistant.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagentConfiguration", + "method": "DELETE", + "code": 204 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#RemoveAssistantAIAgentRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aiAgentType": { + "target": "com.amazonaws.qconnect#AIAgentType", + "traits": { + "smithy.api#documentation": "

The type of the AI Agent being removed for use by default from the Amazon Q in Connect\n Assistant.

", + "smithy.api#httpQuery": "aiAgentType", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#RemoveAssistantAIAgentResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.qconnect#RemoveKnowledgeBaseTemplateUri": { "type": "operation", "input": { @@ -6209,6 +8876,54 @@ "smithy.api#documentation": "

Information about the result.

" } }, + "com.amazonaws.qconnect#RuntimeSessionData": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.qconnect#NonEmptySensitiveString", + "traits": { + "smithy.api#documentation": "

The key of the data stored on the session.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.qconnect#RuntimeSessionDataValue", + "traits": { + "smithy.api#documentation": "

The value of the data stored on the session.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The list of key-value pairs that are stored on the session.

" + } + }, + "com.amazonaws.qconnect#RuntimeSessionDataList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#RuntimeSessionData" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.qconnect#RuntimeSessionDataValue": { + "type": "union", + "members": { + "stringValue": { + "target": "com.amazonaws.qconnect#NonEmptySensitiveString", + "traits": { + "smithy.api#documentation": "

The string value of the data stored on the session.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A union type that specifies the data stored on the session.

" + } + }, "com.amazonaws.qconnect#SearchContent": { "type": "operation", "input": { @@ -6497,6 +9212,72 @@ } } }, + "com.amazonaws.qconnect#SeedUrl": { + "type": "structure", + "members": { + "url": { + "target": "com.amazonaws.qconnect#WebUrl", + "traits": { + "smithy.api#documentation": "

The URL for crawling.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A URL for crawling.

" + } + }, + "com.amazonaws.qconnect#SeedUrls": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#SeedUrl" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.qconnect#SemanticChunkingConfiguration": { + "type": "structure", + "members": { + "maxTokens": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of tokens that a chunk can contain.

", + "smithy.api#range": { + "min": 1 + }, + "smithy.api#required": {} + } + }, + "bufferSize": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The buffer size.

", + "smithy.api#range": { + "min": 0, + "max": 1 + }, + "smithy.api#required": {} + } + }, + "breakpointPercentileThreshold": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The dissimilarity threshold for splitting chunks.

", + "smithy.api#range": { + "min": 50, + "max": 99 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Settings for semantic document chunking for a data source. Semantic chunking splits a\n document into smaller documents based on groups of similar content derived from the text with\n natural language processing.

" + } + }, "com.amazonaws.qconnect#SensitiveString": { "type": "string", "traits": { @@ -6549,6 +9330,11 @@ "update": { "target": "com.amazonaws.qconnect#UpdateSession" }, + "operations": [ + { + "target": "com.amazonaws.qconnect#UpdateSessionData" + } + ], "traits": { "aws.api#arn": { "template": "session/{assistantId}/{sessionId}" @@ -6603,12 +9389,29 @@ "traits": { "smithy.api#documentation": "

An object that can be used to specify Tag conditions.

" } + }, + "aiAgentConfiguration": { + "target": "com.amazonaws.qconnect#AIAgentConfigurationMap", + "traits": { + "smithy.api#documentation": "

The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that\n should be used by Amazon Q in Connect for this Session.

" + } } }, "traits": { "smithy.api#documentation": "

Information about the session.

" } }, + "com.amazonaws.qconnect#SessionDataNamespace": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Custom", + "name": "Custom" + } + ] + } + }, "com.amazonaws.qconnect#SessionIntegrationConfiguration": { "type": "structure", "members": { @@ -6682,6 +9485,12 @@ "traits": { "smithy.api#documentation": "

Configuration information for Amazon AppIntegrations to automatically ingest content.

" } + }, + "managedSourceConfiguration": { + "target": "com.amazonaws.qconnect#ManagedSourceConfiguration", + "traits": { + "smithy.api#documentation": "

Source configuration for managed resources.

" + } } }, "traits": { @@ -6718,6 +9527,12 @@ "smithy.api#documentation": "

Details about the source content ranking data.

", "smithy.api#required": {} } + }, + "citationSpan": { + "target": "com.amazonaws.qconnect#CitationSpan", + "traits": { + "smithy.api#documentation": "

Contains information about where the text with a citation begins and ends in the generated output.

" + } } }, "traits": { @@ -6899,24 +9714,78 @@ "traits": { "smithy.api#documentation": "

The configuration information of the external source that the resource data are imported\n from.

" } - } - }, - "traits": { - "smithy.api#input": {} + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#StartImportJobResponse": { + "type": "structure", + "members": { + "importJob": { + "target": "com.amazonaws.qconnect#ImportJobData", + "traits": { + "smithy.api#documentation": "

The import job.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#Status": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" + }, + { + "value": "DELETED", + "name": "DELETED" + } + ] } }, - "com.amazonaws.qconnect#StartImportJobResponse": { - "type": "structure", - "members": { - "importJob": { - "target": "com.amazonaws.qconnect#ImportJobData", - "traits": { - "smithy.api#documentation": "

The import job.

" - } - } - }, + "com.amazonaws.qconnect#SyncStatus": { + "type": "string", "traits": { - "smithy.api#output": {} + "smithy.api#enum": [ + { + "value": "SYNC_FAILED", + "name": "SYNC_FAILED" + }, + { + "value": "SYNCING_IN_PROGRESS", + "name": "SYNCING_IN_PROGRESS" + }, + { + "value": "SYNC_SUCCESS", + "name": "SYNC_SUCCESS" + }, + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + } + ] } }, "com.amazonaws.qconnect#TagCondition": { @@ -7070,6 +9939,16 @@ ] } }, + "com.amazonaws.qconnect#TextAIPrompt": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 200000 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.qconnect#TextData": { "type": "structure", "members": { @@ -7084,6 +9963,21 @@ "smithy.api#documentation": "

Details about the source content text data.

" } }, + "com.amazonaws.qconnect#TextFullAIPromptEditTemplateConfiguration": { + "type": "structure", + "members": { + "text": { + "target": "com.amazonaws.qconnect#TextAIPrompt", + "traits": { + "smithy.api#documentation": "

The YAML text for the AI Prompt template.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for a prompt template that supports full textual prompt configuration\n using a YAML prompt.
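A sketch of feeding such a YAML template into UpdateAIPrompt through the generated client. The .textFullAIPromptEditTemplateConfiguration union case name is an assumption (the AIPromptTemplateConfiguration union is defined outside this hunk), and the YAML body is left as a placeholder rather than a documented schema.

import SotoQConnect

// Hypothetical sketch; the union case name and initializer labels are assumptions.
func saveAIPromptTemplate(qConnect: QConnect, yamlTemplate: String) async throws {
    let response = try await qConnect.updateAIPrompt(
        QConnect.UpdateAIPromptRequest(
            aiPromptId: "<ai-prompt-id>",
            assistantId: "<assistant-id-or-arn>",
            description: "Answer generation prompt with a custom template",
            // TextAIPrompt is a sensitive string of 1 to 200,000 characters.
            templateConfiguration: .textFullAIPromptEditTemplateConfiguration(
                .init(text: yamlTemplate)
            ),
            visibilityStatus: .saved
        )
    )
    print(response.aiPrompt as Any)
}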

" + } + }, "com.amazonaws.qconnect#ThrottlingException": { "type": "structure", "members": { @@ -7108,71 +10002,338 @@ } } }, - "com.amazonaws.qconnect#TooManyTagsException": { + "com.amazonaws.qconnect#TooManyTagsException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + }, + "resourceName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The specified resource name.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Amazon Q in Connect throws this exception if you have too many tags in your tag set.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.qconnect#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes the specified tags from the specified resource.

", + "smithy.api#http": { + "uri": "/tags/{resourceArn}", + "method": "DELETE" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.qconnect#TagKeyList", + "traits": { + "smithy.api#documentation": "

The tag keys.

", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.qconnect#UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.qconnect#UpdateAIAgent": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#UpdateAIAgentRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#UpdateAIAgentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ConflictException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an AI Agent.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiagents/{aiAgentId}", + "method": "POST" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#UpdateAIAgentRequest": { + "type": "structure", + "members": { + "clientToken": { + "target": "com.amazonaws.qconnect#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the AWS SDK populates this field. For more information about\n idempotency, see Making retries safe with idempotent APIs.

", + "smithy.api#idempotencyToken": {} + } + }, + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aiAgentId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI Agent.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "visibilityStatus": { + "target": "com.amazonaws.qconnect#VisibilityStatus", + "traits": { + "smithy.api#documentation": "

The visibility status of the Amazon Q in Connect AI Agent.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.qconnect#AIAgentConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the Amazon Q in Connect AI Agent.

" + } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description of the Amazon Q in Connect AI Agent.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#UpdateAIAgentResponse": { + "type": "structure", + "members": { + "aiAgent": { + "target": "com.amazonaws.qconnect#AIAgentData", + "traits": { + "smithy.api#documentation": "

The data of the updated Amazon Q in Connect AI Agent.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.qconnect#UpdateAIPrompt": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#UpdateAIPromptRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#UpdateAIPromptResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ConflictException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an AI Prompt.

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/aiprompts/{aiPromptId}", + "method": "POST" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.qconnect#UpdateAIPromptRequest": { + "type": "structure", + "members": { + "clientToken": { + "target": "com.amazonaws.qconnect#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the AWS SDK populates this field. For more information about\n idempotency, see Making retries safe with idempotent APIs.

", + "smithy.api#idempotencyToken": {} + } + }, + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "aiPromptId": { + "target": "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect AI Prompt.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "visibilityStatus": { + "target": "com.amazonaws.qconnect#VisibilityStatus", + "traits": { + "smithy.api#documentation": "

The visibility status of the Amazon Q in Connect AI prompt.

", + "smithy.api#required": {} + } + }, + "templateConfiguration": { + "target": "com.amazonaws.qconnect#AIPromptTemplateConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the prompt template for this AI Prompt.

" + } + }, + "description": { + "target": "com.amazonaws.qconnect#Description", + "traits": { + "smithy.api#documentation": "

The description of the Amazon Q in Connect AI Prompt.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#UpdateAIPromptResponse": { "type": "structure", "members": { - "message": { - "target": "smithy.api#String" - }, - "resourceName": { - "target": "smithy.api#String", + "aiPrompt": { + "target": "com.amazonaws.qconnect#AIPromptData", "traits": { - "smithy.api#documentation": "

The specified resource name.

" + "smithy.api#documentation": "

The data of the updated Amazon Q in Connect AI Prompt.

" } } }, "traits": { - "smithy.api#documentation": "

Amazon Q in Connect throws this exception if you have too many tags in your tag set.

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 + "smithy.api#output": {} } }, - "com.amazonaws.qconnect#UntagResource": { + "com.amazonaws.qconnect#UpdateAssistantAIAgent": { "type": "operation", "input": { - "target": "com.amazonaws.qconnect#UntagResourceRequest" + "target": "com.amazonaws.qconnect#UpdateAssistantAIAgentRequest" }, "output": { - "target": "com.amazonaws.qconnect#UntagResourceResponse" + "target": "com.amazonaws.qconnect#UpdateAssistantAIAgentResponse" }, "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, { "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ThrottlingException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" } ], "traits": { - "smithy.api#documentation": "

Removes the specified tags from the specified resource.

", + "smithy.api#documentation": "

Updates the AI Agent that is set for use by default on an Amazon Q in Connect\n Assistant.
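Once generated into SotoQConnect, pinning a custom AI Agent as the assistant-wide default might look like this sketch; the AIAgentConfigurationData member name and the .manualSearch case are assumptions, since those shapes sit outside this hunk.

import SotoQConnect

// Hypothetical sketch; AIAgentConfigurationData's member name and the AIAgentType
// case spelling are assumptions (both shapes are defined outside this diff hunk).
func setDefaultManualSearchAgent(qConnect: QConnect) async throws {
    _ = try await qConnect.updateAssistantAIAgent(
        QConnect.UpdateAssistantAIAgentRequest(
            aiAgentType: .manualSearch,
            assistantId: "<assistant-id-or-arn>",
            // A qualified identifier (UuidWithQualifier) pins a specific AI Agent version.
            configuration: .init(aiAgentId: "<ai-agent-id>:1")
        )
    )
    // RemoveAssistantAIAgent reverts the assistant to the SYSTEM default.
}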

", "smithy.api#http": { - "uri": "/tags/{resourceArn}", - "method": "DELETE" - }, - "smithy.api#idempotent": {} + "uri": "/assistants/{assistantId}/aiagentConfiguration", + "method": "POST" + } } }, - "com.amazonaws.qconnect#UntagResourceRequest": { + "com.amazonaws.qconnect#UpdateAssistantAIAgentRequest": { "type": "structure", "members": { - "resourceArn": { - "target": "com.amazonaws.qconnect#Arn", + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource.

", + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "tagKeys": { - "target": "com.amazonaws.qconnect#TagKeyList", + "aiAgentType": { + "target": "com.amazonaws.qconnect#AIAgentType", "traits": { - "smithy.api#documentation": "

The tag keys.

", - "smithy.api#httpQuery": "tagKeys", + "smithy.api#documentation": "

The type of the AI Agent being updated for use by default on the Amazon Q in Connect\n Assistant.

", + "smithy.api#required": {} + } + }, + "configuration": { + "target": "com.amazonaws.qconnect#AIAgentConfigurationData", + "traits": { + "smithy.api#documentation": "

The configuration of the AI Agent being updated for use by default on the Amazon Q in\n Connect Assistant.

", "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, - "com.amazonaws.qconnect#UntagResourceResponse": { + "com.amazonaws.qconnect#UpdateAssistantAIAgentResponse": { "type": "structure", - "members": {} + "members": { + "assistant": { + "target": "com.amazonaws.qconnect#AssistantData" + } + }, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.qconnect#UpdateContent": { "type": "operation", @@ -7500,6 +10661,106 @@ } } }, + "com.amazonaws.qconnect#UpdateSessionData": { + "type": "operation", + "input": { + "target": "com.amazonaws.qconnect#UpdateSessionDataRequest" + }, + "output": { + "target": "com.amazonaws.qconnect#UpdateSessionDataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.qconnect#AccessDeniedException" + }, + { + "target": "com.amazonaws.qconnect#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.qconnect#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the data stored on an Amazon Q in Connect Session.
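A minimal sketch of writing one key/value pair into the Custom namespace with the generated client; the shape names, the .custom case, and the initializer labels are assumptions derived from the shapes above.

import SotoQConnect

// Hypothetical sketch; names are assumed from the Smithy shapes in this diff.
func storeSessionData(qConnect: QConnect) async throws {
    let response = try await qConnect.updateSessionData(
        QConnect.UpdateSessionDataRequest(
            assistantId: "<assistant-id-or-arn>",
            // Up to 50 entries per call; values are a union, currently only stringValue.
            data: [
                QConnect.RuntimeSessionData(key: "customerTier", value: .stringValue("premium"))
            ],
            namespace: .custom,
            sessionId: "<session-id-or-arn>"
        )
    )
    print("Stored \(response.data.count) item(s)")
}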

", + "smithy.api#http": { + "uri": "/assistants/{assistantId}/sessions/{sessionId}/data", + "method": "PATCH" + } + } + }, + "com.amazonaws.qconnect#UpdateSessionDataRequest": { + "type": "structure", + "members": { + "assistantId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs\n cannot contain the ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sessionId": { + "target": "com.amazonaws.qconnect#UuidOrArn", + "traits": { + "smithy.api#documentation": "

The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the\n ARN.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "namespace": { + "target": "com.amazonaws.qconnect#SessionDataNamespace", + "traits": { + "smithy.api#documentation": "

The namespace into which the session data is stored. Supported namespaces are:\n Custom

" + } + }, + "data": { + "target": "com.amazonaws.qconnect#RuntimeSessionDataList", + "traits": { + "smithy.api#documentation": "

The data stored on the Amazon Q in Connect Session.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.qconnect#UpdateSessionDataResponse": { + "type": "structure", + "members": { + "sessionArn": { + "target": "com.amazonaws.qconnect#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the session.

", + "smithy.api#required": {} + } + }, + "sessionId": { + "target": "com.amazonaws.qconnect#Uuid", + "traits": { + "smithy.api#documentation": "

The identifier of the session.

", + "smithy.api#required": {} + } + }, + "namespace": { + "target": "com.amazonaws.qconnect#SessionDataNamespace", + "traits": { + "smithy.api#documentation": "

The namespace into which the session data is stored. Supported namespaces are:\n Custom

", + "smithy.api#required": {} + } + }, + "data": { + "target": "com.amazonaws.qconnect#RuntimeSessionDataList", + "traits": { + "smithy.api#documentation": "

Data stored in the session.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.qconnect#UpdateSessionRequest": { "type": "structure", "members": { @@ -7530,6 +10791,12 @@ "traits": { "smithy.api#documentation": "

An object that can be used to specify Tag conditions.

" } + }, + "aiAgentConfiguration": { + "target": "com.amazonaws.qconnect#AIAgentConfigurationMap", + "traits": { + "smithy.api#documentation": "

The configuration of the AI Agents (mapped by AI Agent Type to AI Agent version) that\n should be used by Amazon Q in Connect for this Session.

" + } } }, "traits": { @@ -7575,6 +10842,43 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.qconnect#UrlConfiguration": { + "type": "structure", + "members": { + "seedUrls": { + "target": "com.amazonaws.qconnect#SeedUrls", + "traits": { + "smithy.api#documentation": "

List of URLs for crawling.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the URL/URLs for the web content that you want to crawl. You should\n be authorized to crawl the URLs.

" + } + }, + "com.amazonaws.qconnect#UrlFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.qconnect#UrlFilterPattern" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 25 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.qconnect#UrlFilterPattern": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + }, + "smithy.api#sensitive": {} + } + }, "com.amazonaws.qconnect#Uuid": { "type": "string", "traits": { @@ -7587,6 +10891,18 @@ "smithy.api#pattern": "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" } }, + "com.amazonaws.qconnect#UuidOrArnOrEitherWithQualifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$" + } + }, + "com.amazonaws.qconnect#UuidWithQualifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$" + } + }, "com.amazonaws.qconnect#ValidationException": { "type": "structure", "members": { @@ -7600,6 +10916,49 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.qconnect#VectorIngestionConfiguration": { + "type": "structure", + "members": { + "chunkingConfiguration": { + "target": "com.amazonaws.qconnect#ChunkingConfiguration", + "traits": { + "smithy.api#documentation": "

Details about how to chunk the documents in the data source. A chunk refers to an excerpt\n from a data source that is returned when the knowledge base that it belongs to is\n queried.

" + } + }, + "parsingConfiguration": { + "target": "com.amazonaws.qconnect#ParsingConfiguration", + "traits": { + "smithy.api#documentation": "

A custom parser for data source documents.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about how to ingest the documents in a data source.

" + } + }, + "com.amazonaws.qconnect#Version": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.qconnect#VisibilityStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SAVED", + "name": "SAVED" + }, + { + "value": "PUBLISHED", + "name": "PUBLISHED" + } + ] + } + }, "com.amazonaws.qconnect#WaitTimeSeconds": { "type": "integer", "traits": { @@ -7610,6 +10969,84 @@ } } }, + "com.amazonaws.qconnect#WebCrawlerConfiguration": { + "type": "structure", + "members": { + "urlConfiguration": { + "target": "com.amazonaws.qconnect#UrlConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration of the URL/URLs for the web content that you want to crawl. You should\n be authorized to crawl the URLs.

", + "smithy.api#required": {} + } + }, + "crawlerLimits": { + "target": "com.amazonaws.qconnect#WebCrawlerLimits", + "traits": { + "smithy.api#documentation": "

The configuration of crawl limits for the web URLs.

" + } + }, + "inclusionFilters": { + "target": "com.amazonaws.qconnect#UrlFilterList", + "traits": { + "smithy.api#documentation": "

A list of one or more inclusion regular expression patterns to include certain URLs. If\n you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion\n filter takes precedence and the web content of the URL isn’t crawled.

" + } + }, + "exclusionFilters": { + "target": "com.amazonaws.qconnect#UrlFilterList", + "traits": { + "smithy.api#documentation": "

A list of one or more exclusion regular expression patterns to exclude certain URLs. If\n you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion\n filter takes precedence and the web content of the URL isn’t crawled.

" + } + }, + "scope": { + "target": "com.amazonaws.qconnect#WebScopeType", + "traits": { + "smithy.api#documentation": "

The scope of what is crawled for your URLs. You can choose to crawl only web pages that\n belong to the same host or primary domain. For example, only web pages that contain the seed\n URL https://docs.aws.amazon.com/bedrock/latest/userguide/ and no other domains.\n You can choose to include subdomains in addition to the host or primary domain. For example,\n web pages that contain aws.amazon.com can also include the subdomain\n docs.aws.amazon.com.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration details for the web data source.
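A sketch of assembling this crawler source with the generated Soto types; the union case names and enum spellings are assumptions, and the resulting value would be passed as the sourceConfiguration of a CreateKnowledgeBase request for a MANAGED knowledge base.

import SotoQConnect

// Hypothetical sketch; union case names and enum case spellings are assumptions.
let webSource = QConnect.SourceConfiguration.managedSourceConfiguration(
    .webCrawlerConfiguration(
        QConnect.WebCrawlerConfiguration(
            crawlerLimits: .init(rateLimit: 60),                 // 1-300 URLs per minute
            exclusionFilters: [".*/internal/.*"],                // exclusion wins if both match
            inclusionFilters: ["https://docs\\.example\\.com/help/.*"],
            scope: .hostOnly,                                    // or .subdomains
            urlConfiguration: .init(seedUrls: [
                .init(url: "https://docs.example.com/help/")
            ])
        )
    )
)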

" + } + }, + "com.amazonaws.qconnect#WebCrawlerLimits": { + "type": "structure", + "members": { + "rateLimit": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Rate of web URLs retrieved per minute.

", + "smithy.api#range": { + "min": 1, + "max": 300 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of crawl limits for the web URLs.

" + } + }, + "com.amazonaws.qconnect#WebScopeType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "HOST_ONLY", + "name": "HOST_ONLY" + }, + { + "value": "SUBDOMAINS", + "name": "SUBDOMAINS" + } + ] + } + }, + "com.amazonaws.qconnect#WebUrl": { + "type": "string", + "traits": { + "smithy.api#pattern": "^https?://[A-Za-z0-9][^\\s]*$" + } + }, "com.amazonaws.qconnect#WisdomService": { "type": "service", "version": "2020-10-19", @@ -7643,7 +11080,7 @@ }, "aws.protocols#restJson1": {}, "smithy.api#cors": {}, - "smithy.api#documentation": "\n

\n Powered by Amazon Bedrock: Amazon Web Services implements automated abuse\n detection. Because Amazon Q in Connect is built on Amazon Bedrock, users can take full advantage of\n the controls implemented in Amazon Bedrock to enforce safety, security, and the responsible use of\n artificial intelligence (AI).

\n
\n

Amazon Q in Connect is a generative AI customer service assistant. It is an LLM-enhanced\n evolution of Amazon Connect Wisdom that delivers real-time recommendations to help contact\n center agents resolve customer issues quickly and accurately.

\n

Amazon Q in Connect automatically detects customer intent during calls and chats using conversational\n analytics and natural language understanding (NLU). It then provides agents with immediate,\n real-time generative responses and suggested actions, and links to relevant documents and\n articles. Agents can also query Amazon Q in Connect directly using natural language or keywords to answer\n customer requests.

\n

Use the Amazon Q in Connect APIs to create an assistant and a knowledge base, for example, or\n manage content by uploading custom files.

\n

For more information, see Use Amazon Q in Connect for generative AI\n powered agent assistance in real-time in the Amazon Connect\n Administrator Guide.

", + "smithy.api#documentation": "\n \n

\n Powered by Amazon Bedrock: Amazon Web Services implements automated abuse\n detection. Because Amazon Q in Connect is built on Amazon Bedrock, users can take full advantage of\n the controls implemented in Amazon Bedrock to enforce safety, security, and the responsible use of\n artificial intelligence (AI).

\n
\n

Amazon Q in Connect is a generative AI customer service assistant. It is an LLM-enhanced\n evolution of Amazon Connect Wisdom that delivers real-time recommendations to help contact\n center agents resolve customer issues quickly and accurately.

\n

Amazon Q in Connect automatically detects customer intent during calls and chats using conversational\n analytics and natural language understanding (NLU). It then provides agents with immediate,\n real-time generative responses and suggested actions, and links to relevant documents and\n articles. Agents can also query Amazon Q in Connect directly using natural language or keywords to answer\n customer requests.

\n

Use the Amazon Q in Connect APIs to create an assistant and a knowledge base, for example, or\n manage content by uploading custom files.

\n

For more information, see Use Amazon Q in Connect for generative AI\n powered agent assistance in real-time in the Amazon Connect\n Administrator Guide.

", "smithy.api#title": "Amazon Q Connect", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/quicksight.json b/models/quicksight.json index 04b27dedce..439064712b 100644 --- a/models/quicksight.json +++ b/models/quicksight.json @@ -1456,6 +1456,12 @@ "traits": { "smithy.api#documentation": "

An optional list of structures that control how Dashboard resources are parameterized in the returned CloudFormation template.

" } + }, + "Folders": { + "target": "com.amazonaws.quicksight#AssetBundleExportJobFolderOverridePropertiesList", + "traits": { + "smithy.api#documentation": "

An optional list of structures that controls how Folder resources are parameterized in the returned CloudFormation template.

" + } } }, "traits": { @@ -1841,6 +1847,69 @@ "target": "com.amazonaws.quicksight#AssetBundleExportJobError" } }, + "com.amazonaws.quicksight#AssetBundleExportJobFolderOverrideProperties": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

The ARN of the specific Folder resource whose override properties are configured in this structure.

", + "smithy.api#required": {} + } + }, + "Properties": { + "target": "com.amazonaws.quicksight#AssetBundleExportJobFolderPropertyToOverrideList", + "traits": { + "smithy.api#documentation": "

A list of Folder resource properties to generate variables for in the returned CloudFormation template.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Controls how a specific Folder resource is parameterized in the returned CloudFormation template.

" + } + }, + "com.amazonaws.quicksight#AssetBundleExportJobFolderOverridePropertiesList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#AssetBundleExportJobFolderOverrideProperties" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.quicksight#AssetBundleExportJobFolderPropertyToOverride": { + "type": "enum", + "members": { + "NAME": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Name" + } + }, + "PARENT_FOLDER_ARN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ParentFolderArn" + } + } + } + }, + "com.amazonaws.quicksight#AssetBundleExportJobFolderPropertyToOverrideList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#AssetBundleExportJobFolderPropertyToOverride" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.quicksight#AssetBundleExportJobRefreshScheduleOverrideProperties": { "type": "structure", "members": { @@ -2700,6 +2769,109 @@ "target": "com.amazonaws.quicksight#AssetBundleImportJobError" } }, + "com.amazonaws.quicksight#AssetBundleImportJobFolderOverrideParameters": { + "type": "structure", + "members": { + "FolderId": { + "target": "com.amazonaws.quicksight#ResourceId", + "traits": { + "smithy.api#documentation": "

The ID of the folder that you want to apply overrides to.

", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.quicksight#ResourceName", + "traits": { + "smithy.api#documentation": "

A new name for the folder.

" + } + }, + "ParentFolderArn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

A new parent folder ARN. This change can only be applied if the import creates a brand new folder. Existing folders cannot be moved.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The override parameters for a single folder that is being imported.
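On the QuickSight side, the new Folder override shapes plug into the existing asset bundle job parameters. A sketch of an import override that renames a folder (initializer labels are assumed from Soto's code-generation conventions; all other StartAssetBundleImportJob inputs are omitted):

import SotoQuickSight

// Hypothetical sketch; initializer labels are assumed from Soto's codegen conventions.
let folderOverrides = QuickSight.AssetBundleImportJobOverrideParameters(
    folders: [
        QuickSight.AssetBundleImportJobFolderOverrideParameters(
            folderId: "<folder-id>",
            name: "Imported reports",
            // Only applied when the import creates a brand-new folder; existing
            // folders cannot be moved.
            parentFolderArn: "arn:aws:quicksight:<region>:<account-id>:folder/<parent-folder-id>"
        )
    ]
)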

" + } + }, + "com.amazonaws.quicksight#AssetBundleImportJobFolderOverrideParametersList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#AssetBundleImportJobFolderOverrideParameters" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.quicksight#AssetBundleImportJobFolderOverridePermissions": { + "type": "structure", + "members": { + "FolderIds": { + "target": "com.amazonaws.quicksight#AssetBundleRestrictiveResourceIdList", + "traits": { + "smithy.api#documentation": "

A list of folder IDs that you want to apply overrides to. You can use * to override all folders in this asset bundle.

", + "smithy.api#required": {} + } + }, + "Permissions": { + "target": "com.amazonaws.quicksight#AssetBundleResourcePermissions" + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains a list of permissions to be applied to a list of folder IDs.

" + } + }, + "com.amazonaws.quicksight#AssetBundleImportJobFolderOverridePermissionsList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#AssetBundleImportJobFolderOverridePermissions" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, + "com.amazonaws.quicksight#AssetBundleImportJobFolderOverrideTags": { + "type": "structure", + "members": { + "FolderIds": { + "target": "com.amazonaws.quicksight#AssetBundleRestrictiveResourceIdList", + "traits": { + "smithy.api#documentation": "

A list of folder IDs that you want to apply overrides to. You can use * to override all folders in this asset bundle.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.quicksight#TagList", + "traits": { + "smithy.api#documentation": "

A list of tags for the folders that you want to apply overrides to.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains a list of tags to be assigned to a list of folder IDs.

" + } + }, + "com.amazonaws.quicksight#AssetBundleImportJobFolderOverrideTagsList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#AssetBundleImportJobFolderOverrideTags" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, "com.amazonaws.quicksight#AssetBundleImportJobOverrideParameters": { "type": "structure", "members": { @@ -2750,6 +2922,12 @@ "traits": { "smithy.api#documentation": "

A list of overrides for any Dashboard resources that are present in the asset bundle that is imported.

" } + }, + "Folders": { + "target": "com.amazonaws.quicksight#AssetBundleImportJobFolderOverrideParametersList", + "traits": { + "smithy.api#documentation": "

A list of overrides for any Folder resources that are present in the asset bundle that is imported.

" + } } }, "traits": { @@ -2788,6 +2966,12 @@ "traits": { "smithy.api#documentation": "

A list of permissions overrides for any Dashboard resources that are present in the asset bundle that is imported.

" } + }, + "Folders": { + "target": "com.amazonaws.quicksight#AssetBundleImportJobFolderOverridePermissionsList", + "traits": { + "smithy.api#documentation": "

A list of permissions for the folders that you want to apply overrides to.

" + } } }, "traits": { @@ -2832,6 +3016,12 @@ "traits": { "smithy.api#documentation": "

A list of tag overrides for any Dashboard resources that are present in the asset bundle that is imported.

" } + }, + "Folders": { + "target": "com.amazonaws.quicksight#AssetBundleImportJobFolderOverrideTagsList", + "traits": { + "smithy.api#documentation": "

A list of tag overrides for any Folder resources that are present in the asset bundle that is imported.

" + } } }, "traits": { @@ -6679,6 +6869,23 @@ "smithy.api#documentation": "

A combo chart.

\n

The ComboChartVisual includes stacked bar combo charts and clustered bar combo charts.

\n

For more information, see Using combo charts in the Amazon QuickSight User Guide.

" } }, + "com.amazonaws.quicksight#CommitMode": { + "type": "enum", + "members": { + "AUTO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AUTO" + } + }, + "MANUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MANUAL" + } + } + } + }, "com.amazonaws.quicksight#ComparativeOrder": { "type": "structure", "members": { @@ -14472,6 +14679,12 @@ "traits": { "smithy.api#documentation": "

The display options of a control.

" } + }, + "CommitMode": { + "target": "com.amazonaws.quicksight#CommitMode", + "traits": { + "smithy.api#documentation": "

The visibility configuration of the Apply button on a DateTimePickerControl.

" + } } }, "traits": { @@ -14570,6 +14783,12 @@ "traits": { "smithy.api#documentation": "

A list of selectable values that are used in a control.

" } + }, + "CommitMode": { + "target": "com.amazonaws.quicksight#CommitMode", + "traits": { + "smithy.api#documentation": "

The visibility configuration of the Apply button on a FilterDropDownControl.

" + } } }, "traits": { @@ -14720,6 +14939,12 @@ "traits": { "smithy.api#documentation": "

The display options of a control.

" } + }, + "CommitMode": { + "target": "com.amazonaws.quicksight#CommitMode", + "traits": { + "smithy.api#documentation": "

The visibility configuration of the Apply button on a RelativeDateTimeControl.

" + } } }, "traits": { @@ -18127,6 +18352,19 @@ "traits": { "smithy.api#documentation": "

An array of warning records that describe the analysis or dashboard that is exported. This array includes UI errors that can be skipped during the validation process.

\n

This property only appears if StrictModeForAllResources in ValidationStrategy is set to FALSE.

" } + }, + "IncludeFolderMemberships": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

A Boolean that indicates whether folder memberships were included in the export job.

" + } + }, + "IncludeFolderMembers": { + "target": "com.amazonaws.quicksight#IncludeFolderMembers", + "traits": { + "smithy.api#documentation": "

A setting that determines whether folder members are included.

" + } } }, "traits": { @@ -20415,6 +20653,87 @@ "smithy.api#output": {} } }, + "com.amazonaws.quicksight#DescribeQPersonalizationConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.quicksight#DescribeQPersonalizationConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.quicksight#DescribeQPersonalizationConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.quicksight#AccessDeniedException" + }, + { + "target": "com.amazonaws.quicksight#ConflictException" + }, + { + "target": "com.amazonaws.quicksight#InternalFailureException" + }, + { + "target": "com.amazonaws.quicksight#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.quicksight#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.quicksight#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Describes a personalization configuration.
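A minimal usage sketch for this new operation, assuming the regenerated Soto QuickSight client follows its usual lowerCamelCase naming and Soto 6-style client setup; the account ID is a placeholder, not part of the model:

```swift
import SotoQuickSight

// Sketch only: placeholder account ID; method and member names assume
// Soto's standard code generation for this model.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let quickSight = QuickSight(client: client, region: .useast1)

let config = try await quickSight.describeQPersonalizationConfiguration(
    .init(awsAccountId: "123456789012")
)
// Reports whether Amazon Q personalization is ENABLED or DISABLED.
print(config.personalizationMode ?? .disabled)
```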

", + "smithy.api#http": { + "method": "GET", + "uri": "/accounts/{AwsAccountId}/q-personalization-configuration", + "code": 200 + } + } + }, + "com.amazonaws.quicksight#DescribeQPersonalizationConfigurationRequest": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that contains the personalization configuration that the user wants described.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.quicksight#DescribeQPersonalizationConfigurationResponse": { + "type": "structure", + "members": { + "PersonalizationMode": { + "target": "com.amazonaws.quicksight#PersonalizationMode", + "traits": { + "smithy.api#documentation": "

A value that indicates whether personalization is enabled or not.

" + } + }, + "RequestId": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + }, + "Status": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of the request.

", + "smithy.api#httpResponseCode": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.quicksight#DescribeRefreshSchedule": { "type": "operation", "input": { @@ -23481,6 +23800,12 @@ "traits": { "smithy.api#documentation": "

The type of the FilterDropDownControl. Choose one of the following options:

\n
    \n
  • \n

    \n MULTI_SELECT: The user can select multiple entries from a dropdown menu.

    \n
  • \n
  • \n

    \n SINGLE_SELECT: The user can select a single entry from a dropdown menu.

    \n
  • \n
" } + }, + "CommitMode": { + "target": "com.amazonaws.quicksight#CommitMode", + "traits": { + "smithy.api#documentation": "

The visibility configuration of the Apply button on a DateTimePickerControl.

" + } } }, "traits": { @@ -23534,6 +23859,12 @@ "traits": { "smithy.api#documentation": "

The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.

" } + }, + "CommitMode": { + "target": "com.amazonaws.quicksight#CommitMode", + "traits": { + "smithy.api#documentation": "

The visibility configuration of the Apply button on a FilterDropDownControl.

" + } } }, "traits": { @@ -23816,6 +24147,12 @@ "traits": { "smithy.api#documentation": "

The display options of a control.

" } + }, + "CommitMode": { + "target": "com.amazonaws.quicksight#CommitMode", + "traits": { + "smithy.api#documentation": "

The visibility configuration of the Apply button on a FilterRelativeDateTimeControl.

" + } } }, "traits": { @@ -24311,6 +24648,18 @@ } } }, + "com.amazonaws.quicksight#FoldersForResourceArnList": { + "type": "list", + "member": { + "target": "com.amazonaws.quicksight#Arn" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.quicksight#Font": { "type": "structure", "members": { @@ -27581,6 +27930,29 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.quicksight#IncludeFolderMembers": { + "type": "enum", + "members": { + "RECURSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RECURSE" + } + }, + "ONE_LEVEL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ONE_LEVEL" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, "com.amazonaws.quicksight#IncrementalRefresh": { "type": "structure", "members": { @@ -30837,6 +31209,124 @@ } } }, + "com.amazonaws.quicksight#ListFoldersForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.quicksight#ListFoldersForResourceRequest" + }, + "output": { + "target": "com.amazonaws.quicksight#ListFoldersForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.quicksight#AccessDeniedException" + }, + { + "target": "com.amazonaws.quicksight#InternalFailureException" + }, + { + "target": "com.amazonaws.quicksight#InvalidNextTokenException" + }, + { + "target": "com.amazonaws.quicksight#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.quicksight#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.quicksight#ThrottlingException" + }, + { + "target": "com.amazonaws.quicksight#UnsupportedUserEditionException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all folders that a resource is a member of.
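A hedged sketch of calling this new operation through Soto, assuming the regenerated client's usual naming; the account ID and dashboard ARN are placeholders:

```swift
import SotoQuickSight

// Sketch only: placeholder account ID and resource ARN.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let quickSight = QuickSight(client: client, region: .useast1)

let response = try await quickSight.listFoldersForResource(
    .init(
        awsAccountId: "123456789012",
        resourceArn: "arn:aws:quicksight:us-east-1:123456789012:dashboard/example-dashboard"
    )
)
// ARNs of every folder the dashboard is a member of.
print(response.folders ?? [])
```

Because the operation carries the smithy.api#paginated trait, Soto's code generator should also emit a paginator variant for walking all pages.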

", + "smithy.api#http": { + "method": "GET", + "uri": "/accounts/{AwsAccountId}/resource/{ResourceArn}/folders", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Folders", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.quicksight#ListFoldersForResourceRequest": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID for the Amazon Web Services account that contains the resource.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ResourceArn": { + "target": "com.amazonaws.quicksight#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource whose folders you need to list.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The token for the next set of results, or null if there are no more results.

", + "smithy.api#httpQuery": "next-token" + } + }, + "MaxResults": { + "target": "com.amazonaws.quicksight#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be returned per request.

", + "smithy.api#httpQuery": "max-results" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.quicksight#ListFoldersForResourceResponse": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of the request.

", + "smithy.api#httpResponseCode": {} + } + }, + "Folders": { + "target": "com.amazonaws.quicksight#FoldersForResourceArnList", + "traits": { + "smithy.api#documentation": "

A list that contains the Amazon Resource Names (ARNs) of all folders that the resource is a member of.

" + } + }, + "NextToken": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The token for the next set of results, or null if there are no more results.

" + } + }, + "RequestId": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.quicksight#ListFoldersRequest": { "type": "structure", "members": { @@ -35799,6 +36289,12 @@ "traits": { "smithy.api#documentation": "

The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.

" } + }, + "CommitMode": { + "target": "com.amazonaws.quicksight#CommitMode", + "traits": { + "smithy.api#documentation": "

The visibility configuration of the Apply button on a ParameterDropDownControl.

" + } } }, "traits": { @@ -36294,6 +36790,23 @@ } } }, + "com.amazonaws.quicksight#PersonalizationMode": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.quicksight#PhysicalTable": { "type": "union", "members": { @@ -38130,6 +38643,9 @@ { "target": "com.amazonaws.quicksight#DescribeNamespace" }, + { + "target": "com.amazonaws.quicksight#DescribeQPersonalizationConfiguration" + }, { "target": "com.amazonaws.quicksight#DescribeRefreshSchedule" }, @@ -38214,6 +38730,9 @@ { "target": "com.amazonaws.quicksight#ListFolders" }, + { + "target": "com.amazonaws.quicksight#ListFoldersForResource" + }, { "target": "com.amazonaws.quicksight#ListGroupMemberships" }, @@ -38382,6 +38901,9 @@ { "target": "com.amazonaws.quicksight#UpdatePublicSharingSettings" }, + { + "target": "com.amazonaws.quicksight#UpdateQPersonalizationConfiguration" + }, { "target": "com.amazonaws.quicksight#UpdateRefreshSchedule" }, @@ -45051,6 +45573,19 @@ "traits": { "smithy.api#documentation": "

An optional parameter that determines which validation strategy to use for the export job. If StrictModeForAllResources is set to TRUE, strict validation for every error is enforced. If it is set to FALSE, validation is skipped for specific UI errors that are shown as warnings. The default value for StrictModeForAllResources is FALSE.

" } + }, + "IncludeFolderMemberships": { + "target": "com.amazonaws.quicksight#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

A Boolean that determines if the exported asset carries over information about the folders that the asset is a member of.

" + } + }, + "IncludeFolderMembers": { + "target": "com.amazonaws.quicksight#IncludeFolderMembers", + "traits": { + "smithy.api#documentation": "

A setting that indicates whether you want to include folder assets. You can also use this setting to recursively include all subfolders of an exported folder.
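For context, a hedged sketch of how the two new export settings might be set together on a StartAssetBundleExportJob call via Soto; the job ID, account ID, folder ARN, and member labels are assumptions based on Soto's generated naming:

```swift
import SotoQuickSight

// Sketch only: placeholder IDs and ARN; assumes the generated request
// initializer exposes the new folder-related members.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let quickSight = QuickSight(client: client, region: .useast1)

let job = try await quickSight.startAssetBundleExportJob(
    .init(
        assetBundleExportJobId: "example-export-job",
        awsAccountId: "123456789012",
        exportFormat: .quicksightJson,
        includeFolderMembers: .recurse,     // export folder members, recursing into subfolders
        includeFolderMemberships: true,     // carry folder membership info with exported assets
        resourceArns: ["arn:aws:quicksight:us-east-1:123456789012:folder/example-folder"]
    )
)
print(job.arn ?? "")
```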

" + } } }, "traits": { @@ -48745,6 +49280,20 @@ "target": "com.amazonaws.quicksight#TopicColumn" } }, + "com.amazonaws.quicksight#TopicConfigOptions": { + "type": "structure", + "members": { + "QBusinessInsightsEnabled": { + "target": "com.amazonaws.quicksight#NullableBoolean", + "traits": { + "smithy.api#documentation": "

Enables Amazon Q Business Insights for a Topic.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Configuration options for a Topic.

" + } + }, "com.amazonaws.quicksight#TopicConstantValue": { "type": "structure", "members": { @@ -48830,6 +49379,12 @@ "traits": { "smithy.api#documentation": "

The data sets that the topic is associated with.

" } + }, + "ConfigOptions": { + "target": "com.amazonaws.quicksight#TopicConfigOptions", + "traits": { + "smithy.api#documentation": "

Configuration options for a Topic.

" + } } }, "traits": { @@ -53249,6 +53804,97 @@ "smithy.api#output": {} } }, + "com.amazonaws.quicksight#UpdateQPersonalizationConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.quicksight#UpdateQPersonalizationConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.quicksight#UpdateQPersonalizationConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.quicksight#AccessDeniedException" + }, + { + "target": "com.amazonaws.quicksight#ConflictException" + }, + { + "target": "com.amazonaws.quicksight#InternalFailureException" + }, + { + "target": "com.amazonaws.quicksight#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.quicksight#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.quicksight#ResourceUnavailableException" + }, + { + "target": "com.amazonaws.quicksight#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates a personalization configuration.
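A companion sketch for the update operation, again assuming Soto's generated naming and enum cases derived from the ENABLED/DISABLED values; the account ID is a placeholder:

```swift
import SotoQuickSight

// Sketch only: placeholder account ID.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let quickSight = QuickSight(client: client, region: .useast1)

let result = try await quickSight.updateQPersonalizationConfiguration(
    .init(awsAccountId: "123456789012", personalizationMode: .enabled)
)
// HTTP status of the update request.
print(result.status)
```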

", + "smithy.api#http": { + "method": "PUT", + "uri": "/accounts/{AwsAccountId}/q-personalization-configuration", + "code": 200 + } + } + }, + "com.amazonaws.quicksight#UpdateQPersonalizationConfigurationRequest": { + "type": "structure", + "members": { + "AwsAccountId": { + "target": "com.amazonaws.quicksight#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account that contains the personalization configuration that the user wants to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "PersonalizationMode": { + "target": "com.amazonaws.quicksight#PersonalizationMode", + "traits": { + "smithy.api#documentation": "

An option to allow Amazon QuickSight to customize data stories with user-specific metadata, specifically location and job information, in your IAM Identity Center instance.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.quicksight#UpdateQPersonalizationConfigurationResponse": { + "type": "structure", + "members": { + "PersonalizationMode": { + "target": "com.amazonaws.quicksight#PersonalizationMode", + "traits": { + "smithy.api#documentation": "

The personalization mode that is used for the personalization configuration.

" + } + }, + "RequestId": { + "target": "com.amazonaws.quicksight#String", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services request ID for this operation.

" + } + }, + "Status": { + "target": "com.amazonaws.quicksight#StatusCode", + "traits": { + "smithy.api#default": 0, + "smithy.api#documentation": "

The HTTP status of the request.

", + "smithy.api#httpResponseCode": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.quicksight#UpdateRefreshSchedule": { "type": "operation", "input": { diff --git a/models/rds-data.json b/models/rds-data.json index 16cdd4cdd4..4f627530b2 100644 --- a/models/rds-data.json +++ b/models/rds-data.json @@ -267,7 +267,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a SQL transaction.

\n \n

A transaction can run for a maximum of 24 hours. A transaction is terminated and rolled back automatically after 24\n hours.

\n

A transaction times out if no calls use its transaction ID in three minutes. If a transaction times out before it's\n committed, it's rolled back automatically.

\n

DDL statements inside a transaction cause an implicit commit. We recommend that you run each DDL statement in a separate\n ExecuteStatement call with continueAfterTimeout enabled.

\n
", + "smithy.api#documentation": "

Starts a SQL transaction.

\n \n

A transaction can run for a maximum of 24 hours. A transaction is terminated and rolled back automatically after 24\n hours.

\n

A transaction times out if no calls use its transaction ID in three minutes. If a transaction times out before it's\n committed, it's rolled back automatically.

\n

For Aurora MySQL, DDL statements inside a transaction cause an implicit commit. We recommend that you run each MySQL DDL statement in a separate\n ExecuteStatement call with continueAfterTimeout enabled.
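A short sketch of the begin/execute/commit flow this documentation describes, using Soto's RDSData client; the cluster ARN, secret ARN, database name, and SQL are placeholders:

```swift
import SotoRDSData

// Sketch only: placeholder ARNs and SQL. DDL should be run outside the
// transaction, one statement per ExecuteStatement call with
// continueAfterTimeout, as recommended above.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let rdsData = RDSData(client: client, region: .useast1)

let clusterArn = "arn:aws:rds:us-east-1:123456789012:cluster:example-cluster"
let secretArn = "arn:aws:secretsmanager:us-east-1:123456789012:secret:example-secret"

let begin = try await rdsData.beginTransaction(
    .init(database: "exampledb", resourceArn: clusterArn, secretArn: secretArn)
)
guard let transactionId = begin.transactionId else {
    fatalError("BeginTransaction returned no transaction ID")
}
_ = try await rdsData.executeStatement(
    .init(
        resourceArn: clusterArn,
        secretArn: secretArn,
        sql: "INSERT INTO example (id) VALUES (1)",
        transactionId: transactionId
    )
)
_ = try await rdsData.commitTransaction(
    .init(resourceArn: clusterArn, secretArn: secretArn, transactionId: transactionId)
)
```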

\n
", "smithy.api#http": { "code": 200, "method": "POST", @@ -661,7 +661,7 @@ "message": "The ExecuteSql API is deprecated, please use the ExecuteStatement API.", "since": "2019-03-21" }, - "smithy.api#documentation": "

Runs one or more SQL statements.

\n \n

This operation isn't supported for Aurora PostgreSQL Serverless v2 and provisioned DB clusters, and for Aurora Serverless v1 DB clusters, \n the operation is deprecated. Use the BatchExecuteStatement or ExecuteStatement operation.

\n
", + "smithy.api#documentation": "

Runs one or more SQL statements.

\n \n

This operation isn't supported for Aurora Serverless v2 and provisioned DB clusters.\n For Aurora Serverless v1 DB clusters, the operation is deprecated.\n Use the BatchExecuteStatement or ExecuteStatement operation.

\n
", "smithy.api#http": { "code": 200, "method": "POST", @@ -1115,7 +1115,7 @@ }, "aws.protocols#restJson1": {}, "smithy.api#cors": {}, - "smithy.api#documentation": "RDS Data API\n

Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora DB cluster. To run these\n statements, you use the RDS Data API (Data API).

\n

Data API is available with the following types of Aurora databases:

\n
    \n
  • \n

    Aurora PostgreSQL - Serverless v2, Serverless v1, and provisioned

    \n
  • \n
  • \n

    Aurora MySQL - Serverless v1 only

    \n
  • \n
\n

For more information about the Data API, see\n Using RDS Data API\n in the Amazon Aurora User Guide.

", + "smithy.api#documentation": "RDS Data API\n

Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora DB cluster. To run these\n statements, you use the RDS Data API (Data API).

\n

Data API is available with the following types of Aurora databases:

\n
    \n
  • \n

    Aurora PostgreSQL - Serverless v2, provisioned, and Serverless v1

    \n
  • \n
  • \n

    Aurora MySQL - Serverless v2, provisioned, and Serverless v1

    \n
  • \n
\n

For more information about the Data API, see\n Using RDS Data API\n in the Amazon Aurora User Guide.

", "smithy.api#title": "AWS RDS DataService", "smithy.rules#endpointRuleSet": { "version": "1.0", diff --git a/models/rds.json b/models/rds.json index 546341cfcb..50c8233fff 100644 --- a/models/rds.json +++ b/models/rds.json @@ -2081,7 +2081,7 @@ "target": "com.amazonaws.rds#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The pending maintenance action to apply to this resource.

\n

Valid Values:

\n
    \n
  • \n

    \n ca-certificate-rotation\n

    \n
  • \n
  • \n

    \n db-upgrade\n

    \n
  • \n
  • \n

    \n hardware-maintenance\n

    \n
  • \n
  • \n

    \n os-upgrade\n

    \n
  • \n
  • \n

    \n system-update\n

    \n
  • \n
\n

For more information about these actions, see \n Maintenance actions for Amazon Aurora or \n Maintenance actions for Amazon RDS.

", + "smithy.api#documentation": "

The pending maintenance action to apply to this resource.

\n

Valid Values: system-update, db-upgrade, \n hardware-maintenance, ca-certificate-rotation\n

", "smithy.api#required": {} } }, @@ -2982,6 +2982,23 @@ "smithy.api#documentation": "

This data type is used as a response element in the ModifyDBCluster operation and \n contains changes that will be applied during the next maintenance window.

" } }, + "com.amazonaws.rds#ClusterScalabilityType": { + "type": "enum", + "members": { + "STANDARD": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "standard" + } + }, + "LIMITLESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "limitless" + } + } + } + }, "com.amazonaws.rds#ConnectionPoolConfiguration": { "type": "structure", "members": { @@ -4463,6 +4480,12 @@ "smithy.api#documentation": "

The network type of the DB cluster.

\n

The network type is determined by the DBSubnetGroup specified for the DB cluster. \n A DBSubnetGroup can support only the IPv4 protocol or the IPv4 and the IPv6 \n protocols (DUAL).

\n

For more information, see \n Working with a DB instance in a VPC in the \n Amazon Aurora User Guide.\n

\n

Valid for Cluster Type: Aurora DB clusters only

\n

Valid Values: IPV4 | DUAL\n

" } }, + "ClusterScalabilityType": { + "target": "com.amazonaws.rds#ClusterScalabilityType", + "traits": { + "smithy.api#documentation": "

Specifies the scalability mode of the Aurora DB cluster. When set to limitless, the cluster operates as an Aurora Limitless Database.\n When set to standard (the default), the cluster uses normal DB instance creation.

\n

Valid for: Aurora DB clusters only

\n \n

You can't modify this setting after you create the DB cluster.
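A hedged sketch of setting this at cluster creation time through Soto's RDS client; the identifier and engine are placeholders, and the clusterScalabilityType label assumes the regenerated client surfaces the new member with Soto's usual naming:

```swift
import SotoRDS

// Sketch only: placeholder identifier and engine choice.
let client = AWSClient(httpClientProvider: .createNew)
defer { try? client.syncShutdown() }
let rds = RDS(client: client, region: .useast1)

let result = try await rds.createDBCluster(
    .init(
        clusterScalabilityType: .limitless,   // operate as an Aurora Limitless Database
        dbClusterIdentifier: "example-limitless-cluster",
        engine: "aurora-postgresql"
    )
)
print(result.dbCluster?.clusterScalabilityType as Any)
```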

\n
" + } + }, "DBSystemId": { "target": "com.amazonaws.rds#String", "traits": { @@ -5023,7 +5046,7 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The license model information for this DB instance.

\n \n

License models for RDS for Db2 require additional configuration. The Bring Your\n Own License (BYOL) model requires a custom parameter group. The Db2 license through\n Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more\n information, see RDS for Db2 licensing\n options in the Amazon RDS User Guide.

\n

The default for RDS for Db2 is bring-your-own-license.

\n
\n

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

\n

Valid Values:

\n
    \n
  • \n

    RDS for Db2 - bring-your-own-license | marketplace-license\n

    \n
  • \n
  • \n

    RDS for MariaDB - general-public-license\n

    \n
  • \n
  • \n

    RDS for Microsoft SQL Server - license-included\n

    \n
  • \n
  • \n

    RDS for MySQL - general-public-license\n

    \n
  • \n
  • \n

    RDS for Oracle - bring-your-own-license | license-included\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql-license\n

    \n
  • \n
" + "smithy.api#documentation": "

The license model information for this DB instance.

\n \n

License models for RDS for Db2 require additional configuration. The Bring Your\n Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through\n Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more\n information, see Amazon RDS for Db2 licensing\n options in the Amazon RDS User Guide.

\n

The default for RDS for Db2 is bring-your-own-license.

\n
\n

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

\n

Valid Values:

\n
    \n
  • \n

    RDS for Db2 - bring-your-own-license | marketplace-license\n

    \n
  • \n
  • \n

    RDS for MariaDB - general-public-license\n

    \n
  • \n
  • \n

    RDS for Microsoft SQL Server - license-included\n

    \n
  • \n
  • \n

    RDS for MySQL - general-public-license\n

    \n
  • \n
  • \n

    RDS for Oracle - bring-your-own-license | license-included\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql-license\n

    \n
  • \n
" } }, "Iops": { @@ -5457,7 +5480,7 @@ "DBParameterGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The name of the DB parameter group to associate with this DB instance.

\n

If you don't specify a value for DBParameterGroupName, then Amazon RDS\n uses the DBParameterGroup of the source DB instance for a same Region read\n replica, or the default DBParameterGroup for the specified DB engine for a\n cross-Region read replica.

\n

Specifying a parameter group for this operation is only supported for MySQL DB instances for cross-Region read replicas and for Oracle DB instances. It isn't supported for MySQL DB instances for same Region read replicas or for RDS Custom.

\n

Constraints:

\n
    \n
  • \n

    Must be 1 to 255 letters, numbers, or hyphens.

    \n
  • \n
  • \n

    First character must be a letter.

    \n
  • \n
  • \n

    Can't end with a hyphen or contain two consecutive hyphens.

    \n
  • \n
" + "smithy.api#documentation": "

The name of the DB parameter group to associate with this read replica DB\n instance.

\n

For Single-AZ or Multi-AZ DB instance read replica instances, if you don't specify a\n value for DBParameterGroupName, then Amazon RDS uses the\n DBParameterGroup of the source DB instance for a same Region read\n replica, or the default DBParameterGroup for the specified DB engine for a\n cross-Region read replica.

\n

For Multi-AZ DB cluster same Region read replica instances, if you don't specify a\n value for DBParameterGroupName, then Amazon RDS uses the default\n DBParameterGroup.

\n

Specifying a parameter group for this operation is only supported for MySQL DB\n instances for cross-Region read replicas, for Multi-AZ DB cluster read replica\n instances, and for Oracle DB instances. It isn't supported for MySQL DB instances for\n same Region read replicas or for RDS Custom.

\n

Constraints:

\n
    \n
  • \n

    Must be 1 to 255 letters, numbers, or hyphens.

    \n
  • \n
  • \n

    First character must be a letter.

    \n
  • \n
  • \n

    Can't end with a hyphen or contain two consecutive hyphens.

    \n
  • \n
" } }, "PubliclyAccessible": { @@ -6084,15 +6107,15 @@ { "target": "com.amazonaws.rds#InvalidDBClusterStateFault" }, - { - "target": "com.amazonaws.rds#InvalidMaxAcuFault" - }, { "target": "com.amazonaws.rds#InvalidVPCNetworkStateFault" }, { "target": "com.amazonaws.rds#MaxDBShardGroupLimitReached" }, + { + "target": "com.amazonaws.rds#NetworkTypeNotSupported" + }, { "target": "com.amazonaws.rds#UnsupportedDBEngineVersionFault" } @@ -6123,7 +6146,7 @@ "ComputeRedundancy": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

Specifies whether to create standby instances for the DB shard group. Valid values are the following:

\n
    \n
  • \n

    0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.

    \n
  • \n
  • \n

    1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.

    \n
  • \n
  • \n

    2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:

\n
    \n
  • \n

    0 - Creates a DB shard group without a standby DB shard group. This is the default value.

    \n
  • \n
  • \n

    1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).

    \n
  • \n
  • \n

    2 - Creates a DB shard group with two standby DB shard groups in two different AZs.

    \n
  • \n
" } }, "MaxACU": { @@ -6145,6 +6168,9 @@ "traits": { "smithy.api#documentation": "

Specifies whether the DB shard group is publicly accessible.

\n

When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from \n within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. \n Access to the DB shard group is ultimately controlled by the security group it uses. \n That public access is not permitted if the security group assigned to the DB shard group doesn't permit it.

\n

When the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address.

\n

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

\n

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB shard group is private.

    \n
  • \n
  • \n

    If the default VPC in the target Region has an internet gateway attached to it, the DB shard group is public.

    \n
  • \n
\n

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

\n
    \n
  • \n

    If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB shard group is private.

    \n
  • \n
  • \n

    If the subnets are part of a VPC that has an internet gateway attached to it, the DB shard group is public.

    \n
  • \n
" } + }, + "Tags": { + "target": "com.amazonaws.rds#TagList" } }, "traits": { @@ -6607,6 +6633,12 @@ "traits": { "smithy.api#documentation": "

Specifies whether to enable storage encryption for the new global database cluster.

\n

Constraints:

\n
    \n
  • \n

    Can't be specified if SourceDBClusterIdentifier is specified. In this case, Amazon Aurora uses the setting from the source DB cluster.

    \n
  • \n
" } + }, + "Tags": { + "target": "com.amazonaws.rds#TagList", + "traits": { + "smithy.api#documentation": "

Tags to assign to the global cluster.

" + } } }, "traits": { @@ -7532,6 +7564,12 @@ "smithy.api#documentation": "

The storage throughput for the DB cluster. The throughput is automatically set based on the IOPS that you provision, and is not configurable.

\n

This setting is only for non-Aurora Multi-AZ DB clusters.

" } }, + "ClusterScalabilityType": { + "target": "com.amazonaws.rds#ClusterScalabilityType", + "traits": { + "smithy.api#documentation": "

The scalability mode of the Aurora DB cluster. When set to limitless, the cluster operates as an Aurora Limitless Database.\n When set to standard (the default), the cluster uses normal DB instance creation.

" + } + }, "CertificateDetails": { "target": "com.amazonaws.rds#CertificateDetails" }, @@ -11111,7 +11149,7 @@ "ComputeRedundancy": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

Specifies whether to create standby instances for the DB shard group. Valid values are the following:

\n
    \n
  • \n

    0 - Creates a single, primary DB instance for each physical shard. This is the default value, and the only one supported for the preview.

    \n
  • \n
  • \n

    1 - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard.

    \n
  • \n
  • \n

    2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard.

    \n
  • \n
" + "smithy.api#documentation": "

Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:

\n
    \n
  • \n

    0 - Creates a DB shard group without a standby DB shard group. This is the default value.

    \n
  • \n
  • \n

    1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).

    \n
  • \n
  • \n

    2 - Creates a DB shard group with two standby DB shard groups in two different AZs.

    \n
  • \n
" } }, "Status": { @@ -11131,6 +11169,15 @@ "traits": { "smithy.api#documentation": "

The connection endpoint for the DB shard group.

" } + }, + "DBShardGroupArn": { + "target": "com.amazonaws.rds#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) for the DB shard group.

" + } + }, + "TagList": { + "target": "com.amazonaws.rds#TagList" } } }, @@ -19485,6 +19532,9 @@ "traits": { "smithy.api#documentation": "

A data object containing all properties for the current state of an in-process or pending switchover or failover process for this global cluster (Aurora global database).\n This object is empty unless the SwitchoverGlobalCluster or FailoverGlobalCluster operation was called on this global cluster.

" } + }, + "TagList": { + "target": "com.amazonaws.rds#TagList" } }, "traits": { @@ -20502,23 +20552,6 @@ "smithy.api#httpError": 400 } }, - "com.amazonaws.rds#InvalidMaxAcuFault": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.rds#ExceptionMessage" - } - }, - "traits": { - "aws.protocols#awsQueryError": { - "code": "InvalidMaxAcu", - "httpResponseCode": 400 - }, - "smithy.api#documentation": "

The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs).

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, "com.amazonaws.rds#InvalidOptionGroupStateFault": { "type": "structure", "members": { @@ -20779,7 +20812,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all tags on an Amazon RDS resource.

\n

For an overview on tagging an Amazon RDS resource, \n see Tagging Amazon RDS Resources \n in the Amazon RDS User Guide.

", + "smithy.api#documentation": "

Lists all tags on an Amazon RDS resource.

\n

For an overview on tagging an Amazon RDS resource, \n see Tagging Amazon RDS Resources in the Amazon RDS User Guide\n or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide.

", "smithy.api#examples": [ { "title": "To list tags on an Amazon RDS resource", @@ -22869,9 +22902,6 @@ }, { "target": "com.amazonaws.rds#InvalidDBClusterStateFault" - }, - { - "target": "com.amazonaws.rds#InvalidMaxAcuFault" } ], "traits": { @@ -22900,6 +22930,12 @@ "traits": { "smithy.api#documentation": "

The minimum capacity of the DB shard group in Aurora capacity units (ACUs).

" } + }, + "ComputeRedundancy": { + "target": "com.amazonaws.rds#IntegerOptional", + "traits": { + "smithy.api#documentation": "

Specifies whether to create standby DB shard groups for the DB shard group. Valid values are the following:

\n
    \n
  • \n

    0 - Creates a DB shard group without a standby DB shard group. This is the default value.

    \n
  • \n
  • \n

    1 - Creates a DB shard group with a standby DB shard group in a different Availability Zone (AZ).

    \n
  • \n
  • \n

    2 - Creates a DB shard group with two standby DB shard groups in two different AZs.

    \n
  • \n
" + } } }, "traits": { @@ -24734,7 +24770,7 @@ "Action": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The type of pending maintenance action that is available for the resource.

\n

For more information about maintenance actions, see Maintaining a DB instance.

\n

Valid Values:

\n
    \n
  • \n

    \n ca-certificate-rotation\n

    \n
  • \n
  • \n

    \n db-upgrade\n

    \n
  • \n
  • \n

    \n hardware-maintenance\n

    \n
  • \n
  • \n

    \n os-upgrade\n

    \n
  • \n
  • \n

    \n system-update\n

    \n
  • \n
\n

For more information about these actions, see \n Maintenance actions for Amazon Aurora or \n Maintenance actions for Amazon RDS.

" + "smithy.api#documentation": "

The type of pending maintenance action that is available for the resource.

\n

For more information about maintenance actions, see Maintaining a DB instance.

\n

Valid Values: system-update | db-upgrade | hardware-maintenance | ca-certificate-rotation\n

" } }, "AutoAppliedAfterDate": { @@ -26159,7 +26195,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes metadata tags from an Amazon RDS resource.

\n

For an overview on tagging an Amazon RDS resource, \n see Tagging Amazon RDS Resources \n in the Amazon RDS User Guide.\n

", + "smithy.api#documentation": "

Removes metadata tags from an Amazon RDS resource.

\n

For an overview on tagging an Amazon RDS resource, \n see Tagging Amazon RDS Resources in the Amazon RDS User Guide\n or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide.

", "smithy.api#examples": [ { "title": "To remove tags from a resource", @@ -27796,7 +27832,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most\n of the source's original configuration, including the default security group and DB parameter group. By default, the new DB\n instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group\n associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment.

\n

If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance\n before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you\n have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as\n the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original\n DB instance with the DB instance created from the snapshot.

\n

If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier\n must be the ARN of the shared DB snapshot.

\n \n

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot.

\n
", + "smithy.api#documentation": "

Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most\n of the source's original configuration, including the default security group and DB parameter group. By default, the new DB\n instance is created as a Single-AZ deployment, except when the instance is a SQL Server instance that has an option group\n associated with mirroring. In this case, the instance becomes a Multi-AZ deployment, not a Single-AZ deployment.

\n

If you want to replace your original DB instance with the new, restored DB instance, then rename your original DB instance\n before you call the RestoreDBInstanceFromDBSnapshot operation. RDS doesn't allow two DB instances with the same name. After you\n have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as\n the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot operation. The result is that you replace the original\n DB instance with the DB instance created from the snapshot.

\n

If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier\n must be the ARN of the shared DB snapshot.

\n

To restore from a DB snapshot with an unsupported engine version, you must first upgrade the \n engine version of the snapshot. For more information about upgrading an RDS for MySQL DB snapshot engine version, see Upgrading a MySQL DB snapshot engine version. \n For more information about upgrading an RDS for PostgreSQL DB snapshot engine version, see Upgrading a PostgreSQL DB snapshot engine version.

\n \n

This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot.

\n
", "smithy.api#examples": [ { "title": "To restore a DB instance from a DB snapshot", @@ -27892,7 +27928,7 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

License model information for the restored DB instance.

\n \n

License models for RDS for Db2 require additional configuration. The Bring Your\n Own License (BYOL) model requires a custom parameter group. The Db2 license through\n Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more\n information, see RDS for Db2 licensing\n options in the Amazon RDS User Guide.

\n
\n

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

\n

Valid Values:

\n
    \n
  • \n

    RDS for Db2 - bring-your-own-license | marketplace-license\n

    \n
  • \n
  • \n

    RDS for MariaDB - general-public-license\n

    \n
  • \n
  • \n

    RDS for Microsoft SQL Server - license-included\n

    \n
  • \n
  • \n

    RDS for MySQL - general-public-license\n

    \n
  • \n
  • \n

    RDS for Oracle - bring-your-own-license | license-included\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql-license\n

    \n
  • \n
\n

Default: Same as the source.

" + "smithy.api#documentation": "

License model information for the restored DB instance.

\n \n

License models for RDS for Db2 require additional configuration. The Bring Your\n Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through\n Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more\n information, see Amazon RDS for Db2 licensing\n options in the Amazon RDS User Guide.

\n
\n

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

\n

Valid Values:

\n
    \n
  • \n

    RDS for Db2 - bring-your-own-license | marketplace-license\n

    \n
  • \n
  • \n

    RDS for MariaDB - general-public-license\n

    \n
  • \n
  • \n

    RDS for Microsoft SQL Server - license-included\n

    \n
  • \n
  • \n

    RDS for MySQL - general-public-license\n

    \n
  • \n
  • \n

    RDS for Oracle - bring-your-own-license | license-included\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql-license\n

    \n
  • \n
\n

Default: Same as the source.

" } }, "DBName": { @@ -28764,7 +28800,7 @@ "LicenseModel": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The license model information for the restored DB instance.

\n \n

License models for RDS for Db2 require additional configuration. The Bring Your\n Own License (BYOL) model requires a custom parameter group. The Db2 license through\n Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more\n information, see RDS for Db2 licensing\n options in the Amazon RDS User Guide.

\n
\n

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

\n

Valid Values:

\n
    \n
  • \n

    RDS for Db2 - bring-your-own-license | marketplace-license\n

    \n
  • \n
  • \n

    RDS for MariaDB - general-public-license\n

    \n
  • \n
  • \n

    RDS for Microsoft SQL Server - license-included\n

    \n
  • \n
  • \n

    RDS for MySQL - general-public-license\n

    \n
  • \n
  • \n

    RDS for Oracle - bring-your-own-license | license-included\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql-license\n

    \n
  • \n
\n

Default: Same as the source.

" + "smithy.api#documentation": "

The license model information for the restored DB instance.

\n \n

License models for RDS for Db2 require additional configuration. The Bring Your\n Own License (BYOL) model requires a custom parameter group and an Amazon Web Services License Manager self-managed license. The Db2 license through\n Amazon Web Services Marketplace model requires an Amazon Web Services Marketplace subscription. For more\n information, see Amazon RDS for Db2 licensing\n options in the Amazon RDS User Guide.

\n
\n

This setting doesn't apply to Amazon Aurora or RDS Custom DB instances.

\n

Valid Values:

\n
    \n
  • \n

    RDS for Db2 - bring-your-own-license | marketplace-license\n

    \n
  • \n
  • \n

    RDS for MariaDB - general-public-license\n

    \n
  • \n
  • \n

    RDS for Microsoft SQL Server - license-included\n

    \n
  • \n
  • \n

    RDS for MySQL - general-public-license\n

    \n
  • \n
  • \n

    RDS for Oracle - bring-your-own-license | license-included\n

    \n
  • \n
  • \n

    RDS for PostgreSQL - postgresql-license\n

    \n
  • \n
\n

Default: Same as the source.

" } }, "DBName": { diff --git a/models/redshift.json b/models/redshift.json index 3c33c7abb7..673acc0057 100644 --- a/models/redshift.json +++ b/models/redshift.json @@ -2814,7 +2814,7 @@ "target": "com.amazonaws.redshift#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The node type to be provisioned for the cluster. For information about node types,\n go to Working with\n Clusters in the Amazon Redshift Cluster Management Guide.

\n

Valid Values: \n dc2.large | dc2.8xlarge | \n ra3.xlplus | ra3.4xlarge | ra3.16xlarge\n

", + "smithy.api#documentation": "

The node type to be provisioned for the cluster. For information about node types,\n go to Working with\n Clusters in the Amazon Redshift Cluster Management Guide.

\n

Valid Values: \n dc2.large | dc2.8xlarge | \n ra3.large | ra3.xlplus | ra3.4xlarge | ra3.16xlarge\n

", "smithy.api#required": {} } }, @@ -3814,6 +3814,103 @@ "smithy.api#output": {} } }, + "com.amazonaws.redshift#CreateIntegration": { + "type": "operation", + "input": { + "target": "com.amazonaws.redshift#CreateIntegrationMessage" + }, + "output": { + "target": "com.amazonaws.redshift#Integration" + }, + "errors": [ + { + "target": "com.amazonaws.redshift#IntegrationAlreadyExistsFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationConflictOperationFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationQuotaExceededFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationSourceNotFoundFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationTargetNotFoundFault" + }, + { + "target": "com.amazonaws.redshift#InvalidClusterStateFault" + }, + { + "target": "com.amazonaws.redshift#InvalidTagFault" + }, + { + "target": "com.amazonaws.redshift#TagLimitExceededFault" + }, + { + "target": "com.amazonaws.redshift#UnsupportedOperationFault" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a zero-ETL integration with Amazon Redshift.

" + } + }, + "com.amazonaws.redshift#CreateIntegrationMessage": { + "type": "structure", + "members": { + "SourceArn": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the database to use as the source for replication.

", + "smithy.api#required": {} + } + }, + "TargetArn": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Redshift data warehouse to use as the target for replication.

", + "smithy.api#required": {} + } + }, + "IntegrationName": { + "target": "com.amazonaws.redshift#IntegrationName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the integration.

", + "smithy.api#required": {} + } + }, + "KMSKeyId": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#documentation": "

A Key Management Service (KMS) key identifier for the key to use to\n encrypt the integration. If you don't specify an encryption key, the default\n Amazon Web Services owned key is used.

" + } + }, + "TagList": { + "target": "com.amazonaws.redshift#TagList", + "traits": { + "smithy.api#documentation": "

A list of tags.

" + } + }, + "AdditionalEncryptionContext": { + "target": "com.amazonaws.redshift#EncryptionContextMap", + "traits": { + "smithy.api#documentation": "

An optional set of non-secret key–value pairs that contains additional contextual\n information about the data. For more information, see Encryption\n context in the Amazon Web Services Key Management Service Developer\n Guide.

\n

You can only include this parameter if you specify the KMSKeyId parameter.

" + } + }, + "Description": { + "target": "com.amazonaws.redshift#IntegrationDescription", + "traits": { + "smithy.api#documentation": "

A description of the integration.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.redshift#CreateRedshiftIdcApplication": { "type": "operation", "input": { @@ -5252,6 +5349,48 @@ "smithy.api#input": {} } }, + "com.amazonaws.redshift#DeleteIntegration": { + "type": "operation", + "input": { + "target": "com.amazonaws.redshift#DeleteIntegrationMessage" + }, + "output": { + "target": "com.amazonaws.redshift#Integration" + }, + "errors": [ + { + "target": "com.amazonaws.redshift#IntegrationConflictOperationFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationConflictStateFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationNotFoundFault" + }, + { + "target": "com.amazonaws.redshift#UnsupportedOperationFault" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a zero-ETL integration with Amazon Redshift.

" + } + }, + "com.amazonaws.redshift#DeleteIntegrationMessage": { + "type": "structure", + "members": { + "IntegrationArn": { + "target": "com.amazonaws.redshift#IntegrationArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The unique identifier of the integration to delete.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.redshift#DeletePartner": { "type": "operation", "input": { @@ -7216,6 +7355,135 @@ "smithy.api#input": {} } }, + "com.amazonaws.redshift#DescribeIntegrations": { + "type": "operation", + "input": { + "target": "com.amazonaws.redshift#DescribeIntegrationsMessage" + }, + "output": { + "target": "com.amazonaws.redshift#IntegrationsMessage" + }, + "errors": [ + { + "target": "com.amazonaws.redshift#IntegrationNotFoundFault" + }, + { + "target": "com.amazonaws.redshift#UnsupportedOperationFault" + } + ], + "traits": { + "smithy.api#documentation": "

Describes one or more zero-ETL integrations with Amazon Redshift.

", + "smithy.api#paginated": { + "inputToken": "Marker", + "outputToken": "Marker", + "items": "Integrations", + "pageSize": "MaxRecords" + } + } + }, + "com.amazonaws.redshift#DescribeIntegrationsFilter": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.redshift#DescribeIntegrationsFilterName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Specifies the type of integration filter.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.redshift#DescribeIntegrationsFilterValueList", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Specifies the values to filter on.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A set of elements to filter the returned integrations.

" + } + }, + "com.amazonaws.redshift#DescribeIntegrationsFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.redshift#DescribeIntegrationsFilter", + "traits": { + "smithy.api#xmlName": "DescribeIntegrationsFilter" + } + } + }, + "com.amazonaws.redshift#DescribeIntegrationsFilterName": { + "type": "enum", + "members": { + "INTEGRATION_ARN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "integration-arn" + } + }, + "SOURCE_ARN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "source-arn" + } + }, + "SOURCE_TYPES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "source-types" + } + }, + "STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "status" + } + } + } + }, + "com.amazonaws.redshift#DescribeIntegrationsFilterValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#xmlName": "Value" + } + } + }, + "com.amazonaws.redshift#DescribeIntegrationsMessage": { + "type": "structure", + "members": { + "IntegrationArn": { + "target": "com.amazonaws.redshift#IntegrationArn", + "traits": { + "smithy.api#documentation": "

The unique identifier of the integration.

" + } + }, + "MaxRecords": { + "target": "com.amazonaws.redshift#IntegerOptional", + "traits": { + "smithy.api#documentation": "

The maximum number of response records to return in each call. If the number of\n remaining response records exceeds the specified MaxRecords value, a value\n is returned in a marker field of the response. You can retrieve the next\n set of records by retrying the command with the returned marker value.

\n

Default: 100\n

\n

Constraints: minimum 20, maximum 100.

" + } + }, + "Marker": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#documentation": "

An optional pagination token provided by a previous DescribeIntegrations\n request. If this parameter is specified, the response includes only records beyond the\n marker, up to the value specified by MaxRecords.

" + } + }, + "Filters": { + "target": "com.amazonaws.redshift#DescribeIntegrationsFilterList", + "traits": { + "smithy.api#documentation": "

A filter that specifies one or more resources to return.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.redshift#DescribeLoggingStatus": { "type": "operation", "input": { @@ -8117,7 +8385,7 @@ "ResourceType": { "target": "com.amazonaws.redshift#String", "traits": { - "smithy.api#documentation": "

The type of resource with which you want to view tags. Valid resource types are: Cluster, CIDR/IP, EC2 security group, Snapshot, Cluster security group, Subnet group, HSM connection, HSM certificate, Parameter group, and Snapshot copy grant. For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide.

" + "smithy.api#documentation": "

The type of resource with which you want to view tags. Valid resource types are: Cluster, CIDR/IP, EC2 security group, Snapshot, Cluster security group, Subnet group, HSM connection, HSM certificate, Parameter group, Snapshot copy grant, and Integration (zero-ETL integration). To describe the tags associated with an integration, don't specify ResourceType; instead, specify the ResourceName of the integration. For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide.
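A hedged sketch of the tagging note above: for a zero-ETL integration, pass the integration's ARN as resourceName and leave resourceType unset. The Soto-generated shape and member names are assumed from the model; the ARN is a placeholder supplied by the caller.

import SotoRedshift

// Lists tags on a zero-ETL integration; ResourceType is deliberately omitted.
func tagsForIntegration(arn: String, redshift: Redshift) async throws {
    let response = try await redshift.describeTags(.init(resourceName: arn))
    for tagged in response.taggedResources ?? [] {
        print(tagged.tag?.key ?? "", "=", tagged.tag?.value ?? "")
    }
}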

" } }, "MaxRecords": { @@ -8226,6 +8494,16 @@ "smithy.api#input": {} } }, + "com.amazonaws.redshift#Description": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + }, + "smithy.api#pattern": "^.*$" + } + }, "com.amazonaws.redshift#DisableLogging": { "type": "operation", "input": { @@ -8506,9 +8784,9 @@ } }, "S3KeyPrefix": { - "target": "com.amazonaws.redshift#String", + "target": "com.amazonaws.redshift#S3KeyPrefixValue", "traits": { - "smithy.api#documentation": "

The prefix applied to the log file names.

Constraints: Cannot exceed 512 characters. Cannot contain spaces ( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. The hexadecimal codes for invalid characters are: x00 to x20, x22, x27, x5c, and x7f or larger.

" + "smithy.api#documentation": "

The prefix applied to the log file names.

Valid characters are any letter from any language, any whitespace character, any numeric character, and the following characters: underscore (_), period (.), colon (:), slash (/), equal (=), plus (+), backslash (\\), hyphen (-), at symbol (@).
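Since S3KeyPrefix is retyped to the constrained S3KeyPrefixValue (max 256 characters, pattern ^[\p{L}\p{Z}\p{N}_.:/=+\-@]*$, defined later in this hunk), a hedged Soto sketch of enabling logging with a conforming prefix follows; the bucket name and prefix are placeholders and the generated member names are assumed.

import SotoRedshift

// Enables audit logging to S3 with a prefix that satisfies the new pattern.
func enableAuditLogging(clusterIdentifier: String, redshift: Redshift) async throws {
    _ = try await redshift.enableLogging(.init(
        bucketName: "example-redshift-audit-logs",   // placeholder bucket
        clusterIdentifier: clusterIdentifier,
        s3KeyPrefix: "audit/prod/"                   // letters, digits, and _ . : / = + \ - @ only
    ))
}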

" } }, "LogDestinationType": { @@ -8630,6 +8908,15 @@ "smithy.api#output": {} } }, + "com.amazonaws.redshift#EncryptionContextMap": { + "type": "map", + "key": { + "target": "com.amazonaws.redshift#String" + }, + "value": { + "target": "com.amazonaws.redshift#String" + } + }, "com.amazonaws.redshift#Endpoint": { "type": "structure", "members": { @@ -10172,6 +10459,148 @@ "com.amazonaws.redshift#IntegerOptional": { "type": "integer" }, + "com.amazonaws.redshift#Integration": { + "type": "structure", + "members": { + "IntegrationArn": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the integration.

" + } + }, + "IntegrationName": { + "target": "com.amazonaws.redshift#IntegrationName", + "traits": { + "smithy.api#documentation": "

The name of the integration.

" + } + }, + "SourceArn": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the database used as the source for\n replication.

" + } + }, + "TargetArn": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Redshift data warehouse to use as the target for replication.

" + } + }, + "Status": { + "target": "com.amazonaws.redshift#ZeroETLIntegrationStatus", + "traits": { + "smithy.api#documentation": "

The current status of the integration.

" + } + }, + "Errors": { + "target": "com.amazonaws.redshift#IntegrationErrorList", + "traits": { + "smithy.api#documentation": "

Any errors associated with the integration.

" + } + }, + "CreateTime": { + "target": "com.amazonaws.redshift#TStamp", + "traits": { + "smithy.api#documentation": "

The time (UTC) when the integration was created.

" + } + }, + "Description": { + "target": "com.amazonaws.redshift#Description", + "traits": { + "smithy.api#documentation": "

The description of the integration.

" + } + }, + "KMSKeyId": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#documentation": "

The Key Management Service (KMS) key identifier for the key used to\n encrypt the integration.

" + } + }, + "AdditionalEncryptionContext": { + "target": "com.amazonaws.redshift#EncryptionContextMap", + "traits": { + "smithy.api#documentation": "

The encryption context for the integration. For more information, \n see Encryption context in the Amazon Web Services Key Management Service Developer\n Guide.

" + } + }, + "Tags": { + "target": "com.amazonaws.redshift#TagList", + "traits": { + "smithy.api#documentation": "

The list of tags associated with the integration.

" + } + } + } + }, + "com.amazonaws.redshift#IntegrationAlreadyExistsFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.redshift#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "IntegrationAlreadyExistsFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

The integration you are trying to create already exists.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.redshift#IntegrationArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^arn:aws[a-z\\-]*:redshift:[a-z0-9\\-]*:[0-9]*:integration:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + } + }, + "com.amazonaws.redshift#IntegrationConflictOperationFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.redshift#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "IntegrationConflictOperationFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

A conflicting conditional operation is currently in progress against this resource.\n This typically occurs when there are multiple requests being made to the same resource at the same time,\n and these requests conflict with each other.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.redshift#IntegrationConflictStateFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.redshift#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "IntegrationConflictStateFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

The integration is in an invalid state and can't perform the requested operation.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.redshift#IntegrationDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1000 + }, + "smithy.api#pattern": "^.*$" + } + }, "com.amazonaws.redshift#IntegrationError": { "type": "structure", "members": { @@ -10203,6 +10632,25 @@ } } }, + "com.amazonaws.redshift#IntegrationList": { + "type": "list", + "member": { + "target": "com.amazonaws.redshift#Integration", + "traits": { + "smithy.api#xmlName": "Integration" + } + } + }, + "com.amazonaws.redshift#IntegrationName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 63 + }, + "smithy.api#pattern": "^[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$" + } + }, "com.amazonaws.redshift#IntegrationNotFoundFault": { "type": "structure", "members": { @@ -10220,6 +10668,77 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.redshift#IntegrationQuotaExceededFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.redshift#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "IntegrationQuotaExceededFault", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

You can't create any more zero-ETL integrations because the quota has been reached.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.redshift#IntegrationSourceNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.redshift#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "IntegrationSourceNotFoundFault", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "

The specified integration source can't be found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.redshift#IntegrationTargetNotFoundFault": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.redshift#ExceptionMessage" + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "IntegrationTargetNotFoundFault", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "

The specified integration target can't be found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.redshift#IntegrationsMessage": { + "type": "structure", + "members": { + "Marker": { + "target": "com.amazonaws.redshift#String", + "traits": { + "smithy.api#documentation": "

A value that indicates the starting point for the next set of response records in a subsequent request. \n If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. \n If the Marker field is empty, all response records have been retrieved for the request.

" + } + }, + "Integrations": { + "target": "com.amazonaws.redshift#IntegrationList", + "traits": { + "smithy.api#documentation": "

List of integrations that are described.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.redshift#InvalidAuthenticationProfileRequestFault": { "type": "structure", "members": { @@ -10934,7 +11453,7 @@ } }, "S3KeyPrefix": { - "target": "com.amazonaws.redshift#String", + "target": "com.amazonaws.redshift#S3KeyPrefixValue", "traits": { "smithy.api#documentation": "

The prefix applied to the log file names.

" } @@ -11453,7 +11972,7 @@ "NodeType": { "target": "com.amazonaws.redshift#String", "traits": { - "smithy.api#documentation": "

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

" + "smithy.api#documentation": "

The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter.

For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide.

Valid Values: dc2.large | dc2.8xlarge | ra3.large | ra3.xlplus | ra3.4xlarge | ra3.16xlarge

" } }, "NumberOfNodes": { @@ -12110,6 +12629,63 @@ "smithy.api#output": {} } }, + "com.amazonaws.redshift#ModifyIntegration": { + "type": "operation", + "input": { + "target": "com.amazonaws.redshift#ModifyIntegrationMessage" + }, + "output": { + "target": "com.amazonaws.redshift#Integration" + }, + "errors": [ + { + "target": "com.amazonaws.redshift#IntegrationAlreadyExistsFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationConflictOperationFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationConflictStateFault" + }, + { + "target": "com.amazonaws.redshift#IntegrationNotFoundFault" + }, + { + "target": "com.amazonaws.redshift#UnsupportedOperationFault" + } + ], + "traits": { + "smithy.api#documentation": "

Modifies a zero-ETL integration with Amazon Redshift.

" + } + }, + "com.amazonaws.redshift#ModifyIntegrationMessage": { + "type": "structure", + "members": { + "IntegrationArn": { + "target": "com.amazonaws.redshift#IntegrationArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The unique identifier of the integration to modify.

", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.redshift#IntegrationDescription", + "traits": { + "smithy.api#documentation": "

A new description for the integration.

" + } + }, + "IntegrationName": { + "target": "com.amazonaws.redshift#IntegrationName", + "traits": { + "smithy.api#documentation": "

A new name for the integration.
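A minimal sketch of calling the new ModifyIntegration operation through the regenerated Soto client, assuming the usual Soto conventions (lowerCamelCase operation method, alphabetically ordered memberwise initializer); the description, integration name, and ARN are illustrative values only.

import SotoRedshift

// Renames a zero-ETL integration and updates its description.
func renameIntegration(arn: String, redshift: Redshift) async throws {
    let updated = try await redshift.modifyIntegration(.init(
        description: "Orders replication into the analytics warehouse",  // example text
        integrationArn: arn,
        integrationName: "orders-zero-etl"                               // example name
    ))
    print(updated.integrationName ?? "-", updated.integrationArn ?? "-")
}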

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.redshift#ModifyRedshiftIdcApplication": { "type": "operation", "input": { @@ -13754,6 +14330,9 @@ { "target": "com.amazonaws.redshift#CreateHsmConfiguration" }, + { + "target": "com.amazonaws.redshift#CreateIntegration" + }, { "target": "com.amazonaws.redshift#CreateRedshiftIdcApplication" }, @@ -13808,6 +14387,9 @@ { "target": "com.amazonaws.redshift#DeleteHsmConfiguration" }, + { + "target": "com.amazonaws.redshift#DeleteIntegration" + }, { "target": "com.amazonaws.redshift#DeletePartner" }, @@ -13904,6 +14486,9 @@ { "target": "com.amazonaws.redshift#DescribeInboundIntegrations" }, + { + "target": "com.amazonaws.redshift#DescribeIntegrations" + }, { "target": "com.amazonaws.redshift#DescribeLoggingStatus" }, @@ -14027,6 +14612,9 @@ { "target": "com.amazonaws.redshift#ModifyEventSubscription" }, + { + "target": "com.amazonaws.redshift#ModifyIntegration" + }, { "target": "com.amazonaws.redshift#ModifyRedshiftIdcApplication" }, @@ -15789,7 +16377,7 @@ } ], "traits": { - "smithy.api#documentation": "

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions: you can only resize clusters of the following types: dc2.large, dc2.8xlarge, ra3.xlplus, ra3.4xlarge, ra3.16xlarge; and the type of nodes that you add must match the node type for the cluster.

" + "smithy.api#documentation": "

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions: you can only resize clusters of the following types: dc2.large, dc2.8xlarge, ra3.large, ra3.xlplus, ra3.4xlarge, ra3.16xlarge; and the type of nodes that you add must match the node type for the cluster.
" } }, "com.amazonaws.redshift#ResizeClusterMessage": { @@ -16912,6 +17500,16 @@ "smithy.api#output": {} } }, + "com.amazonaws.redshift#S3KeyPrefixValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*$" + } + }, "com.amazonaws.redshift#SNSInvalidTopicFault": { "type": "structure", "members": { diff --git a/models/resiliencehub.json b/models/resiliencehub.json index fd69f005fa..e15e88b48c 100644 --- a/models/resiliencehub.json +++ b/models/resiliencehub.json @@ -319,30 +319,38 @@ } }, "com.amazonaws.resiliencehub#AlarmType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Metric", - "name": "METRIC" - }, - { - "value": "Composite", - "name": "COMPOSITE" - }, - { - "value": "Canary", - "name": "CANARY" - }, - { - "value": "Logs", - "name": "LOGS" - }, - { - "value": "Event", - "name": "EVENT" + "type": "enum", + "members": { + "METRIC": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Metric" + } + }, + "COMPOSITE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Composite" + } + }, + "CANARY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Canary" + } + }, + "LOGS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Logs" + } + }, + "EVENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Event" } - ] + } } }, "com.amazonaws.resiliencehub#App": { @@ -459,6 +467,12 @@ "traits": { "smithy.api#documentation": "

Recovery Point Objective (RPO) in seconds.

" } + }, + "awsApplicationArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

" + } } }, "traits": { @@ -582,7 +596,7 @@ "summary": { "target": "com.amazonaws.resiliencehub#AssessmentSummary", "traits": { - "smithy.api#documentation": "

Indicates a concise summary that provides an overview of the Resilience Hub assessment.

" + "smithy.api#documentation": "

Indicates the AI-generated summary for the Resilience Hub assessment, providing a concise overview that highlights the top risks and recommendations.

This property is available only in the US East (N. Virginia) Region.
" } } }, @@ -591,18 +605,20 @@ } }, "com.amazonaws.resiliencehub#AppAssessmentScheduleType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Disabled", - "name": "DISABLED" - }, - { - "value": "Daily", - "name": "DAILY" + "type": "enum", + "members": { + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Disabled" + } + }, + "DAILY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Daily" } - ] + } } }, "com.amazonaws.resiliencehub#AppAssessmentSummary": { @@ -707,34 +723,44 @@ } }, "com.amazonaws.resiliencehub#AppComplianceStatusType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PolicyBreached", - "name": "POLICY_BREACHED" - }, - { - "value": "PolicyMet", - "name": "POLICY_MET" - }, - { - "value": "NotAssessed", - "name": "NOT_ASSESSED" - }, - { - "value": "ChangesDetected", - "name": "CHANGES_DETECTED" - }, - { - "value": "NotApplicable", - "name": "NOT_APPLICABLE" - }, - { - "value": "MissingPolicy", - "name": "MISSING_POLICY" + "type": "enum", + "members": { + "POLICY_BREACHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PolicyBreached" + } + }, + "POLICY_MET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PolicyMet" + } + }, + "NOT_ASSESSED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotAssessed" + } + }, + "CHANGES_DETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ChangesDetected" + } + }, + "NOT_APPLICABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotApplicable" + } + }, + "MISSING_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MissingPolicy" } - ] + } } }, "com.amazonaws.resiliencehub#AppComponent": { @@ -828,22 +854,26 @@ } }, "com.amazonaws.resiliencehub#AppDriftStatusType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "NotChecked", - "name": "NOT_CHECKED" - }, - { - "value": "NotDetected", - "name": "NOT_DETECTED" - }, - { - "value": "Detected", - "name": "DETECTED" + "type": "enum", + "members": { + "NOT_CHECKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotChecked" + } + }, + "NOT_DETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotDetected" } - ] + }, + "DETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Detected" + } + } } }, "com.amazonaws.resiliencehub#AppInputSource": { @@ -899,18 +929,20 @@ } }, "com.amazonaws.resiliencehub#AppStatusType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Active", - "name": "ACTIVE" - }, - { - "value": "Deleting", - "name": "DELETING" + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Active" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleting" } - ] + } } }, "com.amazonaws.resiliencehub#AppSummary": { @@ -991,6 +1023,12 @@ "traits": { "smithy.api#documentation": "

Recovery Point Objective (RPO) in seconds.

" } + }, + "awsApplicationArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

" + } } }, "traits": { @@ -1074,18 +1112,20 @@ } }, "com.amazonaws.resiliencehub#AssessmentInvoker": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "User", - "name": "USER" - }, - { - "value": "System", - "name": "SYSTEM" + "type": "enum", + "members": { + "USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "User" + } + }, + "SYSTEM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "System" } - ] + } } }, "com.amazonaws.resiliencehub#AssessmentRiskRecommendation": { @@ -1121,26 +1161,32 @@ } }, "com.amazonaws.resiliencehub#AssessmentStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Pending", - "name": "PENDING" - }, - { - "value": "InProgress", - "name": "INPROGRESS" - }, - { - "value": "Failed", - "name": "FAILED" - }, - { - "value": "Success", - "name": "SUCCESS" + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Pending" + } + }, + "INPROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InProgress" } - ] + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Success" + } + } } }, "com.amazonaws.resiliencehub#AssessmentStatusList": { @@ -2400,8 +2446,7 @@ "item": { "target": "com.amazonaws.resiliencehub#UpdateRecommendationStatusItem", "traits": { - "smithy.api#documentation": "

The operational recommendation item.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The operational recommendation item.

" } }, "excluded": { @@ -2510,26 +2555,32 @@ } }, "com.amazonaws.resiliencehub#ComplianceStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "PolicyBreached", - "name": "POLICY_BREACHED" - }, - { - "value": "PolicyMet", - "name": "POLICY_MET" - }, - { - "value": "NotApplicable", - "name": "NOT_APPLICABLE" - }, - { - "value": "MissingPolicy", - "name": "MISSING_POLICY" + "type": "enum", + "members": { + "POLICY_BREACHED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PolicyBreached" + } + }, + "POLICY_MET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PolicyMet" + } + }, + "NOT_APPLICABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotApplicable" + } + }, + "MISSING_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MissingPolicy" } - ] + } } }, "com.amazonaws.resiliencehub#ComponentCompliancesList": { @@ -2651,34 +2702,44 @@ } }, "com.amazonaws.resiliencehub#ConfigRecommendationOptimizationType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "LeastCost", - "name": "LEAST_COST" - }, - { - "value": "LeastChange", - "name": "LEAST_CHANGE" - }, - { - "value": "BestAZRecovery", - "name": "BEST_AZ_RECOVERY" - }, - { - "value": "LeastErrors", - "name": "LEAST_ERRORS" - }, - { - "value": "BestAttainable", - "name": "BEST_ATTAINABLE" - }, - { - "value": "BestRegionRecovery", - "name": "BEST_REGION_RECOVERY" + "type": "enum", + "members": { + "LEAST_COST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LeastCost" + } + }, + "LEAST_CHANGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LeastChange" + } + }, + "BEST_AZ_RECOVERY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BestAZRecovery" } - ] + }, + "LEAST_ERRORS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LeastErrors" + } + }, + "BEST_ATTAINABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BestAttainable" + } + }, + "BEST_REGION_RECOVERY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BestRegionRecovery" + } + } } }, "com.amazonaws.resiliencehub#ConflictException": { @@ -2737,26 +2798,32 @@ } }, "com.amazonaws.resiliencehub#CostFrequency": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Hourly", - "name": "HOURLY" - }, - { - "value": "Daily", - "name": "DAILY" - }, - { - "value": "Monthly", - "name": "MONTHLY" - }, - { - "value": "Yearly", - "name": "YEARLY" + "type": "enum", + "members": { + "HOURLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Hourly" + } + }, + "DAILY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Daily" + } + }, + "MONTHLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Monthly" + } + }, + "YEARLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Yearly" } - ] + } } }, "com.amazonaws.resiliencehub#CreateApp": { @@ -2851,6 +2918,12 @@ "traits": { "smithy.api#documentation": "

The list of events you would like to subscribe and get notification for. Currently,\n Resilience Hub supports only Drift detected and\n Scheduled assessment failure events notification.

" } + }, + "awsApplicationArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

" + } } } }, @@ -3335,22 +3408,26 @@ } }, "com.amazonaws.resiliencehub#DataLocationConstraint": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "AnyLocation", - "name": "ANY_LOCATION" - }, - { - "value": "SameContinent", - "name": "SAME_CONTINENT" - }, - { - "value": "SameCountry", - "name": "SAME_COUNTRY" + "type": "enum", + "members": { + "ANY_LOCATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AnyLocation" + } + }, + "SAME_CONTINENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SameContinent" } - ] + }, + "SAME_COUNTRY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SameCountry" + } + } } }, "com.amazonaws.resiliencehub#DeleteApp": { @@ -4579,7 +4656,13 @@ "errorMessage": { "target": "com.amazonaws.resiliencehub#String500", "traits": { - "smithy.api#documentation": "

The returned error message for the request.

" + "smithy.api#documentation": "

The error message returned for the resource request.

" + } + }, + "errorDetails": { + "target": "com.amazonaws.resiliencehub#ErrorDetailList", + "traits": { + "smithy.api#documentation": "

List of errors that were encountered while importing resources.

" } } } @@ -4726,22 +4809,26 @@ } }, "com.amazonaws.resiliencehub#DifferenceType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "NotEqual", - "name": "NOT_EQUAL" - }, - { - "value": "Added", - "name": "ADDED" - }, - { - "value": "Removed", - "name": "REMOVED" + "type": "enum", + "members": { + "NOT_EQUAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotEqual" + } + }, + "ADDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Added" + } + }, + "REMOVED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Removed" } - ] + } } }, "com.amazonaws.resiliencehub#DisruptionCompliance": { @@ -4836,26 +4923,32 @@ } }, "com.amazonaws.resiliencehub#DisruptionType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Software", - "name": "SOFTWARE" - }, - { - "value": "Hardware", - "name": "HARDWARE" - }, - { - "value": "AZ", - "name": "AZ" - }, - { - "value": "Region", - "name": "REGION" + "type": "enum", + "members": { + "SOFTWARE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Software" + } + }, + "HARDWARE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Hardware" + } + }, + "AZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AZ" + } + }, + "REGION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Region" } - ] + } } }, "com.amazonaws.resiliencehub#DocumentName": { @@ -4874,37 +4967,43 @@ } }, "com.amazonaws.resiliencehub#DriftStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "NotChecked", - "name": "NOT_CHECKED" - }, - { - "value": "NotDetected", - "name": "NOT_DETECTED" - }, - { - "value": "Detected", - "name": "DETECTED" + "type": "enum", + "members": { + "NOT_CHECKED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotChecked" + } + }, + "NOT_DETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotDetected" + } + }, + "DETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Detected" } - ] + } } }, "com.amazonaws.resiliencehub#DriftType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "ApplicationCompliance", - "name": "APPLICATION_COMPLIANCE" - }, - { - "value": "AppComponentResiliencyComplianceStatus", - "name": "APP_COMPONENT_RESILIENCY_COMPLIANCE_STATUS" + "type": "enum", + "members": { + "APPLICATION_COMPLIANCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ApplicationCompliance" } - ] + }, + "APP_COMPONENT_RESILIENCY_COMPLIANCE_STATUS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AppComponentResiliencyComplianceStatus" + } + } } }, "com.amazonaws.resiliencehub#EksNamespace": { @@ -5012,36 +5111,62 @@ "smithy.api#pattern": "^\\S{1,50}$" } }, - "com.amazonaws.resiliencehub#ErrorMessage": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 500 + "com.amazonaws.resiliencehub#ErrorDetail": { + "type": "structure", + "members": { + "errorMessage": { + "target": "com.amazonaws.resiliencehub#ErrorMessage", + "traits": { + "smithy.api#documentation": "

Provides additional information about the error.

" + } } + }, + "traits": { + "smithy.api#documentation": "

Indicates the error that was encountered while importing a resource.

" } }, - "com.amazonaws.resiliencehub#EstimatedCostTier": { + "com.amazonaws.resiliencehub#ErrorDetailList": { + "type": "list", + "member": { + "target": "com.amazonaws.resiliencehub#ErrorDetail" + } + }, + "com.amazonaws.resiliencehub#ErrorMessage": { "type": "string", "traits": { - "smithy.api#enum": [ - { - "value": "L1", - "name": "L1" - }, - { - "value": "L2", - "name": "L2" - }, - { - "value": "L3", - "name": "L3" - }, - { - "value": "L4", - "name": "L4" + "smithy.api#length": { + "min": 0, + "max": 500 + } + } + }, + "com.amazonaws.resiliencehub#EstimatedCostTier": { + "type": "enum", + "members": { + "L1": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "L1" + } + }, + "L2": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "L2" + } + }, + "L3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "L3" + } + }, + "L4": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "L4" } - ] + } } }, "com.amazonaws.resiliencehub#EventSubscription": { @@ -5085,37 +5210,43 @@ } }, "com.amazonaws.resiliencehub#EventType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "ScheduledAssessmentFailure", - "name": "SCHEDULED_ASSESSMENT_FAILURE" - }, - { - "value": "DriftDetected", - "name": "DRIFT_DETECTED" + "type": "enum", + "members": { + "SCHEDULED_ASSESSMENT_FAILURE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ScheduledAssessmentFailure" + } + }, + "DRIFT_DETECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DriftDetected" } - ] + } } }, "com.amazonaws.resiliencehub#ExcludeRecommendationReason": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "AlreadyImplemented", - "name": "ALREADY_IMPLEMENTED" - }, - { - "value": "NotRelevant", - "name": "NOT_RELEVANT" - }, - { - "value": "ComplexityOfImplementation", - "name": "COMPLEXITY_OF_IMPLEMENTATION" + "type": "enum", + "members": { + "ALREADY_IMPLEMENTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AlreadyImplemented" + } + }, + "NOT_RELEVANT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotRelevant" } - ] + }, + "COMPLEXITY_OF_IMPLEMENTATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ComplexityOfImplementation" + } + } } }, "com.amazonaws.resiliencehub#FailedGroupingRecommendationEntries": { @@ -5271,18 +5402,20 @@ } }, "com.amazonaws.resiliencehub#GroupingRecommendationConfidenceLevel": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "name": "HIGH", - "value": "High" - }, - { - "name": "MEDIUM", - "value": "Medium" + "type": "enum", + "members": { + "HIGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "High" + } + }, + "MEDIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Medium" } - ] + } } }, "com.amazonaws.resiliencehub#GroupingRecommendationList": { @@ -5292,45 +5425,55 @@ } }, "com.amazonaws.resiliencehub#GroupingRecommendationRejectionReason": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "name": "DISTINCT_BUSINESS_PURPOSE", - "value": "DistinctBusinessPurpose" - }, - { - "name": "SEPARATE_DATA_CONCERN", - "value": "SeparateDataConcern" - }, - { - "name": "DISTINCT_USER_GROUP_HANDLING", - "value": "DistinctUserGroupHandling" - }, - { - "name": "OTHER", - "value": "Other" + "type": "enum", + "members": { + "DISTINCT_BUSINESS_PURPOSE": { + "target": 
"smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DistinctBusinessPurpose" + } + }, + "SEPARATE_DATA_CONCERN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SeparateDataConcern" } - ] + }, + "DISTINCT_USER_GROUP_HANDLING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DistinctUserGroupHandling" + } + }, + "OTHER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Other" + } + } } }, "com.amazonaws.resiliencehub#GroupingRecommendationStatusType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "name": "ACCEPTED", - "value": "Accepted" - }, - { - "name": "REJECTED", - "value": "Rejected" - }, - { - "name": "PENDING_DECISION", - "value": "PendingDecision" + "type": "enum", + "members": { + "ACCEPTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Accepted" } - ] + }, + "REJECTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Rejected" + } + }, + "PENDING_DECISION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PendingDecision" + } + } } }, "com.amazonaws.resiliencehub#GroupingResource": { @@ -5383,30 +5526,38 @@ } }, "com.amazonaws.resiliencehub#HaArchitecture": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "MultiSite", - "name": "MULTI_SITE" - }, - { - "value": "WarmStandby", - "name": "WARM_STANDBY" - }, - { - "value": "PilotLight", - "name": "PILOT_LIGHT" - }, - { - "value": "BackupAndRestore", - "name": "BACKUP_AND_RESTORE" - }, - { - "value": "NoRecoveryPlan", - "name": "NO_RECOVERY_PLAN" + "type": "enum", + "members": { + "MULTI_SITE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MultiSite" + } + }, + "WARM_STANDBY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "WarmStandby" + } + }, + "PILOT_LIGHT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PilotLight" + } + }, + "BACKUP_AND_RESTORE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BackupAndRestore" + } + }, + "NO_RECOVERY_PLAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NoRecoveryPlan" } - ] + } } }, "com.amazonaws.resiliencehub#IamRoleArn": { @@ -5679,7 +5830,7 @@ } ], "traits": { - "smithy.api#documentation": "

List of compliance drifts that were detected while running an assessment.

", + "smithy.api#documentation": "

Indicates the list of compliance drifts that were detected while running an\n assessment.

", "smithy.api#http": { "method": "POST", "uri": "/list-app-assessment-compliance-drifts", @@ -6668,6 +6819,13 @@ "smithy.api#documentation": "

The application list is sorted based on the values of\n lastAppComplianceEvaluationTime field. By default, application list is sorted\n in ascending order. To sort the application list in descending order, set this field to\n True.

", "smithy.api#httpQuery": "reverseOrder" } + }, + "awsApplicationArn": { + "target": "com.amazonaws.resiliencehub#Arn", + "traits": { + "smithy.api#documentation": "

Amazon Resource Name (ARN) of Resource Groups group that is integrated with an AppRegistry application. For more information about ARNs, \nsee \n Amazon Resource Names (ARNs) in the \n Amazon Web Services General Reference guide.

", + "smithy.api#httpQuery": "awsApplicationArn" + } } } }, @@ -7471,33 +7629,37 @@ } }, "com.amazonaws.resiliencehub#PermissionModelType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "LegacyIAMUser", - "name": "LEGACY_IAM_USER" - }, - { - "value": "RoleBased", - "name": "ROLE_BASED" + "type": "enum", + "members": { + "LEGACY_IAM_USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "LegacyIAMUser" + } + }, + "ROLE_BASED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RoleBased" } - ] + } } }, "com.amazonaws.resiliencehub#PhysicalIdentifierType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Arn", - "name": "ARN" - }, - { - "value": "Native", - "name": "NATIVE" + "type": "enum", + "members": { + "ARN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Arn" + } + }, + "NATIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Native" } - ] + } } }, "com.amazonaws.resiliencehub#PhysicalResource": { @@ -7773,26 +7935,32 @@ } }, "com.amazonaws.resiliencehub#RecommendationComplianceStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "BreachedUnattainable", - "name": "BREACHED_UNATTAINABLE" - }, - { - "value": "BreachedCanMeet", - "name": "BREACHED_CAN_MEET" - }, - { - "value": "MetCanImprove", - "name": "MET_CAN_IMPROVE" - }, - { - "value": "MissingPolicy", - "name": "MISSING_POLICY" + "type": "enum", + "members": { + "BREACHED_UNATTAINABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BreachedUnattainable" } - ] + }, + "BREACHED_CAN_MEET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "BreachedCanMeet" + } + }, + "MET_CAN_IMPROVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MetCanImprove" + } + }, + "MISSING_POLICY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MissingPolicy" + } + } } }, "com.amazonaws.resiliencehub#RecommendationDisruptionCompliance": { @@ -7899,26 +8067,32 @@ } }, "com.amazonaws.resiliencehub#RecommendationStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Implemented", - "name": "IMPLEMENTED" - }, - { - "value": "Inactive", - "name": "INACTIVE" - }, - { - "value": "NotImplemented", - "name": "NOT_IMPLEMENTED" - }, - { - "value": "Excluded", - "name": "EXCLUDED" + "type": "enum", + "members": { + "IMPLEMENTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Implemented" + } + }, + "INACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Inactive" + } + }, + "NOT_IMPLEMENTED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotImplemented" + } + }, + "EXCLUDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Excluded" } - ] + } } }, "com.amazonaws.resiliencehub#RecommendationTemplate": { @@ -8026,26 +8200,32 @@ } }, "com.amazonaws.resiliencehub#RecommendationTemplateStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Pending", - "name": "PENDING" - }, - { - "value": "InProgress", - "name": "IN_PROGRESS" - }, - { - "value": "Failed", - "name": "FAILED" - }, - { - "value": "Success", - "name": "SUCCESS" + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Pending" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { 
+ "smithy.api#enumValue": "InProgress" } - ] + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Success" + } + } } }, "com.amazonaws.resiliencehub#RecommendationTemplateStatusList": { @@ -8274,22 +8454,26 @@ } }, "com.amazonaws.resiliencehub#RenderRecommendationType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Alarm", - "name": "ALARM" - }, - { - "value": "Sop", - "name": "SOP" - }, - { - "value": "Test", - "name": "TEST" + "type": "enum", + "members": { + "ALARM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Alarm" + } + }, + "SOP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Sop" + } + }, + "TEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Test" } - ] + } } }, "com.amazonaws.resiliencehub#RenderRecommendationTypeList": { @@ -8373,34 +8557,44 @@ } }, "com.amazonaws.resiliencehub#ResiliencyPolicyTier": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "MissionCritical", - "name": "MISSION_CRITICAL" - }, - { - "value": "Critical", - "name": "CRITICAL" - }, - { - "value": "Important", - "name": "IMPORTANT" - }, - { - "value": "CoreServices", - "name": "CORE_SERVICES" - }, - { - "value": "NonCritical", - "name": "NON_CRITICAL" - }, - { - "value": "NotApplicable", - "name": "NOT_APPLICABLE" + "type": "enum", + "members": { + "MISSION_CRITICAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MissionCritical" + } + }, + "CRITICAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Critical" + } + }, + "IMPORTANT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Important" + } + }, + "CORE_SERVICES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CoreServices" + } + }, + "NON_CRITICAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NonCritical" + } + }, + "NOT_APPLICABLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NotApplicable" } - ] + } } }, "com.amazonaws.resiliencehub#ResiliencyScore": { @@ -8433,26 +8627,32 @@ } }, "com.amazonaws.resiliencehub#ResiliencyScoreType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Compliance", - "name": "COMPLIANCE" - }, - { - "value": "Test", - "name": "TEST" - }, - { - "value": "Alarm", - "name": "ALARM" - }, - { - "value": "Sop", - "name": "SOP" + "type": "enum", + "members": { + "COMPLIANCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Compliance" + } + }, + "TEST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Test" + } + }, + "ALARM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Alarm" } - ] + }, + "SOP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Sop" + } + } } }, "com.amazonaws.resiliencehub#ResolveAppVersionResources": { @@ -8667,41 +8867,49 @@ } }, "com.amazonaws.resiliencehub#ResourceImportStatusType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "name": "PENDING", - "value": "Pending" - }, - { - "name": "IN_PROGRESS", - "value": "InProgress" - }, - { - "name": "FAILED", - "value": "Failed" - }, - { - "name": "SUCCESS", - "value": "Success" + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + 
"smithy.api#enumValue": "Pending" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InProgress" } - ] + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Success" + } + } } }, "com.amazonaws.resiliencehub#ResourceImportStrategyType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "AddOnly", - "name": "ADD_ONLY" - }, - { - "value": "ReplaceAll", - "name": "REPLACE_ALL" + "type": "enum", + "members": { + "ADD_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AddOnly" } - ] + }, + "REPLACE_ALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ReplaceAll" + } + } } }, "com.amazonaws.resiliencehub#ResourceMapping": { @@ -8769,34 +8977,44 @@ } }, "com.amazonaws.resiliencehub#ResourceMappingType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "CfnStack", - "name": "CFN_STACK" - }, - { - "value": "Resource", - "name": "RESOURCE" - }, - { - "value": "AppRegistryApp", - "name": "APP_REGISTRY_APP" - }, - { - "value": "ResourceGroup", - "name": "RESOURCE_GROUP" - }, - { - "value": "Terraform", - "name": "TERRAFORM" - }, - { - "value": "EKS", - "name": "EKS" + "type": "enum", + "members": { + "CFN_STACK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CfnStack" + } + }, + "RESOURCE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Resource" } - ] + }, + "APP_REGISTRY_APP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AppRegistryApp" + } + }, + "RESOURCE_GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ResourceGroup" + } + }, + "TERRAFORM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Terraform" + } + }, + "EKS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "EKS" + } + } } }, "com.amazonaws.resiliencehub#ResourceNotFoundException": { @@ -8825,41 +9043,49 @@ } }, "com.amazonaws.resiliencehub#ResourceResolutionStatusType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Pending", - "name": "PENDING" - }, - { - "value": "InProgress", - "name": "IN_PROGRESS" - }, - { - "value": "Failed", - "name": "FAILED" - }, - { - "value": "Success", - "name": "SUCCESS" + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Pending" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InProgress" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Success" } - ] + } } }, "com.amazonaws.resiliencehub#ResourceSourceType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "AppTemplate", - "name": "APP_TEMPLATE" - }, - { - "value": "Discovered", - "name": "DISCOVERED" + "type": "enum", + "members": { + "APP_TEMPLATE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AppTemplate" + } + }, + "DISCOVERED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Discovered" } - ] + } } }, "com.amazonaws.resiliencehub#ResourceType": { @@ -8869,26 +9095,32 @@ } }, 
"com.amazonaws.resiliencehub#ResourcesGroupingRecGenStatusType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "name": "PENDING", - "value": "Pending" - }, - { - "name": "IN_PROGRESS", - "value": "InProgress" - }, - { - "name": "FAILED", - "value": "Failed" - }, - { - "name": "SUCCESS", - "value": "Success" + "type": "enum", + "members": { + "PENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Pending" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InProgress" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Failed" + } + }, + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Success" } - ] + } } }, "com.amazonaws.resiliencehub#RetryAfterSeconds": { @@ -9062,14 +9294,14 @@ } }, "com.amazonaws.resiliencehub#SopServiceType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "SSM", - "name": "SSM" + "type": "enum", + "members": { + "SSM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SSM" } - ] + } } }, "com.amazonaws.resiliencehub#SpecReferenceId": { @@ -9421,18 +9653,20 @@ } }, "com.amazonaws.resiliencehub#TemplateFormat": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "CfnYaml", - "name": "CFN_YAML" - }, - { - "value": "CfnJson", - "name": "CFN_JSON" + "type": "enum", + "members": { + "CFN_YAML": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CfnYaml" } - ] + }, + "CFN_JSON": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CfnJson" + } + } } }, "com.amazonaws.resiliencehub#TerraformSource": { @@ -9544,45 +9778,55 @@ } }, "com.amazonaws.resiliencehub#TestRisk": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Small", - "name": "SMALL" - }, - { - "value": "Medium", - "name": "MEDIUM" - }, - { - "value": "High", - "name": "HIGH" + "type": "enum", + "members": { + "SMALL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Small" } - ] + }, + "MEDIUM": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Medium" + } + }, + "HIGH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "High" + } + } } }, "com.amazonaws.resiliencehub#TestType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "Software", - "name": "SOFTWARE" - }, - { - "value": "Hardware", - "name": "HARDWARE" - }, - { - "value": "AZ", - "name": "AZ" - }, - { - "value": "Region", - "name": "REGION" + "type": "enum", + "members": { + "SOFTWARE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Software" } - ] + }, + "HARDWARE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Hardware" + } + }, + "AZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AZ" + } + }, + "REGION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Region" + } + } } }, "com.amazonaws.resiliencehub#ThrottlingException": { @@ -9781,7 +10025,7 @@ "permissionModel": { "target": "com.amazonaws.resiliencehub#PermissionModel", "traits": { - "smithy.api#documentation": "

Defines the roles and credentials that Resilience Hub would use while creating\n an\n application, importing its resources, and running an assessment.

" + "smithy.api#documentation": "

Defines the roles and credentials that Resilience Hub would use while creating an\n application, importing its resources, and running an assessment.

" } }, "eventSubscriptions": { @@ -10172,8 +10416,7 @@ "item": { "target": "com.amazonaws.resiliencehub#UpdateRecommendationStatusItem", "traits": { - "smithy.api#documentation": "

The operational recommendation item.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The operational recommendation item.

" } }, "excluded": { diff --git a/models/resource-explorer-2.json b/models/resource-explorer-2.json index a69f93ec77..0340244fb3 100644 --- a/models/resource-explorer-2.json +++ b/models/resource-explorer-2.json @@ -650,7 +650,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the status of your account's Amazon Web Services service access, and validates the service\n linked role required to access the multi-account search feature. Only the management\n account or a delegated administrator with service access enabled can invoke this API\n call.

", + "smithy.api#documentation": "

Retrieves the status of your account's Amazon Web Services service access, and validates the service\n linked role required to access the multi-account search feature. Only the management\n account can invoke this API call.

", "smithy.api#http": { "method": "POST", "uri": "/GetAccountLevelServiceConfiguration" @@ -1186,6 +1186,124 @@ } } }, + "com.amazonaws.resourceexplorer2#ListResources": { + "type": "operation", + "input": { + "target": "com.amazonaws.resourceexplorer2#ListResourcesInput" + }, + "output": { + "target": "com.amazonaws.resourceexplorer2#ListResourcesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.resourceexplorer2#AccessDeniedException" + }, + { + "target": "com.amazonaws.resourceexplorer2#InternalServerException" + }, + { + "target": "com.amazonaws.resourceexplorer2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.resourceexplorer2#ThrottlingException" + }, + { + "target": "com.amazonaws.resourceexplorer2#UnauthorizedException" + }, + { + "target": "com.amazonaws.resourceexplorer2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of resources and their details that match the specified criteria. This query must \n use a view. If you don’t explicitly specify a view, then Resource Explorer uses the default view for the Amazon Web Services Region \n in which you call this operation.

", + "smithy.api#http": { + "method": "POST", + "uri": "/ListResources" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults", + "items": "Resources" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.resourceexplorer2#ListResourcesInput": { + "type": "structure", + "members": { + "Filters": { + "target": "com.amazonaws.resourceexplorer2#SearchFilter" + }, + "MaxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The maximum number of results that you want included on each page of the \n response. If you do not include this parameter, it defaults to a value appropriate to the \n operation. If additional items exist beyond those included in the current response, the \n NextToken response element is present and has a value (is not null). Include that \n value as the NextToken request parameter in the next call to the operation to get \n the next part of the results.

An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results.
", + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "ViewArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon resource name (ARN) of the view to use for the query. If you don't \n specify a value for this parameter, then the operation automatically uses the default view \n for the Amazon Web Services Region in which you called this operation. If the Region either doesn't have \n a default view or if you don't have permission to use the default view, then the operation \n fails with a 401 Unauthorized exception.

", + "smithy.api#length": { + "min": 0, + "max": 1000 + } + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The parameter for receiving additional results if you receive a \n NextToken response in a previous request. A NextToken response \n indicates that more output is available. Set this parameter to the value of the previous \n call's NextToken response to indicate where the output should continue \n from. The pagination tokens expire after 24 hours.

", + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resourceexplorer2#ListResourcesOutput": { + "type": "structure", + "members": { + "Resources": { + "target": "com.amazonaws.resourceexplorer2#ResourceList", + "traits": { + "smithy.api#documentation": "

The list of structures that describe the resources that match the query.

" + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

If present, indicates that more output is available than is \n included in the current response. Use this value in the NextToken request parameter \n in a subsequent call to the operation to get the next part of the output. You should repeat this \n until the NextToken response element comes back as null.\n The pagination tokens expire after 24 hours.

", + "smithy.api#length": { + "min": 1, + "max": 2048 + } + } + }, + "ViewArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the view that this operation used to perform the search.

", + "smithy.api#length": { + "min": 1, + "max": 1011 + } + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resourceexplorer2#ListSupportedResourceTypes": { "type": "operation", "input": { @@ -1461,7 +1579,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 1011 + "max": 1280 }, "smithy.api#sensitive": {} } @@ -1502,7 +1620,7 @@ "Service": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Service that owns the resource and is responsible for creating and updating\n it.

" + "smithy.api#documentation": "

The Amazon Web Services service that owns the resource and is responsible for creating and updating\n it.

" } }, "LastReportedAt": { @@ -1565,6 +1683,9 @@ { "target": "com.amazonaws.resourceexplorer2#ListIndexesForMembers" }, + { + "target": "com.amazonaws.resourceexplorer2#ListResources" + }, { "target": "com.amazonaws.resourceexplorer2#ListSupportedResourceTypes" }, @@ -2490,7 +2611,7 @@ "Service": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The Amazon Web Service that is associated with the resource type. This is the primary\n service that lets you create and interact with resources of this type.

" + "smithy.api#documentation": "

The Amazon Web Services service that is associated with the resource type. This is the primary\n service that lets you create and interact with resources of this type.

" } }, "ResourceType": { diff --git a/models/resource-groups.json b/models/resource-groups.json index e129ed3eca..dd96ff942b 100644 --- a/models/resource-groups.json +++ b/models/resource-groups.json @@ -55,10 +55,38 @@ "smithy.api#documentation": "

The Resource Groups settings for this Amazon Web Services account.

" } }, + "com.amazonaws.resourcegroups#ApplicationArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 1600 + }, + "smithy.api#pattern": "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/[a-zA-Z0-9_\\.-]{1,150}/[a-zA-Z0-9]{22,26}$" + } + }, + "com.amazonaws.resourcegroups#ApplicationTag": { + "type": "map", + "key": { + "target": "com.amazonaws.resourcegroups#ApplicationTagKey" + }, + "value": { + "target": "com.amazonaws.resourcegroups#ApplicationArn" + } + }, + "com.amazonaws.resourcegroups#ApplicationTagKey": { + "type": "string", + "traits": { + "smithy.api#pattern": "^awsApplication$" + } + }, "com.amazonaws.resourcegroups#Ardi": { "type": "service", "version": "2017-11-27", "operations": [ + { + "target": "com.amazonaws.resourcegroups#CancelTagSyncTask" + }, { "target": "com.amazonaws.resourcegroups#CreateGroup" }, @@ -80,21 +108,33 @@ { "target": "com.amazonaws.resourcegroups#GetTags" }, + { + "target": "com.amazonaws.resourcegroups#GetTagSyncTask" + }, { "target": "com.amazonaws.resourcegroups#GroupResources" }, + { + "target": "com.amazonaws.resourcegroups#ListGroupingStatuses" + }, { "target": "com.amazonaws.resourcegroups#ListGroupResources" }, { "target": "com.amazonaws.resourcegroups#ListGroups" }, + { + "target": "com.amazonaws.resourcegroups#ListTagSyncTasks" + }, { "target": "com.amazonaws.resourcegroups#PutGroupConfiguration" }, { "target": "com.amazonaws.resourcegroups#SearchResources" }, + { + "target": "com.amazonaws.resourcegroups#StartTagSyncTask" + }, { "target": "com.amazonaws.resourcegroups#Tag" }, @@ -126,7 +166,7 @@ "name": "resource-groups" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Resource Groups lets you organize Amazon Web Services resources such as Amazon Elastic Compute Cloud instances, Amazon Relational Database Service\n databases, and Amazon Simple Storage Service buckets into groups using criteria that you define as tags. A\n resource group is a collection of resources that match the resource types specified in a\n query, and share one or more tags or portions of tags. You can create a group of\n resources based on their roles in your cloud infrastructure, lifecycle stages, regions,\n application layers, or virtually any criteria. Resource Groups enable you to automate management\n tasks, such as those in Amazon Web Services Systems Manager Automation documents, on tag-related resources in\n Amazon Web Services Systems Manager. Groups of tagged resources also let you quickly view a custom console in\n Amazon Web Services Systems Manager that shows Config compliance and other monitoring data about member\n resources.

\n

To create a resource group, build a resource query, and specify tags that identify the\n criteria that members of the group have in common. Tags are key-value pairs.

\n

For more information about Resource Groups, see the Resource Groups User Guide.

\n

Resource Groups uses a REST-compliant API that you can use to perform the following types of\n operations.

\n
    \n
  • \n

    Create, Read, Update, and Delete (CRUD) operations on resource groups and\n resource query entities

    \n
  • \n
  • \n

    Applying, editing, and removing tags from resource groups

    \n
  • \n
  • \n

    Resolving resource group member ARNs so they can be returned as search\n results

    \n
  • \n
  • \n

    Getting data about resources that are members of a group

    \n
  • \n
  • \n

    Searching Amazon Web Services resources based on a resource query

    \n
  • \n
", + "smithy.api#documentation": "

Resource Groups lets you organize Amazon Web Services resources such as Amazon Elastic Compute Cloud instances, Amazon Relational Database Service\n databases, and Amazon Simple Storage Service buckets into groups using criteria that you define as tags. A\n resource group is a collection of resources that match the resource types specified in a\n query, and share one or more tags or portions of tags. You can create a group of\n resources based on their roles in your cloud infrastructure, lifecycle stages, regions,\n application layers, or virtually any criteria. Resource Groups enable you to automate management\n tasks, such as those in Amazon Web Services Systems Manager Automation documents, on tag-related resources in\n Amazon Web Services Systems Manager. Groups of tagged resources also let you quickly view a custom console in\n Amazon Web Services Systems Manager that shows Config compliance and other monitoring data about member\n resources.

\n

To create a resource group, build a resource query, and specify tags that identify the\n criteria that members of the group have in common. Tags are key-value pairs.

\n

For more information about Resource Groups, see the Resource Groups User Guide.

\n

Resource Groups uses a REST-compliant API that you can use to perform the following types of\n operations.

\n
    \n
  • \n

    Create, Read, Update, and Delete (CRUD) operations on resource groups and\n resource query entities

    \n
  • \n
  • \n

    Applying, editing, and removing tags from resource groups

    \n
  • \n
  • \n

    Resolving resource group member Amazon resource names (ARNs) so they can be returned as search\n results

    \n
  • \n
  • \n

    Getting data about resources that are members of a group

    \n
  • \n
  • \n

    Searching Amazon Web Services resources based on a resource query

    \n
  • \n
", "smithy.api#title": "AWS Resource Groups", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1147,6 +1187,58 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.resourcegroups#CancelTagSyncTask": { + "type": "operation", + "input": { + "target": "com.amazonaws.resourcegroups#CancelTagSyncTaskInput" + }, + "output": { + "target": "smithy.api#Unit" + }, + "errors": [ + { + "target": "com.amazonaws.resourcegroups#BadRequestException" + }, + { + "target": "com.amazonaws.resourcegroups#ForbiddenException" + }, + { + "target": "com.amazonaws.resourcegroups#InternalServerErrorException" + }, + { + "target": "com.amazonaws.resourcegroups#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.resourcegroups#TooManyRequestsException" + }, + { + "target": "com.amazonaws.resourcegroups#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Cancels the specified tag-sync task.

\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:CancelTagSyncTask on the application group

    \n
  • \n
  • \n

    \n resource-groups:DeleteGroup\n

    \n
  • \n
", + "smithy.api#http": { + "method": "POST", + "uri": "/cancel-tag-sync-task", + "code": 200 + } + } + }, + "com.amazonaws.resourcegroups#CancelTagSyncTaskInput": { + "type": "structure", + "members": { + "TaskArn": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the tag-sync task.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, "com.amazonaws.resourcegroups#CreateGroup": { "type": "operation", "input": { @@ -1185,7 +1277,7 @@ "type": "structure", "members": { "Name": { - "target": "com.amazonaws.resourcegroups#GroupName", + "target": "com.amazonaws.resourcegroups#CreateGroupName", "traits": { "smithy.api#documentation": "

The name of the group, which is the identifier of the group in other operations. You\n can't change the name of a resource group after you create it. A resource group name can\n consist of letters, numbers, hyphens, periods, and underscores. The name cannot start\n with AWS, aws, or any other possible capitalization; these are\n reserved. A resource group name must be unique within each Amazon Web Services Region in your Amazon Web Services\n account.

", "smithy.api#required": {} @@ -1214,12 +1306,40 @@ "traits": { "smithy.api#documentation": "

A configuration associates the resource group with an Amazon Web Services service and specifies how\n the service can interact with the resources in the group. A configuration is an array of\n GroupConfigurationItem elements. For details about the syntax of\n service configurations, see Service configurations for Resource Groups.

\n \n

A resource group can contain either a Configuration or a\n ResourceQuery, but not both.

\n
" } + }, + "Criticality": { + "target": "com.amazonaws.resourcegroups#Criticality", + "traits": { + "smithy.api#documentation": "

The critical rank of the application group on a scale of 1 to 10, with a \n rank of 1 being the most critical, and a rank of 10 being least critical.

" + } + }, + "Owner": { + "target": "com.amazonaws.resourcegroups#Owner", + "traits": { + "smithy.api#documentation": "

A name, email address or other identifier for the person or group \n who is considered the owner of this application group within your organization.

" + } + }, + "DisplayName": { + "target": "com.amazonaws.resourcegroups#DisplayName", + "traits": { + "smithy.api#documentation": "

The name of the application group, which you can change at any time.

" + } } }, "traits": { "smithy.api#input": {} } }, + "com.amazonaws.resourcegroups#CreateGroupName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 300 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_\\.-]+$" + } + }, "com.amazonaws.resourcegroups#CreateGroupOutput": { "type": "structure", "members": { @@ -1252,6 +1372,15 @@ "smithy.api#output": {} } }, + "com.amazonaws.resourcegroups#Criticality": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.resourcegroups#DeleteGroup": { "type": "operation", "input": { @@ -1302,9 +1431,9 @@ } }, "Group": { - "target": "com.amazonaws.resourcegroups#GroupString", + "target": "com.amazonaws.resourcegroups#GroupStringV2", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group to delete.

" + "smithy.api#documentation": "

The name or the Amazon resource name (ARN) of the resource group to delete.

" } } }, @@ -1336,6 +1465,16 @@ "smithy.api#pattern": "^[\\sa-zA-Z0-9_\\.-]*$" } }, + "com.amazonaws.resourcegroups#DisplayName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 300 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, "com.amazonaws.resourcegroups#ErrorCode": { "type": "string", "traits": { @@ -1360,7 +1499,7 @@ "ResourceArn": { "target": "com.amazonaws.resourcegroups#ResourceArn", "traits": { - "smithy.api#documentation": "

The ARN of the resource that failed to be added or removed.

" + "smithy.api#documentation": "

The Amazon resource name (ARN) of the resource that failed to be added or removed.

" } }, "ErrorMessage": { @@ -1527,7 +1666,7 @@ "Group": { "target": "com.amazonaws.resourcegroups#GroupString", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group for which you want to retrive the service\n configuration.

" + "smithy.api#documentation": "

The name or the Amazon resource name (ARN) of the resource group for which you want to retrieve the service\n configuration.

" } } }, @@ -1562,9 +1701,9 @@ } }, "Group": { - "target": "com.amazonaws.resourcegroups#GroupString", + "target": "com.amazonaws.resourcegroups#GroupStringV2", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group to retrieve.

" + "smithy.api#documentation": "

The name or the Amazon resource name (ARN) of the resource group to retrieve.

" } } }, @@ -1638,7 +1777,7 @@ "Group": { "target": "com.amazonaws.resourcegroups#GroupString", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group to query.

" + "smithy.api#documentation": "

The name or the Amazon resource name (ARN) of the resource group to query.

" } } }, @@ -1660,6 +1799,123 @@ "smithy.api#output": {} } }, + "com.amazonaws.resourcegroups#GetTagSyncTask": { + "type": "operation", + "input": { + "target": "com.amazonaws.resourcegroups#GetTagSyncTaskInput" + }, + "output": { + "target": "com.amazonaws.resourcegroups#GetTagSyncTaskOutput" + }, + "errors": [ + { + "target": "com.amazonaws.resourcegroups#BadRequestException" + }, + { + "target": "com.amazonaws.resourcegroups#ForbiddenException" + }, + { + "target": "com.amazonaws.resourcegroups#InternalServerErrorException" + }, + { + "target": "com.amazonaws.resourcegroups#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.resourcegroups#NotFoundException" + }, + { + "target": "com.amazonaws.resourcegroups#TooManyRequestsException" + }, + { + "target": "com.amazonaws.resourcegroups#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about a specified tag-sync task.

\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:GetTagSyncTask on the application group

    \n
  • \n
", + "smithy.api#http": { + "method": "POST", + "uri": "/get-tag-sync-task", + "code": 200 + } + } + }, + "com.amazonaws.resourcegroups#GetTagSyncTaskInput": { + "type": "structure", + "members": { + "TaskArn": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the tag-sync task.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resourcegroups#GetTagSyncTaskOutput": { + "type": "structure", + "members": { + "GroupArn": { + "target": "com.amazonaws.resourcegroups#GroupArnV2", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the application group.

" + } + }, + "GroupName": { + "target": "com.amazonaws.resourcegroups#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the application group.

" + } + }, + "TaskArn": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the tag-sync task.

" + } + }, + "TagKey": { + "target": "com.amazonaws.resourcegroups#TagKey", + "traits": { + "smithy.api#documentation": "

The tag key.

" + } + }, + "TagValue": { + "target": "com.amazonaws.resourcegroups#TagValue", + "traits": { + "smithy.api#documentation": "

The tag value.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.resourcegroups#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the role assumed by Resource Groups to tag and untag resources on your behalf.

\n

For more information about this role, review Tag-sync required permissions. \n

" + } + }, + "Status": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskStatus", + "traits": { + "smithy.api#documentation": "

The status of the tag-sync task.

\n

Valid values include:

\n
    \n
  • \n

    \n ACTIVE - The tag-sync task is actively managing resources in \n the application by adding or removing the awsApplication tag from resources \n when they are tagged or untagged with the specified tag key-value pair. \n

    \n
  • \n
  • \n

    \n ERROR - The tag-sync task is not actively managing resources \n in the application. Review the ErrorMessage for more information about \n resolving the error. \n

    \n
  • \n
" + } + }, + "ErrorMessage": { + "target": "com.amazonaws.resourcegroups#ErrorMessage", + "traits": { + "smithy.api#documentation": "

The specific error message in cases where the tag-sync task status\n is ERROR.

" + } + }, + "CreatedAt": { + "target": "com.amazonaws.resourcegroups#timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the tag-sync task was created.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resourcegroups#GetTags": { "type": "operation", "input": { @@ -1689,7 +1945,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of tags that are associated with a resource group, specified by an\n ARN.

\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:GetTags\n

    \n
  • \n
", + "smithy.api#documentation": "

Returns a list of tags that are associated with a resource group, specified by an\n Amazon resource name (ARN).

\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:GetTags\n

    \n
  • \n
", "smithy.api#http": { "method": "GET", "uri": "/resources/{Arn}/tags", @@ -1701,9 +1957,9 @@ "type": "structure", "members": { "Arn": { - "target": "com.amazonaws.resourcegroups#GroupArn", + "target": "com.amazonaws.resourcegroups#GroupArnV2", "traits": { - "smithy.api#documentation": "

The ARN of the resource group whose tags you want to retrieve.

", + "smithy.api#documentation": "

The Amazon resource name (ARN) of the resource group whose tags you want to retrieve.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1717,9 +1973,9 @@ "type": "structure", "members": { "Arn": { - "target": "com.amazonaws.resourcegroups#GroupArn", + "target": "com.amazonaws.resourcegroups#GroupArnV2", "traits": { - "smithy.api#documentation": "

The ARN of the tagged resource group.

" + "smithy.api#documentation": "

The Amazon resource name (ARN) of the tagged resource group.

" } }, "Tags": { @@ -1737,9 +1993,9 @@ "type": "structure", "members": { "GroupArn": { - "target": "com.amazonaws.resourcegroups#GroupArn", + "target": "com.amazonaws.resourcegroups#GroupArnV2", "traits": { - "smithy.api#documentation": "

The ARN of the resource group.

", + "smithy.api#documentation": "

The Amazon resource name (ARN) of the resource group.

", "smithy.api#required": {} } }, @@ -1755,6 +2011,30 @@ "traits": { "smithy.api#documentation": "

The description of the resource group.

" } + }, + "Criticality": { + "target": "com.amazonaws.resourcegroups#Criticality", + "traits": { + "smithy.api#documentation": "

The critical rank of the application group on a scale of 1 to 10, with a \n rank of 1 being the most critical, and a rank of 10 being least critical.

" + } + }, + "Owner": { + "target": "com.amazonaws.resourcegroups#Owner", + "traits": { + "smithy.api#documentation": "

A name, email address or other identifier for the person or group \n who is considered the owner of this application group within your organization.

" + } + }, + "DisplayName": { + "target": "com.amazonaws.resourcegroups#DisplayName", + "traits": { + "smithy.api#documentation": "

The name of the application group, which you can change at any time.

" + } + }, + "ApplicationTag": { + "target": "com.amazonaws.resourcegroups#ApplicationTag", + "traits": { + "smithy.api#documentation": "

A tag that defines the application group membership. This tag is only supported \n for application groups.

" + } } }, "traits": { @@ -1768,7 +2048,17 @@ "min": 12, "max": 1600 }, - "smithy.api#pattern": "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/[a-zA-Z0-9_\\.-]{1,300}$" + "smithy.api#pattern": "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$" + } + }, + "com.amazonaws.resourcegroups#GroupArnV2": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 1600 + }, + "smithy.api#pattern": "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$" } }, "com.amazonaws.resourcegroups#GroupConfiguration": { @@ -1961,6 +2251,24 @@ "traits": { "smithy.api#enumValue": "configuration-type" } + }, + "Owner": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "owner" + } + }, + "DisplayName": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "display-name" + } + }, + "Criticality": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "criticality" + } } } }, @@ -1969,9 +2277,9 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 128 + "max": 300 }, - "smithy.api#pattern": "^AWS::(AllSupported|[a-zA-Z0-9]+::[a-zA-Z0-9]+)$" + "smithy.api#pattern": "^AWS::(AllSupported|[a-zA-Z0-9]+::[a-zA-Z0-9]+)|[\\s\\p{L}0-9_\\.-]*$" } }, "com.amazonaws.resourcegroups#GroupFilterValues": { @@ -1998,7 +2306,31 @@ "GroupArn": { "target": "com.amazonaws.resourcegroups#GroupArn", "traits": { - "smithy.api#documentation": "

The ARN of the resource group.

" + "smithy.api#documentation": "

The Amazon resource name (ARN) of the resource group.

" + } + }, + "Description": { + "target": "com.amazonaws.resourcegroups#Description", + "traits": { + "smithy.api#documentation": "

The description of the application group.

" + } + }, + "Criticality": { + "target": "com.amazonaws.resourcegroups#Criticality", + "traits": { + "smithy.api#documentation": "

The critical rank of the application group on a scale of 1 to 10, with a \n rank of 1 being the most critical, and a rank of 10 being least critical.

" + } + }, + "Owner": { + "target": "com.amazonaws.resourcegroups#Owner", + "traits": { + "smithy.api#documentation": "

A name, email address or other identifier for the person or group \n who is considered the owner of this group within your organization.

" + } + }, + "DisplayName": { + "target": "com.amazonaws.resourcegroups#DisplayName", + "traits": { + "smithy.api#documentation": "

The name of the application group, which you can change at any time.

" } } }, @@ -2080,7 +2412,7 @@ "min": 1, "max": 300 }, - "smithy.api#pattern": "^[a-zA-Z0-9_\\.-]+$" + "smithy.api#pattern": "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}$" } }, "com.amazonaws.resourcegroups#GroupParameterList": { @@ -2140,7 +2472,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds the specified resources to the specified group.

\n \n

You can use this operation with only resource groups that are configured with the\n following types:

\n
    \n
  • \n

    \n AWS::EC2::HostManagement\n

    \n
  • \n
  • \n

    \n AWS::EC2::CapacityReservationPool\n

    \n
  • \n
\n

Other resource group type and resource types aren't currently supported by this\n operation.

\n
\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:GroupResources\n

    \n
  • \n
", + "smithy.api#documentation": "

Adds the specified resources to the specified group.

\n \n

You can use this operation only with resource groups that are configured with the following types:

\n
    \n
  • \n

    \n AWS::EC2::HostManagement\n

    \n
  • \n
  • \n

    \n AWS::EC2::CapacityReservationPool\n

    \n
  • \n
  • \n

    \n AWS::ResourceGroups::ApplicationGroup\n

    \n
  • \n
\n

Other resource group types and resource types are not currently supported by this\n operation.

\n
\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:GroupResources\n

    \n
  • \n
", "smithy.api#http": { "method": "POST", "uri": "/group-resources", @@ -2152,16 +2484,16 @@ "type": "structure", "members": { "Group": { - "target": "com.amazonaws.resourcegroups#GroupString", + "target": "com.amazonaws.resourcegroups#GroupStringV2", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group to add resources to.

", + "smithy.api#documentation": "

The name or the Amazon resource name (ARN) of the resource group to add resources to.

", "smithy.api#required": {} } }, "ResourceArns": { "target": "com.amazonaws.resourcegroups#ResourceArnList", "traits": { - "smithy.api#documentation": "

The list of ARNs of the resources to be added to the group.

", + "smithy.api#documentation": "

The list of Amazon resource names (ARNs) of the resources to be added to the group.

", "smithy.api#required": {} } } @@ -2176,19 +2508,19 @@ "Succeeded": { "target": "com.amazonaws.resourcegroups#ResourceArnList", "traits": { - "smithy.api#documentation": "

A list of ARNs of the resources that this operation successfully added to the\n group.

" + "smithy.api#documentation": "

A list of Amazon resource names (ARNs) of the resources that this operation successfully added to the\n group.

" } }, "Failed": { "target": "com.amazonaws.resourcegroups#FailedResourceList", "traits": { - "smithy.api#documentation": "

A list of ARNs of any resources that this operation failed to add to the group.

" + "smithy.api#documentation": "

A list of Amazon resource names (ARNs) of any resources that this operation failed to add to the group.

" } }, "Pending": { "target": "com.amazonaws.resourcegroups#PendingResourceList", "traits": { - "smithy.api#documentation": "

A list of ARNs of any resources that this operation is still in the process adding to\n the group. These pending additions continue asynchronously. You can check the status of\n pending additions by using the \n ListGroupResources\n \n operation, and checking the Resources array in the response and the\n Status field of each object in that array.

" + "smithy.api#documentation": "

A list of Amazon resource names (ARNs) of any resources that this operation is still in the process of adding to\n the group. These pending additions continue asynchronously. You can check the status of\n pending additions by using the \n ListGroupResources\n \n operation, and checking the Resources array in the response and the\n Status field of each object in that array.

" } } }, @@ -2203,16 +2535,122 @@ "min": 1, "max": 1600 }, - "smithy.api#pattern": "^(arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/)?[a-zA-Z0-9_\\.-]{1,300}$" + "smithy.api#pattern": "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$" } }, - "com.amazonaws.resourcegroups#InternalServerErrorException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.resourcegroups#ErrorMessage" - } - }, + "com.amazonaws.resourcegroups#GroupStringV2": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1600 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}|arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/([a-zA-Z0-9_\\.-]{1,300}|[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26})$" + } + }, + "com.amazonaws.resourcegroups#GroupingStatus": { + "type": "enum", + "members": { + "SUCCESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCESS" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "SKIPPED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SKIPPED" + } + } + } + }, + "com.amazonaws.resourcegroups#GroupingStatusesItem": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.resourcegroups#ResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of a resource.

" + } + }, + "Action": { + "target": "com.amazonaws.resourcegroups#GroupingType", + "traits": { + "smithy.api#documentation": "

Describes the resource grouping action with values of \n GROUP or UNGROUP.

" + } + }, + "Status": { + "target": "com.amazonaws.resourcegroups#GroupingStatus", + "traits": { + "smithy.api#documentation": "

Describes the resource grouping status with values of \n SUCCESS, FAILED, IN_PROGRESS, \n or SKIPPED.

" + } + }, + "ErrorMessage": { + "target": "com.amazonaws.resourcegroups#ErrorMessage", + "traits": { + "smithy.api#documentation": "

A message that explains the ErrorCode.

" + } + }, + "ErrorCode": { + "target": "com.amazonaws.resourcegroups#ErrorCode", + "traits": { + "smithy.api#documentation": "

Specifies the error code that was raised.

" + } + }, + "UpdatedAt": { + "target": "com.amazonaws.resourcegroups#timestamp", + "traits": { + "smithy.api#documentation": "

A timestamp of when the status was last updated.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The information about a grouping or ungrouping resource action.

" + } + }, + "com.amazonaws.resourcegroups#GroupingStatusesList": { + "type": "list", + "member": { + "target": "com.amazonaws.resourcegroups#GroupingStatusesItem" + } + }, + "com.amazonaws.resourcegroups#GroupingType": { + "type": "enum", + "members": { + "GROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "GROUP" + } + }, + "UNGROUP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UNGROUP" + } + } + } + }, + "com.amazonaws.resourcegroups#InternalServerErrorException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.resourcegroups#ErrorMessage" + } + }, "traits": { "smithy.api#documentation": "

An internal error occurred while processing the request. Try again later.

", "smithy.api#error": "server", @@ -2251,7 +2689,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of ARNs of the resources that are members of a specified resource\n group.

\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:ListGroupResources\n

    \n
  • \n
  • \n

    \n cloudformation:DescribeStacks\n

    \n
  • \n
  • \n

    \n cloudformation:ListStackResources\n

    \n
  • \n
  • \n

    \n tag:GetResources\n

    \n
  • \n
", + "smithy.api#documentation": "

Returns a list of Amazon resource names (ARNs) of the resources that are members of a specified resource\n group.

\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:ListGroupResources\n

    \n
  • \n
  • \n

    \n cloudformation:DescribeStacks\n

    \n
  • \n
  • \n

    \n cloudformation:ListStackResources\n

    \n
  • \n
  • \n

    \n tag:GetResources\n

    \n
  • \n
", "smithy.api#http": { "method": "POST", "uri": "/list-group-resources", @@ -2278,9 +2716,9 @@ } }, "Group": { - "target": "com.amazonaws.resourcegroups#GroupString", + "target": "com.amazonaws.resourcegroups#GroupStringV2", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group

" + "smithy.api#documentation": "

The name or the Amazon resource name (ARN) of the resource group.

" } }, "Filters": { @@ -2364,6 +2802,168 @@ "smithy.api#output": {} } }, + "com.amazonaws.resourcegroups#ListGroupingStatuses": { + "type": "operation", + "input": { + "target": "com.amazonaws.resourcegroups#ListGroupingStatusesInput" + }, + "output": { + "target": "com.amazonaws.resourcegroups#ListGroupingStatusesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.resourcegroups#BadRequestException" + }, + { + "target": "com.amazonaws.resourcegroups#ForbiddenException" + }, + { + "target": "com.amazonaws.resourcegroups#InternalServerErrorException" + }, + { + "target": "com.amazonaws.resourcegroups#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.resourcegroups#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the status of the last grouping or ungrouping action for \n each resource in the specified application group.

", + "smithy.api#http": { + "method": "POST", + "uri": "/list-grouping-statuses", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "GroupingStatuses", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.resourcegroups#ListGroupingStatusesFilter": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.resourcegroups#ListGroupingStatusesFilterName", + "traits": { + "smithy.api#documentation": "

The name of the filter. Filter names are case-sensitive.

", + "smithy.api#required": {} + } + }, + "Values": { + "target": "com.amazonaws.resourcegroups#ListGroupingStatusesFilterValues", + "traits": { + "smithy.api#documentation": "

One or more filter values. Allowed filter values vary by resource filter name, and are case-sensitive.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A filter name and value pair that is used to obtain more specific results from the list of grouping statuses.

" + } + }, + "com.amazonaws.resourcegroups#ListGroupingStatusesFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.resourcegroups#ListGroupingStatusesFilter" + } + }, + "com.amazonaws.resourcegroups#ListGroupingStatusesFilterName": { + "type": "enum", + "members": { + "Status": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "status" + } + }, + "ResourceArn": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "resource-arn" + } + } + } + }, + "com.amazonaws.resourcegroups#ListGroupingStatusesFilterValue": { + "type": "string", + "traits": { + "smithy.api#pattern": "^SUCCESS|FAILED|IN_PROGRESS|SKIPPED|arn:aws(-[a-z]+)*:[a-z0-9\\-]*:([a-z]{2}(-[a-z]+)+-\\d{1})?:([0-9]{12})?:.+$" + } + }, + "com.amazonaws.resourcegroups#ListGroupingStatusesFilterValues": { + "type": "list", + "member": { + "target": "com.amazonaws.resourcegroups#ListGroupingStatusesFilterValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.resourcegroups#ListGroupingStatusesInput": { + "type": "structure", + "members": { + "Group": { + "target": "com.amazonaws.resourcegroups#GroupStringV2", + "traits": { + "smithy.api#documentation": "

The application group identifier, expressed as an Amazon resource name (ARN) or the application group name.

", + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.resourcegroups#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of resources and their statuses returned in the \n response.

" + } + }, + "Filters": { + "target": "com.amazonaws.resourcegroups#ListGroupingStatusesFilterList", + "traits": { + "smithy.api#documentation": "

The filter name and value pair that is used to return more \n specific results from a list of resources.

" + } + }, + "NextToken": { + "target": "com.amazonaws.resourcegroups#NextToken", + "traits": { + "smithy.api#documentation": "

The parameter for receiving additional results if you receive a \n NextToken response in a previous request. A NextToken \n response indicates that more output is available. Set this parameter to the \n value provided by a previous call's NextToken response to indicate \n where the output should continue from.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resourcegroups#ListGroupingStatusesOutput": { + "type": "structure", + "members": { + "Group": { + "target": "com.amazonaws.resourcegroups#GroupStringV2", + "traits": { + "smithy.api#documentation": "

The application group identifier, expressed as an Amazon resource name (ARN) or the application group name.

" + } + }, + "GroupingStatuses": { + "target": "com.amazonaws.resourcegroups#GroupingStatusesList", + "traits": { + "smithy.api#documentation": "

Returns details about the grouping or ungrouping status of the \n resources in the specified application group.

" + } + }, + "NextToken": { + "target": "com.amazonaws.resourcegroups#NextToken", + "traits": { + "smithy.api#documentation": "

If present, indicates that more output is available than is included in the current response. \n Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. \n You should repeat this until the NextToken response element comes back as null.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resourcegroups#ListGroups": { "type": "operation", "input": { @@ -2410,7 +3010,7 @@ "Filters": { "target": "com.amazonaws.resourcegroups#GroupFilterList", "traits": { - "smithy.api#documentation": "

Filters, formatted as GroupFilter objects, that you want to apply to\n a ListGroups operation.

\n
    \n
  • \n

    \n resource-type - Filter the results to include only those resource groups that have the specified\n resource type in their ResourceTypeFilter. For example, AWS::EC2::Instance would\n return any resource group with a ResourceTypeFilter that includes\n AWS::EC2::Instance.

    \n
  • \n
  • \n

    \n configuration-type - Filter the results to include only those\n groups that have the specified configuration types attached. The current\n supported values are:

    \n
      \n
    • \n

      \n AWS::AppRegistry::Application\n

      \n
    • \n
    • \n

      \n AWS::AppRegistry::ApplicationResourceGroups\n

      \n
    • \n
    • \n

      \n AWS::CloudFormation::Stack\n

      \n
    • \n
    • \n

      \n AWS::EC2::CapacityReservationPool\n

      \n
    • \n
    • \n

      \n AWS::EC2::HostManagement\n

      \n
    • \n
    • \n

      \n AWS::NetworkFirewall::RuleGroup\n

      \n
    • \n
    \n
  • \n
" + "smithy.api#documentation": "

Filters, formatted as GroupFilter objects, that you want to apply to\n a ListGroups operation.

\n
    \n
  • \n

    \n resource-type - Filter the results to include only those resource groups that have the specified\n resource type in their ResourceTypeFilter. For example, AWS::EC2::Instance would\n return any resource group with a ResourceTypeFilter that includes\n AWS::EC2::Instance.

    \n
  • \n
  • \n

    \n configuration-type - Filter the results to include only those\n groups that have the specified configuration types attached. The current\n supported values are:

    \n
      \n
    • \n

      \n AWS::ResourceGroups::ApplicationGroup\n

      \n
    • \n
    • \n

      \n AWS::AppRegistry::Application\n

      \n
    • \n
    • \n

      \n AWS::AppRegistry::ApplicationResourceGroups\n

      \n
    • \n
    • \n

      \n AWS::CloudFormation::Stack\n

      \n
    • \n
    • \n

      \n AWS::EC2::CapacityReservationPool\n

      \n
    • \n
    • \n

      \n AWS::EC2::HostManagement\n

      \n
    • \n
    • \n

      \n AWS::NetworkFirewall::RuleGroup\n

      \n
    • \n
    \n
  • \n
" } }, "MaxResults": { @@ -2461,6 +3061,121 @@ "smithy.api#output": {} } }, + "com.amazonaws.resourcegroups#ListTagSyncTasks": { + "type": "operation", + "input": { + "target": "com.amazonaws.resourcegroups#ListTagSyncTasksInput" + }, + "output": { + "target": "com.amazonaws.resourcegroups#ListTagSyncTasksOutput" + }, + "errors": [ + { + "target": "com.amazonaws.resourcegroups#BadRequestException" + }, + { + "target": "com.amazonaws.resourcegroups#ForbiddenException" + }, + { + "target": "com.amazonaws.resourcegroups#InternalServerErrorException" + }, + { + "target": "com.amazonaws.resourcegroups#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.resourcegroups#TooManyRequestsException" + }, + { + "target": "com.amazonaws.resourcegroups#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of tag-sync tasks.

\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:ListTagSyncTasks with the group passed in the filters as the resource \n or * if using no filters

    \n
  • \n
", + "smithy.api#http": { + "method": "POST", + "uri": "/list-tag-sync-tasks", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "TagSyncTasks", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.resourcegroups#ListTagSyncTasksFilter": { + "type": "structure", + "members": { + "GroupArn": { + "target": "com.amazonaws.resourcegroups#GroupArnV2", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the application group.

" + } + }, + "GroupName": { + "target": "com.amazonaws.resourcegroups#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the application group.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Returns tag-sync tasks filtered by the Amazon resource name (ARN) or name of a \n specified application group.

" + } + }, + "com.amazonaws.resourcegroups#ListTagSyncTasksFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.resourcegroups#ListTagSyncTasksFilter" + } + }, + "com.amazonaws.resourcegroups#ListTagSyncTasksInput": { + "type": "structure", + "members": { + "Filters": { + "target": "com.amazonaws.resourcegroups#ListTagSyncTasksFilterList", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) or name of the application group for which you want to return a \n list of tag-sync tasks.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.resourcegroups#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be included in the response.

" + } + }, + "NextToken": { + "target": "com.amazonaws.resourcegroups#NextToken", + "traits": { + "smithy.api#documentation": "

The parameter for receiving additional results if you receive a \n NextToken response in a previous request. A NextToken \n response indicates that more output is available. Set this parameter to the \n value provided by a previous call's NextToken response to indicate \n where the output should continue from.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resourcegroups#ListTagSyncTasksOutput": { + "type": "structure", + "members": { + "TagSyncTasks": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskList", + "traits": { + "smithy.api#documentation": "

A list of tag-sync tasks and information about each task.

" + } + }, + "NextToken": { + "target": "com.amazonaws.resourcegroups#NextToken", + "traits": { + "smithy.api#documentation": "

If present, indicates that more output is available than is included in the current response. \n Use this value in the NextToken request parameter in a subsequent call to the operation to get the next part of the output. \n You should repeat this until the NextToken response element comes back as null.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resourcegroups#MaxResults": { "type": "integer", "traits": { @@ -2506,6 +3221,16 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.resourcegroups#Owner": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 300 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, "com.amazonaws.resourcegroups#PendingResource": { "type": "structure", "members": { @@ -2569,7 +3294,7 @@ "Group": { "target": "com.amazonaws.resourcegroups#GroupString", "traits": { - "smithy.api#documentation": "

The name or ARN of the resource group with the configuration that you want to\n update.

" + "smithy.api#documentation": "

The name or Amazon resource name (ARN) of the resource group with the configuration that you want to\n update.

" } }, "Configuration": { @@ -2767,7 +3492,7 @@ "ResourceArn": { "target": "com.amazonaws.resourcegroups#ResourceArn", "traits": { - "smithy.api#documentation": "

The ARN of a resource.

" + "smithy.api#documentation": "

The Amazon resource name (ARN) of a resource.

" } }, "ResourceType": { @@ -2793,7 +3518,7 @@ "Type": { "target": "com.amazonaws.resourcegroups#QueryType", "traits": { - "smithy.api#documentation": "

The type of the query to perform. This can have one of two values:

\n
    \n
  • \n

    \n \n CLOUDFORMATION_STACK_1_0:\n Specifies that you\n want the group to contain the members of an CloudFormation stack. The Query\n contains a StackIdentifier element with an ARN for a CloudFormation\n stack.

    \n
  • \n
  • \n

    \n \n TAG_FILTERS_1_0:\n Specifies that you want the\n group to include resource that have tags that match the query.

    \n
  • \n
", + "smithy.api#documentation": "

The type of the query to perform. This can have one of two values:

\n
    \n
  • \n

    \n \n CLOUDFORMATION_STACK_1_0:\n Specifies that you\n want the group to contain the members of a CloudFormation stack. The Query\n contains a StackIdentifier element with an Amazon resource name (ARN) for a CloudFormation\n stack.

    \n
  • \n
  • \n

    \n \n TAG_FILTERS_1_0:\n Specifies that you want the\n group to include resources that have tags that match the query.

    \n
  • \n
", "smithy.api#required": {} } }, @@ -2840,6 +3565,16 @@ "smithy.api#pattern": "^AWS::[a-zA-Z0-9]+::\\w+$" } }, + "com.amazonaws.resourcegroups#RoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" + } + }, "com.amazonaws.resourcegroups#SearchResources": { "type": "operation", "input": { @@ -2936,6 +3671,126 @@ "smithy.api#output": {} } }, + "com.amazonaws.resourcegroups#StartTagSyncTask": { + "type": "operation", + "input": { + "target": "com.amazonaws.resourcegroups#StartTagSyncTaskInput" + }, + "output": { + "target": "com.amazonaws.resourcegroups#StartTagSyncTaskOutput" + }, + "errors": [ + { + "target": "com.amazonaws.resourcegroups#BadRequestException" + }, + { + "target": "com.amazonaws.resourcegroups#ForbiddenException" + }, + { + "target": "com.amazonaws.resourcegroups#InternalServerErrorException" + }, + { + "target": "com.amazonaws.resourcegroups#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.resourcegroups#NotFoundException" + }, + { + "target": "com.amazonaws.resourcegroups#TooManyRequestsException" + }, + { + "target": "com.amazonaws.resourcegroups#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new tag-sync task to onboard and sync resources tagged with a specific tag key-value pair to an \n application.

\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:StartTagSyncTask on the application group

    \n
  • \n
  • \n

    \n resource-groups:CreateGroup\n

    \n
  • \n
  • \n

    \n iam:PassRole on the role provided in the request

    \n
  • \n
", + "smithy.api#http": { + "method": "POST", + "uri": "/start-tag-sync-task", + "code": 200 + } + } + }, + "com.amazonaws.resourcegroups#StartTagSyncTaskInput": { + "type": "structure", + "members": { + "Group": { + "target": "com.amazonaws.resourcegroups#GroupStringV2", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) or name of the application group for which you want to create a tag-sync task.

", + "smithy.api#required": {} + } + }, + "TagKey": { + "target": "com.amazonaws.resourcegroups#TagKey", + "traits": { + "smithy.api#documentation": "

The tag key. Resources tagged with this tag key-value pair will be added to \n the application. If a resource with this tag is later untagged, the tag-sync task removes\n the resource from the application.

", + "smithy.api#required": {} + } + }, + "TagValue": { + "target": "com.amazonaws.resourcegroups#TagValue", + "traits": { + "smithy.api#documentation": "

The tag value. Resources tagged with this tag key-value pair will be added to \n the application. If a resource with this tag is later untagged, the tag-sync task removes\n the resource from the application.

", + "smithy.api#required": {} + } + }, + "RoleArn": { + "target": "com.amazonaws.resourcegroups#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the role assumed by the service to tag and untag resources on your behalf.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.resourcegroups#StartTagSyncTaskOutput": { + "type": "structure", + "members": { + "GroupArn": { + "target": "com.amazonaws.resourcegroups#GroupArnV2", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the application group for which you want to add or remove resources.

" + } + }, + "GroupName": { + "target": "com.amazonaws.resourcegroups#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the application group to onboard and sync resources.

" + } + }, + "TaskArn": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the new tag-sync task.

" + } + }, + "TagKey": { + "target": "com.amazonaws.resourcegroups#TagKey", + "traits": { + "smithy.api#documentation": "

The tag key of the tag-sync task.

" + } + }, + "TagValue": { + "target": "com.amazonaws.resourcegroups#TagValue", + "traits": { + "smithy.api#documentation": "

The tag value of the tag-sync task.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.resourcegroups#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the role assumed by the service to tag and untag resources on your behalf.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.resourcegroups#Tag": { "type": "operation", "input": { @@ -2965,7 +3820,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds tags to a resource group with the specified ARN. Existing tags on a resource\n group are not changed if they are not specified in the request parameters.

\n \n

Do not store personally identifiable information (PII) or other confidential or\n sensitive information in tags. We use tags to provide you with billing and\n administration services. Tags are not intended to be used for private or sensitive\n data.

\n
\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:Tag\n

    \n
  • \n
", + "smithy.api#documentation": "

Adds tags to a resource group with the specified Amazon resource name (ARN). Existing tags on a resource\n group are not changed if they are not specified in the request parameters.

\n \n

Do not store personally identifiable information (PII) or other confidential or\n sensitive information in tags. We use tags to provide you with billing and\n administration services. Tags are not intended to be used for private or sensitive\n data.

\n
\n

\n Minimum permissions\n

\n

To run this command, you must have the following permissions:

\n
    \n
  • \n

    \n resource-groups:Tag\n

    \n
  • \n
", "smithy.api#http": { "method": "PUT", "uri": "/resources/{Arn}/tags", @@ -2977,9 +3832,9 @@ "type": "structure", "members": { "Arn": { - "target": "com.amazonaws.resourcegroups#GroupArn", + "target": "com.amazonaws.resourcegroups#GroupArnV2", "traits": { - "smithy.api#documentation": "

The ARN of the resource group to which to add tags.

", + "smithy.api#documentation": "

The Amazon resource name (ARN) of the resource group to which to add tags.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3016,9 +3871,9 @@ "type": "structure", "members": { "Arn": { - "target": "com.amazonaws.resourcegroups#GroupArn", + "target": "com.amazonaws.resourcegroups#GroupArnV2", "traits": { - "smithy.api#documentation": "

The ARN of the tagged resource.

" + "smithy.api#documentation": "

The Amazon resource name (ARN) of the tagged resource.

" } }, "Tags": { @@ -3032,6 +3887,101 @@ "smithy.api#output": {} } }, + "com.amazonaws.resourcegroups#TagSyncTaskArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 1600 + }, + "smithy.api#pattern": "^arn:aws(-[a-z]+)*:resource-groups:[a-z]{2}(-[a-z]+)+-\\d{1}:[0-9]{12}:group/[a-zA-Z0-9_\\.-]{1,150}/[a-z0-9]{26}/tag-sync-task/[a-z0-9]{26}$" + } + }, + "com.amazonaws.resourcegroups#TagSyncTaskItem": { + "type": "structure", + "members": { + "GroupArn": { + "target": "com.amazonaws.resourcegroups#GroupArnV2", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the application group.

" + } + }, + "GroupName": { + "target": "com.amazonaws.resourcegroups#GroupName", + "traits": { + "smithy.api#documentation": "

The name of the application group.

" + } + }, + "TaskArn": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the tag-sync task.

" + } + }, + "TagKey": { + "target": "com.amazonaws.resourcegroups#TagKey", + "traits": { + "smithy.api#documentation": "

The tag key.

" + } + }, + "TagValue": { + "target": "com.amazonaws.resourcegroups#TagValue", + "traits": { + "smithy.api#documentation": "

The tag value.

" + } + }, + "RoleArn": { + "target": "com.amazonaws.resourcegroups#RoleArn", + "traits": { + "smithy.api#documentation": "

The Amazon resource name (ARN) of the role assumed by the service to tag and untag resources on your behalf.

" + } + }, + "Status": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskStatus", + "traits": { + "smithy.api#documentation": "

The status of the tag-sync task.

\n

Valid values include:

\n
    \n
  • \n

    \n ACTIVE - The tag-sync task is actively managing resources in \n the application by adding or removing the awsApplication tag from resources \n when they are tagged or untagged with the specified tag key-value pair. \n

    \n
  • \n
  • \n

    \n ERROR - The tag-sync task is not actively managing resources \n in the application. Review the ErrorMessage for more information about \n resolving the error. \n

    \n
  • \n
" + } + }, + "ErrorMessage": { + "target": "com.amazonaws.resourcegroups#ErrorMessage", + "traits": { + "smithy.api#documentation": "

The specific error message in cases where the tag-sync task status\n is ERROR.

" + } + }, + "CreatedAt": { + "target": "com.amazonaws.resourcegroups#timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of when the tag-sync task was created.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about a tag-sync task.

" + } + }, + "com.amazonaws.resourcegroups#TagSyncTaskList": { + "type": "list", + "member": { + "target": "com.amazonaws.resourcegroups#TagSyncTaskItem" + } + }, + "com.amazonaws.resourcegroups#TagSyncTaskStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ACTIVE" + } + }, + "ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ERROR" + } + } + } + }, "com.amazonaws.resourcegroups#TagValue": { "type": "string", "traits": { @@ -3118,16 +4068,16 @@ "type": "structure", "members": { "Group": { - "target": "com.amazonaws.resourcegroups#GroupString", + "target": "com.amazonaws.resourcegroups#GroupStringV2", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group from which to remove the resources.

", + "smithy.api#documentation": "

The name or the Amazon resource name (ARN) of the resource group from which to remove the resources.

", "smithy.api#required": {} } }, "ResourceArns": { "target": "com.amazonaws.resourcegroups#ResourceArnList", "traits": { - "smithy.api#documentation": "

The ARNs of the resources to be removed from the group.

", + "smithy.api#documentation": "

The Amazon resource names (ARNs) of the resources to be removed from the group.

", "smithy.api#required": {} } } @@ -3203,9 +4153,9 @@ "type": "structure", "members": { "Arn": { - "target": "com.amazonaws.resourcegroups#GroupArn", + "target": "com.amazonaws.resourcegroups#GroupArnV2", "traits": { - "smithy.api#documentation": "

The ARN of the resource group from which to remove tags. The command removed both the\n specified keys and any values associated with those keys.

", + "smithy.api#documentation": "

The Amazon resource name (ARN) of the resource group from which to remove tags. The command removed both the\n specified keys and any values associated with those keys.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -3226,9 +4176,9 @@ "type": "structure", "members": { "Arn": { - "target": "com.amazonaws.resourcegroups#GroupArn", + "target": "com.amazonaws.resourcegroups#GroupArnV2", "traits": { - "smithy.api#documentation": "

The ARN of the resource group from which tags have been removed.

" + "smithy.api#documentation": "

The Amazon resource name (ARN) of the resource group from which tags have been removed.

" } }, "Keys": { @@ -3282,7 +4232,7 @@ "GroupLifecycleEventsDesiredStatus": { "target": "com.amazonaws.resourcegroups#GroupLifecycleEventsDesiredStatus", "traits": { - "smithy.api#documentation": "

Specifies whether you want to turn group lifecycle events on or off.

" + "smithy.api#documentation": "

Specifies whether you want to turn group lifecycle events on or off.

\n

You can't turn on group lifecycle events if your resource groups quota is greater than 2,000.

" } } }, @@ -3354,9 +4304,9 @@ } }, "Group": { - "target": "com.amazonaws.resourcegroups#GroupString", + "target": "com.amazonaws.resourcegroups#GroupStringV2", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group to modify.

" + "smithy.api#documentation": "

The name or the ARN of the resource group to update.

" } }, "Description": { @@ -3364,6 +4314,24 @@ "traits": { "smithy.api#documentation": "

The new description that you want to update the resource group with. Descriptions can\n contain letters, numbers, hyphens, underscores, periods, and spaces.

" } + }, + "Criticality": { + "target": "com.amazonaws.resourcegroups#Criticality", + "traits": { + "smithy.api#documentation": "

The critical rank of the application group on a scale of 1 to 10, with a \n rank of 1 being the most critical, and a rank of 10 being least critical.

" + } + }, + "Owner": { + "target": "com.amazonaws.resourcegroups#Owner", + "traits": { + "smithy.api#documentation": "

A name, email address or other identifier for the person or group \n who is considered as the owner of this application group within your organization.

" + } + }, + "DisplayName": { + "target": "com.amazonaws.resourcegroups#DisplayName", + "traits": { + "smithy.api#documentation": "

The name of the application group, which you can change at any time.

" + } } }, "traits": { @@ -3436,7 +4404,7 @@ "Group": { "target": "com.amazonaws.resourcegroups#GroupString", "traits": { - "smithy.api#documentation": "

The name or the ARN of the resource group to query.

" + "smithy.api#documentation": "

The name or the Amazon resource name (ARN) of the resource group to query.

" } }, "ResourceQuery": { @@ -3464,6 +4432,9 @@ "traits": { "smithy.api#output": {} } + }, + "com.amazonaws.resourcegroups#timestamp": { + "type": "timestamp" } } } diff --git a/models/robomaker.json b/models/robomaker.json index dcdd2970fa..bce47bd0a1 100644 --- a/models/robomaker.json +++ b/models/robomaker.json @@ -94,7 +94,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes one or more worlds in a batch operation.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Deletes one or more worlds in a batch operation.

", "smithy.api#http": { "method": "POST", "uri": "/batchDeleteWorlds", @@ -154,7 +154,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes one or more simulation jobs.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes one or more simulation jobs.

", "smithy.api#http": { "method": "POST", "uri": "/batchDescribeSimulationJob", @@ -255,7 +255,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Cancels the specified deployment job.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

Cancels the specified deployment job.

", "smithy.api#http": { "method": "POST", "uri": "/cancelDeploymentJob", @@ -314,7 +314,7 @@ } ], "traits": { - "smithy.api#documentation": "

Cancels the specified simulation job.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Cancels the specified simulation job.

", "smithy.api#http": { "method": "POST", "uri": "/cancelSimulationJob", @@ -345,7 +345,7 @@ } ], "traits": { - "smithy.api#documentation": "

Cancels a simulation job batch. When you cancel a simulation job batch, you are also\n cancelling all of the active simulation jobs created as part of the batch.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Cancels a simulation job batch. When you cancel a simulation job batch, you are also\n cancelling all of the active simulation jobs created as part of the batch.

", "smithy.api#http": { "method": "POST", "uri": "/cancelSimulationJobBatch", @@ -420,7 +420,7 @@ } ], "traits": { - "smithy.api#documentation": "

Cancels the specified export job.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Cancels the specified export job.

", "smithy.api#http": { "method": "POST", "uri": "/cancelWorldExportJob", @@ -473,7 +473,7 @@ } ], "traits": { - "smithy.api#documentation": "

Cancels the specified world generator job.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Cancels the specified world generator job.

", "smithy.api#http": { "method": "POST", "uri": "/cancelWorldGenerationJob", @@ -646,7 +646,7 @@ "smithy.api#deprecated": { "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Deploys a specific version of a robot application to robots in a fleet.

\n \n

This API is no longer supported and will throw an error if used.

\n
\n

The robot application must have a numbered applicationVersion for\n consistency reasons. To create a new version, use\n CreateRobotApplicationVersion or see Creating a Robot Application Version.

\n \n

After 90 days, deployment jobs expire and will be deleted. They will no longer be\n accessible.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page.

\n
\n

Deploys a specific version of a robot application to robots in a fleet.

\n

The robot application must have a numbered applicationVersion for\n consistency reasons. To create a new version, use CreateRobotApplicationVersion or see \n Creating a Robot Application Version.\n

\n \n

After 90 days, deployment jobs expire and will be deleted. They will no longer be\n accessible.

\n
", "smithy.api#http": { "method": "POST", "uri": "/createDeploymentJob", @@ -790,7 +790,7 @@ "smithy.api#deprecated": { "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Creates a fleet, a logical group of robots running the same robot application.

\n \n

This API is no longer supported and will throw an error if used.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page.

\n
\n

Creates a fleet, a logical group of robots running the same robot application.

", "smithy.api#http": { "method": "POST", "uri": "/createFleet", @@ -886,7 +886,7 @@ "smithy.api#deprecated": { "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Creates a robot.

\n \n

This API is no longer supported and will throw an error if used.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page.

\n
\n

Creates a robot.

", "smithy.api#http": { "method": "POST", "uri": "/createRobot", @@ -923,7 +923,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a robot application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Creates a robot application.

", "smithy.api#http": { "method": "POST", "uri": "/createRobotApplication", @@ -950,7 +950,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

The robot software suite (ROS distribuition) used by the robot application.

", + "smithy.api#documentation": "

The robot software suite used by the robot application.

", "smithy.api#required": {} } }, @@ -1001,7 +1001,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

The robot software suite (ROS distribution) used by the robot application.

" + "smithy.api#documentation": "

The robot software suite used by the robot application.

" } }, "lastUpdatedAt": { @@ -1059,7 +1059,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a version of a robot application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Creates a version of a robot application.

", "smithy.api#http": { "method": "POST", "uri": "/createRobotApplicationVersion", @@ -1130,7 +1130,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

The robot software suite (ROS distribution) used by the robot application.

" + "smithy.api#documentation": "

The robot software suite used by the robot application.

" } }, "lastUpdatedAt": { @@ -1270,7 +1270,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a simulation application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Creates a simulation application.

", "smithy.api#http": { "method": "POST", "uri": "/createSimulationApplication", @@ -1304,7 +1304,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

The robot software suite (ROS distribution) used by the simulation application.

", + "smithy.api#documentation": "

The robot software suite used by the simulation application.

", "smithy.api#required": {} } }, @@ -1367,7 +1367,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

Information about the robot software suite (ROS distribution).

" + "smithy.api#documentation": "

Information about the robot software suite.

" } }, "renderingEngine": { @@ -1431,7 +1431,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a simulation application with a specific revision id.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Creates a simulation application with a specific revision id.

", "smithy.api#http": { "method": "POST", "uri": "/createSimulationApplicationVersion", @@ -1508,7 +1508,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

Information about the robot software suite (ROS distribution).

" + "smithy.api#documentation": "

Information about the robot software suite.

" } }, "renderingEngine": { @@ -1572,7 +1572,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a simulation job.

\n \n

After 90 days, simulation jobs expire and will be deleted. They will no longer be\n accessible.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Creates a simulation job.

\n \n

After 90 days, simulation jobs expire and will be deleted. They will no longer be\n accessible.

\n
", "smithy.api#http": { "method": "POST", "uri": "/createSimulationJob", @@ -1823,7 +1823,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a world export job.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Creates a world export job.

", "smithy.api#http": { "method": "POST", "uri": "/createWorldExportJob", @@ -1957,7 +1957,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates worlds using the specified template.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Creates worlds using the specified template.

", "smithy.api#http": { "method": "POST", "uri": "/createWorldGenerationJob", @@ -2097,7 +2097,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a world template.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Creates a world template.

", "smithy.api#http": { "method": "POST", "uri": "/createWorldTemplate", @@ -2333,7 +2333,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Deletes a fleet.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

Deletes a fleet.

", "smithy.api#http": { "method": "POST", "uri": "/deleteFleet", @@ -2392,7 +2392,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Deletes a robot.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

Deletes a robot.

", "smithy.api#http": { "method": "POST", "uri": "/deleteRobot", @@ -2420,7 +2420,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a robot application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Deletes a robot application.

", "smithy.api#http": { "method": "POST", "uri": "/deleteRobotApplication", @@ -2504,7 +2504,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a simulation application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Deletes a simulation application.

", "smithy.api#http": { "method": "POST", "uri": "/deleteSimulationApplication", @@ -2563,7 +2563,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a world template.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Deletes a world template.

", "smithy.api#http": { "method": "POST", "uri": "/deleteWorldTemplate", @@ -3003,7 +3003,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Deregisters a robot.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

Deregisters a robot.

", "smithy.api#http": { "method": "POST", "uri": "/deregisterRobot", @@ -3085,7 +3085,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Describes a deployment job.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

Describes a deployment job.

", "smithy.api#http": { "method": "POST", "uri": "/describeDeploymentJob", @@ -3208,7 +3208,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Describes a fleet.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

Describes a fleet.

", "smithy.api#http": { "method": "POST", "uri": "/describeFleet", @@ -3319,7 +3319,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Describes a robot.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

Describes a robot.

", "smithy.api#http": { "method": "POST", "uri": "/describeRobot", @@ -3350,7 +3350,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a robot application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes a robot application.

", "smithy.api#http": { "method": "POST", "uri": "/describeRobotApplication", @@ -3409,7 +3409,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

The robot software suite (ROS distribution) used by the robot application.

" + "smithy.api#documentation": "

The robot software suite used by the robot application.

" } }, "revisionId": { @@ -3559,7 +3559,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a simulation application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes a simulation application.

", "smithy.api#http": { "method": "POST", "uri": "/describeSimulationApplication", @@ -3624,7 +3624,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

Information about the robot software suite (ROS distribution).

" + "smithy.api#documentation": "

Information about the robot software suite.

" } }, "renderingEngine": { @@ -3691,7 +3691,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a simulation job.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes a simulation job.

", "smithy.api#http": { "method": "POST", "uri": "/describeSimulationJob", @@ -3719,7 +3719,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a simulation job batch.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes a simulation job batch.

", "smithy.api#http": { "method": "POST", "uri": "/describeSimulationJobBatch", @@ -3996,7 +3996,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a world.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes a world.

", "smithy.api#http": { "method": "POST", "uri": "/describeWorld", @@ -4027,7 +4027,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a world export job.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes a world export job.

", "smithy.api#http": { "method": "POST", "uri": "/describeWorldExportJob", @@ -4138,7 +4138,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a world generation job.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes a world generation job.

", "smithy.api#http": { "method": "POST", "uri": "/describeWorldGenerationJob", @@ -4317,7 +4317,7 @@ } ], "traits": { - "smithy.api#documentation": "

Describes a world template.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Describes a world template.

", "smithy.api#http": { "method": "POST", "uri": "/describeWorldTemplate", @@ -4710,7 +4710,7 @@ } ], "traits": { - "smithy.api#documentation": "

Gets the world template body.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Gets the world template body.

", "smithy.api#http": { "method": "POST", "uri": "/getWorldTemplateBody", @@ -4936,7 +4936,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

\n Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs.\n

", "smithy.api#http": { "method": "POST", "uri": "/listDeploymentJobs", @@ -5028,7 +5028,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Returns a list of fleets. You can optionally provide filters to retrieve specific fleets.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

\n Returns a list of fleets. You can optionally provide filters to retrieve specific fleets.\n

", "smithy.api#http": { "method": "POST", "uri": "/listFleets", @@ -5114,7 +5114,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of robot application. You can optionally provide filters to retrieve\n specific robot applications.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Returns a list of robot applications. You can optionally provide filters to retrieve\n specific robot applications.

", "smithy.api#http": { "method": "POST", "uri": "/listRobotApplications", @@ -5206,7 +5206,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Returns a list of robots. You can optionally provide filters to retrieve specific robots.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

\n Returns a list of robots. You can optionally provide filters to retrieve specific robots.\n

", "smithy.api#http": { "method": "POST", "uri": "/listRobots", @@ -5292,7 +5292,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of simulation applications. You can optionally provide filters to\n retrieve specific simulation applications.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Returns a list of simulation applications. You can optionally provide filters to\n retrieve specific simulation applications.

", "smithy.api#http": { "method": "POST", "uri": "/listSimulationApplications", @@ -5375,7 +5375,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list simulation job batches. You can optionally provide filters to retrieve\n specific simulation batch jobs.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Returns a list of simulation job batches. You can optionally provide filters to retrieve\n specific simulation batch jobs.

", "smithy.api#http": { "method": "POST", "uri": "/listSimulationJobBatches", @@ -5455,7 +5455,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of simulation jobs. You can optionally provide filters to retrieve\n specific simulation jobs.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Returns a list of simulation jobs. You can optionally provide filters to retrieve\n specific simulation jobs.

", "smithy.api#http": { "method": "POST", "uri": "/listSimulationJobs", @@ -5539,7 +5539,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all tags on a AWS RoboMaker resource.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Lists all tags on an AWS RoboMaker resource.

", "smithy.api#http": { "method": "GET", "uri": "/tags/{resourceArn}", @@ -5597,7 +5597,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists world export jobs.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Lists world export jobs.

", "smithy.api#http": { "method": "POST", "uri": "/listWorldExportJobs", @@ -5678,7 +5678,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists world generator jobs.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Lists world generator jobs.

", "smithy.api#http": { "method": "POST", "uri": "/listWorldGenerationJobs", @@ -5759,7 +5759,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists world templates.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Lists world templates.

", "smithy.api#http": { "method": "POST", "uri": "/listWorldTemplates", @@ -5833,7 +5833,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists worlds.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Lists worlds.

", "smithy.api#http": { "method": "POST", "uri": "/listWorlds", @@ -6154,7 +6154,7 @@ "smithy.api#deprecated": { "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Registers a robot with a fleet.

\n \n

This API is no longer supported and will throw an error if used.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Registers a robot with a fleet.

\n \n

This API is no longer supported and will throw an error if used. For more information, see the January 31, 2022 update in the Support policy page.

\n
", "smithy.api#http": { "method": "POST", "uri": "/registerRobot", @@ -6313,7 +6313,7 @@ } ], "traits": { - "smithy.api#documentation": "

Restarts a running simulation job.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Restarts a running simulation job.

", "smithy.api#http": { "method": "POST", "uri": "/restartSimulationJob", @@ -6533,7 +6533,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

Information about a robot software suite (ROS distribution).

" + "smithy.api#documentation": "

Information about a robot software suite.

" } } }, @@ -6650,18 +6650,18 @@ "name": { "target": "com.amazonaws.robomaker#RobotSoftwareSuiteType", "traits": { - "smithy.api#documentation": "

The name of the robot software suite (ROS distribution).

" + "smithy.api#documentation": "

The name of the robot software suite. General is the only supported value.

" } }, "version": { "target": "com.amazonaws.robomaker#RobotSoftwareSuiteVersionType", "traits": { - "smithy.api#documentation": "

The version of the robot software suite (ROS distribution).

" + "smithy.api#documentation": "

The version of the robot software suite. Not applicable for General software suite.

" } } }, "traits": { - "smithy.api#documentation": "

Information about a robot software suite (ROS distribution).

" + "smithy.api#documentation": "

Information about a robot software suite.

" } }, "com.amazonaws.robomaker#RobotSoftwareSuiteType": { @@ -6937,7 +6937,7 @@ "worldConfigs": { "target": "com.amazonaws.robomaker#WorldConfigs", "traits": { - "smithy.api#documentation": "

A list of world configurations.

" + "smithy.api#documentation": "

A list of world configurations.

\n \n

This API is no longer supported and will throw an error if used.

\n
" } }, "useDefaultUploadConfigurations": { @@ -7029,7 +7029,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

Information about a robot software suite (ROS distribution).

" + "smithy.api#documentation": "

Information about a robot software suite.

" } }, "simulationSoftwareSuite": { @@ -7267,7 +7267,7 @@ "lastUpdatedAt": { "target": "com.amazonaws.robomaker#LastUpdatedAt", "traits": { - "smithy.api#documentation": "

The time, in milliseconds since the epoch, when the simulation job batch was last\n updated.

" + "smithy.api#documentation": "

The time, in milliseconds since the epoch, when the simulation job batch was last updated.\n

" } }, "createdAt": { @@ -7717,13 +7717,13 @@ "name": { "target": "com.amazonaws.robomaker#SimulationSoftwareSuiteType", "traits": { - "smithy.api#documentation": "

The name of the simulation software suite.

" + "smithy.api#documentation": "

The name of the simulation software suite. SimulationRuntime is the only supported value.

" } }, "version": { "target": "com.amazonaws.robomaker#SimulationSoftwareSuiteVersionType", "traits": { - "smithy.api#documentation": "

The version of the simulation software suite.

" + "smithy.api#documentation": "

The version of the simulation software suite. Not applicable for SimulationRuntime.

" } } }, @@ -7875,7 +7875,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts a new simulation job batch. The batch is defined using one or more\n SimulationJobRequest objects.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Starts a new simulation job batch. The batch is defined using one or more\n SimulationJobRequest objects.

", "smithy.api#http": { "method": "POST", "uri": "/startSimulationJobBatch", @@ -8038,7 +8038,7 @@ "smithy.api#deprecated": { "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." }, - "smithy.api#documentation": "

Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment.

\n \n

This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.

\n
", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n \n

This API is no longer supported. For more information, see the May 2, 2022 update in the Support policy page.

\n
\n

\n Synchronizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment.\n

", "smithy.api#http": { "method": "POST", "uri": "/syncDeploymentJob", @@ -8185,7 +8185,7 @@ } ], "traits": { - "smithy.api#documentation": "

Adds or edits tags for a AWS RoboMaker resource.

\n

Each tag consists of a tag key and a tag value. Tag keys and tag values are both\n required, but tag values can be empty strings.

\n

For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management\n User Guide.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Adds or edits tags for a AWS RoboMaker resource.

\n

Each tag consists of a tag key and a tag value. Tag keys and tag values are both\n required, but tag values can be empty strings.

\n

For information about the rules that apply to tag keys and tag values, see User-Defined Tag Restrictions in the AWS Billing and Cost Management\n User Guide.

", "smithy.api#http": { "method": "POST", "uri": "/tags/{resourceArn}", @@ -8407,7 +8407,7 @@ } ], "traits": { - "smithy.api#documentation": "

Removes the specified tags from the specified AWS RoboMaker resource.

\n

To remove a tag, specify the tag key. To change the tag value of an existing tag key,\n use \n TagResource\n .

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Removes the specified tags from the specified AWS RoboMaker resource.

\n

To remove a tag, specify the tag key. To change the tag value of an existing tag key,\n use \n TagResource\n .

", "smithy.api#http": { "method": "DELETE", "uri": "/tags/{resourceArn}", @@ -8472,7 +8472,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a robot application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Updates a robot application.

", "smithy.api#http": { "method": "POST", "uri": "/updateRobotApplication", @@ -8499,7 +8499,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

The robot software suite (ROS distribution) used by the robot application.

", + "smithy.api#documentation": "

The robot software suite used by the robot application.

", "smithy.api#required": {} } }, @@ -8550,7 +8550,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

The robot software suite (ROS distribution) used by the robot application.

" + "smithy.api#documentation": "

The robot software suite used by the robot application.

" } }, "lastUpdatedAt": { @@ -8602,7 +8602,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a simulation application.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Updates a simulation application.

", "smithy.api#http": { "method": "POST", "uri": "/updateSimulationApplication", @@ -8636,7 +8636,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

Information about the robot software suite (ROS distribution).

", + "smithy.api#documentation": "

Information about the robot software suite.

", "smithy.api#required": {} } }, @@ -8699,7 +8699,7 @@ "robotSoftwareSuite": { "target": "com.amazonaws.robomaker#RobotSoftwareSuite", "traits": { - "smithy.api#documentation": "

Information about the robot software suite (ROS distribution).

" + "smithy.api#documentation": "

Information about the robot software suite.

" } }, "renderingEngine": { @@ -8754,7 +8754,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates a world template.

", + "smithy.api#documentation": "\n

End of support notice: On September 10, 2025, Amazon Web Services\n will discontinue support for Amazon Web Services RoboMaker. After September 10, 2025, you will\n no longer be able to access the Amazon Web Services RoboMaker console or Amazon Web Services RoboMaker resources. \n For more information on transitioning to Batch to help run containerized\n simulations, visit https://aws.amazon.com/blogs/hpc/run-simulations-using-multiple-containers-in-a-single-aws-batch-job/.\n

\n
\n

Updates a world template.

", "smithy.api#http": { "method": "POST", "uri": "/updateWorldTemplate", @@ -8850,7 +8850,7 @@ "name": { "target": "com.amazonaws.robomaker#Name", "traits": { - "smithy.api#documentation": "

A prefix that specifies where files will be uploaded in Amazon S3. It is appended to the\n simulation output location to determine the final path.

\n

For example, if your simulation output location is s3://my-bucket and your\n upload configuration name is robot-test, your files will be uploaded to\n s3://my-bucket///robot-test.

", + "smithy.api#documentation": "

A prefix that specifies where files will be uploaded in Amazon S3. It is appended to the\n simulation output location to determine the final path.

\n

For example, if your simulation output location is s3://amzn-s3-demo-bucket and your\n upload configuration name is robot-test, your files will be uploaded to\n s3://amzn-s3-demo-bucket///robot-test.

", "smithy.api#required": {} } }, diff --git a/models/route53resolver.json b/models/route53resolver.json index 0ca0cefc6d..531e53abe8 100644 --- a/models/route53resolver.json +++ b/models/route53resolver.json @@ -1057,7 +1057,7 @@ "DestinationArn": { "target": "com.amazonaws.route53resolver#DestinationArn", "traits": { - "smithy.api#documentation": "

diff --git a/models/route53resolver.json b/models/route53resolver.json
index 0ca0cefc6d..531e53abe8 100644
--- a/models/route53resolver.json
+++ b/models/route53resolver.json
@@ -1057,7 +1057,7 @@
       "DestinationArn": {
         "target": "com.amazonaws.route53resolver#DestinationArn",
         "traits": {
-          "smithy.api#documentation": "The ARN of the resource that you want Resolver to send query logs. You can send query logs to an S3 bucket, a CloudWatch Logs log group, \n\t\t\tor a Kinesis Data Firehose delivery stream. Examples of valid values include the following: S3 bucket: arn:aws:s3:::examplebucket (you can optionally append a file prefix to the end of the ARN, for example arn:aws:s3:::examplebucket/development/). CloudWatch Logs log group: arn:aws:logs:us-west-1:123456789012:log-group:/mystack-testgroup-12ABC1AB12A1:*. Kinesis Data Firehose delivery stream: arn:aws:kinesis:us-east-2:0123456789:stream/my_stream_name.",
+          "smithy.api#documentation": "The ARN of the resource that you want Resolver to send query logs. You can send query logs to an S3 bucket, a CloudWatch Logs log group, \n\t\t\tor a Kinesis Data Firehose delivery stream. Examples of valid values include the following: S3 bucket: arn:aws:s3:::amzn-s3-demo-bucket (you can optionally append a file prefix to the end of the ARN, for example arn:aws:s3:::amzn-s3-demo-bucket/development/). CloudWatch Logs log group: arn:aws:logs:us-west-1:123456789012:log-group:/mystack-testgroup-12ABC1AB12A1:*. Kinesis Data Firehose delivery stream: arn:aws:kinesis:us-east-2:0123456789:stream/my_stream_name.",
           "smithy.api#required": {}
         }
       },
@@ -7961,6 +7961,15 @@
         "target": "com.amazonaws.route53resolver#ResourceId"
       }
     },
+    "com.amazonaws.route53resolver#ServerNameIndication": {
+      "type": "string",
+      "traits": {
+        "smithy.api#length": {
+          "min": 0,
+          "max": 255
+        }
+      }
+    },
     "com.amazonaws.route53resolver#ServicePrinciple": {
       "type": "string",
       "traits": {
@@ -8206,6 +8215,12 @@
       "traits": {
         "smithy.api#documentation": "\n\t\t\tThe protocols for the Resolver endpoints. DoH-FIPS is applicable for inbound endpoints only.\n\t\t For an inbound endpoint you can apply the protocols as follows: Do53 and DoH in combination. Do53 and DoH-FIPS in combination. Do53 alone. DoH alone. DoH-FIPS alone. None, which is treated as Do53. For an outbound endpoint you can apply the protocols as follows: Do53 and DoH in combination. Do53 alone. DoH alone. None, which is treated as Do53."
       }
+      },
+      "ServerNameIndication": {
+        "target": "com.amazonaws.route53resolver#ServerNameIndication",
+        "traits": {
+          "smithy.api#documentation": "\n\t\t\tThe Server Name Indication of the DoH server that you want to forward queries to. \n\t\t\tThis is only used if the Protocol of the TargetAddress is DoH.\n\t\t"
+        }
       }
     },
     "traits": {

@@ -8648,7 +8663,7 @@
       "Qtype": {
         "target": "com.amazonaws.route53resolver#Qtype",
         "traits": {
-          "smithy.api#documentation": "\n\t\t\tThe DNS query type you want the rule to evaluate. Allowed values are;\n\t\t \n\t\t\t\tA: Returns an IPv4 address. AAAA: Returns an Ipv6 address. CAA: Restricts CAs that can create SSL/TLS certifications for the domain. CNAME: Returns another domain name. DS: Record that identifies the DNSSEC signing key of a delegated zone. MX: Specifies mail servers. NAPTR: Regular-expression-based rewriting of domain names. NS: Authoritative name servers. PTR: Maps an IP address to a domain name. SOA: Start of authority record for the zone. SPF: Lists the servers authorized to send emails from a domain. SRV: Application specific values that identify servers. TXT: Verifies email senders and application-specific values. A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be\n\t\t\t\tdefined as TYPENUMBER, where the\n\t\t\t\tNUMBER can be 1-65334, for\n\t\t\t\texample, TYPE28. For more information, see \n\t\t\t\tList of DNS record types."
+          "smithy.api#documentation": "\n\t\t\tThe DNS query type you want the rule to evaluate. Allowed values are;\n\t\t \n\t\t\t\tA: Returns an IPv4 address. AAAA: Returns an Ipv6 address. CAA: Restricts CAs that can create SSL/TLS certifications for the domain. CNAME: Returns another domain name. DS: Record that identifies the DNSSEC signing key of a delegated zone. MX: Specifies mail servers. NAPTR: Regular-expression-based rewriting of domain names. NS: Authoritative name servers. PTR: Maps an IP address to a domain name. SOA: Start of authority record for the zone. SPF: Lists the servers authorized to send emails from a domain. SRV: Application specific values that identify servers. TXT: Verifies email senders and application-specific values. A query type you define by using the DNS type ID, for example 28 for AAAA. The values must be\n\t\t\t\tdefined as TYPENUMBER, where the\n\t\t\t\tNUMBER can be 1-65334, for\n\t\t\t\texample, TYPE28. For more information, see \n\t\t\t\tList of DNS record types. \n If you set up a firewall BLOCK rule with action NXDOMAIN on query type equals AAAA, \n\t\t\t\t\tthis action will not be applied to synthetic IPv6 addresses generated when DNS64 is enabled.\n"
         }
       }
     },

diff --git a/models/s3.json b/models/s3.json
index ea40691cde..eb0e8e0f02 100644
--- a/models/s3.json
+++ b/models/s3.json
@@ -17661,6 +17661,12 @@
         "traits": {
           "smithy.api#documentation": "Date the bucket was created. This date can change when making changes to your bucket,\n such as editing its bucket policy."
         }
+      },
+      "BucketRegion": {
+        "target": "com.amazonaws.s3#BucketRegion",
+        "traits": {
+          "smithy.api#documentation": "\n BucketRegion indicates the Amazon Web Services region where the bucket is located. If the request contains at least one valid parameter, it is included in the response."
+        }
       }
     },
     "traits": {
@@ -17984,6 +17990,9 @@
     "com.amazonaws.s3#BucketName": {
       "type": "string"
     },
+    "com.amazonaws.s3#BucketRegion": {
+      "type": "string"
+    },
     "com.amazonaws.s3#BucketType": {
       "type": "enum",
       "members": {

@@ -18207,13 +18216,13 @@
         "ChecksumCRC32": {
           "target": "com.amazonaws.s3#ChecksumCRC32",
           "traits": {
-            "smithy.api#documentation": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide."
+            "smithy.api#documentation": "The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide."
           }
         },
         "ChecksumCRC32C": {
           "target": "com.amazonaws.s3#ChecksumCRC32C",
           "traits": {
-            "smithy.api#documentation": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide."
+            "smithy.api#documentation": "The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide."
           }
         },
         "ChecksumSHA1": {
@@ -18326,7 +18335,7 @@
       "target": "com.amazonaws.s3#CompleteMultipartUploadOutput"
     },
     "traits": {
-      "smithy.api#documentation": "

Completes a multipart upload by assembling previously uploaded parts.

\n

You first initiate the multipart upload and then upload all parts using the UploadPart\n operation or the UploadPartCopy\n operation. After successfully uploading all relevant parts of an upload, you call this\n CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts\n in ascending order by part number to create a new object. In the CompleteMultipartUpload \n request, you must provide the parts list and ensure that the parts list is complete.\n The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list,\n you must provide the PartNumber value and the ETag value that are returned after that part\n was uploaded.

\n

The processing of a CompleteMultipartUpload request could take several minutes to\n finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that\n specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white\n space characters to keep the connection from timing out. A request could fail after the\n initial 200 OK response has been sent. This means that a 200 OK response can\n contain either a success or an error. The error response might be embedded in the 200 OK response. \n If you call this API operation directly, make sure to design\n your application to parse the contents of the response and handle it appropriately. If you\n use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply\n error handling per your configuration settings (including automatically retrying the\n request as appropriate). If the condition persists, the SDKs throw an exception (or, for\n the SDKs that don't use exceptions, they return an error).

\n

Note that if CompleteMultipartUpload fails, applications should be prepared\n to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best\n Practices.

\n \n

You can't use Content-Type: application/x-www-form-urlencoded for the \n CompleteMultipartUpload requests. Also, if you don't provide a\n Content-Type header, CompleteMultipartUpload can still return a 200\n OK response.

\n
\n

For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For\n information about permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions in the Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
  • \n

    If you provide an additional checksum\n value in your MultipartUpload requests and the\n object is encrypted with Key Management Service, you must have permission to use the\n kms:Decrypt action for the\n CompleteMultipartUpload request to succeed.

    \n
  • \n
\n
\n
Special errors
\n
\n
    \n
  • \n

    Error Code: EntityTooSmall\n

    \n
      \n
    • \n

      Description: Your proposed upload is smaller than the minimum allowed object\n size. Each part must be at least 5 MB in size, except the last part.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: InvalidPart\n

    \n
      \n
    • \n

      Description: One or more of the specified parts could not be found. The part\n might not have been uploaded, or the specified ETag might not have\n matched the uploaded part's ETag.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: InvalidPartOrder\n

    \n
      \n
    • \n

      Description: The list of parts was not in ascending order. The parts list\n must be specified in order by part number.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: NoSuchUpload\n

    \n
      \n
    • \n

      Description: The specified multipart upload does not exist. The upload ID\n might be invalid, or the multipart upload might have been aborted or\n completed.

      \n
    • \n
    • \n

      HTTP Status Code: 404 Not Found

      \n
    • \n
    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CompleteMultipartUpload:

\n ", + "smithy.api#documentation": "

Completes a multipart upload by assembling previously uploaded parts.

\n

You first initiate the multipart upload and then upload all parts using the UploadPart\n operation or the UploadPartCopy\n operation. After successfully uploading all relevant parts of an upload, you call this\n CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts\n in ascending order by part number to create a new object. In the CompleteMultipartUpload \n request, you must provide the parts list and ensure that the parts list is complete.\n The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list,\n you must provide the PartNumber value and the ETag value that are returned after that part\n was uploaded.

\n

The processing of a CompleteMultipartUpload request could take several minutes to\n finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that\n specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white\n space characters to keep the connection from timing out. A request could fail after the\n initial 200 OK response has been sent. This means that a 200 OK response can\n contain either a success or an error. The error response might be embedded in the 200 OK response. \n If you call this API operation directly, make sure to design\n your application to parse the contents of the response and handle it appropriately. If you\n use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply\n error handling per your configuration settings (including automatically retrying the\n request as appropriate). If the condition persists, the SDKs throw an exception (or, for\n the SDKs that don't use exceptions, they return an error).

\n

Note that if CompleteMultipartUpload fails, applications should be prepared\n to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best\n Practices.

\n \n

You can't use Content-Type: application/x-www-form-urlencoded for the \n CompleteMultipartUpload requests. Also, if you don't provide a\n Content-Type header, CompleteMultipartUpload can still return a 200\n OK response.

\n
\n

For more information about multipart uploads, see Uploading Objects Using Multipart\n Upload in the Amazon S3\n User Guide.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - For\n information about permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions in the Amazon S3 User Guide.

    \n

    If you provide an additional checksum\n value in your MultipartUpload requests and the\n object is encrypted with Key Management Service, you must have permission to use the\n kms:Decrypt action for the\n CompleteMultipartUpload request to succeed.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n

    If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

    \n
  • \n
\n
\n
Special errors
\n
\n
    \n
  • \n

    Error Code: EntityTooSmall\n

    \n
      \n
    • \n

      Description: Your proposed upload is smaller than the minimum allowed object\n size. Each part must be at least 5 MB in size, except the last part.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: InvalidPart\n

    \n
      \n
    • \n

      Description: One or more of the specified parts could not be found. The part\n might not have been uploaded, or the specified ETag might not have\n matched the uploaded part's ETag.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: InvalidPartOrder\n

    \n
      \n
    • \n

      Description: The list of parts was not in ascending order. The parts list\n must be specified in order by part number.

      \n
    • \n
    • \n

      HTTP Status Code: 400 Bad Request

      \n
    • \n
    \n
  • \n
  • \n

    Error Code: NoSuchUpload\n

    \n
      \n
    • \n

      Description: The specified multipart upload does not exist. The upload ID\n might be invalid, or the multipart upload might have been aborted or\n completed.

      \n
    • \n
    • \n

      HTTP Status Code: 404 Not Found

      \n
    • \n
    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CompleteMultipartUpload:

\n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}/{Key+}", @@ -18371,13 +18380,13 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { @@ -18395,7 +18404,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", + "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms).

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -18409,14 +18418,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

If present, indicates the ID of the KMS key that was used for object encryption.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -18476,14 +18485,14 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

@@ -18476,14 +18485,14 @@
         "ChecksumCRC32": {
           "target": "com.amazonaws.s3#ChecksumCRC32",
           "traits": {
-            "smithy.api#documentation": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.",
+            "smithy.api#documentation": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.",
             "smithy.api#httpHeader": "x-amz-checksum-crc32"
           }
         },
         "ChecksumCRC32C": {
           "target": "com.amazonaws.s3#ChecksumCRC32C",
           "traits": {
-            "smithy.api#documentation": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.",
+            "smithy.api#documentation": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.",
             "smithy.api#httpHeader": "x-amz-checksum-crc32c"
           }
         },
@@ -18575,13 +18584,13 @@
         "ChecksumCRC32": {
           "target": "com.amazonaws.s3#ChecksumCRC32",
           "traits": {
-            "smithy.api#documentation": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide."
+            "smithy.api#documentation": "The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide."
           }
         },
         "ChecksumCRC32C": {
           "target": "com.amazonaws.s3#ChecksumCRC32C",
           "traits": {
-            "smithy.api#documentation": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide."
+            "smithy.api#documentation": "The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide."
           }
         },
         "ChecksumSHA1": {
@@ -18701,7 +18710,7 @@
       }
     ],
     "traits": {
-      "smithy.api#documentation": "

Creates a copy of an object that is already stored in Amazon S3.

\n \n

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.

\n
\n

You can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.

\n \n
    \n
  • \n

    Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.

    \n
  • \n
\n
\n

Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable \n or disable a Region for standalone accounts in the\n Amazon Web Services Account Management Guide.

\n \n

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request error. For more information, see Transfer\n Acceleration.

\n
\n
\n
Authentication and authorization
\n
\n

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

You must have\n read access to the source object and write\n access to the destination bucket.

\n
    \n
  • \n

    \n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object is in a general purpose bucket, you must have\n \n s3:GetObject\n \n permission to read the source object that is being copied.

      \n
    • \n
    • \n

      If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject\n \n permission to write the object copy to the destination bucket.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession\n permission in\n the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

      \n
    • \n
    • \n

      If the copy destination is a directory bucket, you must have the \n s3express:CreateSession\n permission in the\n Action element of a policy to write the object\n to the destination. The s3express:SessionMode condition\n key can't be set to ReadOnly on the copy destination bucket.

      \n
    • \n
    \n

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Response and special errors
\n
\n

When the request is an HTTP 1.1 request, the response is chunk encoded. When\n the request is not an HTTP 1.1 request, the response would not contain the\n Content-Length. You always need to read the entire response body\n to check if the copy succeeds.

\n
    \n
  • \n

    If the copy is successful, you receive a response with information about the copied\n object.

    \n
  • \n
  • \n

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK response can contain either a success or an error.

    \n
      \n
    • \n

      If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.

      \n
    • \n
    • \n

      If the error occurs during the copy operation, the error response is\n embedded in the 200 OK response. For example, in a cross-region copy, you \n may encounter throttling and receive a 200 OK response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is \n when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      \n

      If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).

      \n
    • \n
    \n
  • \n
\n
\n
Charge
\n
\n

The copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see\n Amazon S3 pricing.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CopyObject:

\n ", + "smithy.api#documentation": "

Creates a copy of an object that is already stored in Amazon S3.

\n \n

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your\n object up to 5 GB in size in a single atomic action using this API. However, to copy an\n object greater than 5 GB, you must use the multipart upload Upload Part - Copy\n (UploadPartCopy) API. For more information, see Copy Object Using the\n REST Multipart Upload API.

\n
\n

You can copy individual objects between general purpose buckets, between directory buckets, and \n between general purpose buckets and directory buckets.

\n \n
    \n
  • \n

    Amazon S3 supports copy operations using Multi-Region Access Points only as a destination when using the Multi-Region Access Point ARN.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    VPC endpoints don't support cross-Region requests (including copies). If you're using VPC endpoints, your source and destination buckets should be in the same Amazon Web Services Region as your VPC endpoint.

    \n
  • \n
\n
\n

Both the\n Region that you want to copy the object from and the Region that you want to copy the\n object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable \n or disable a Region for standalone accounts in the\n Amazon Web Services Account Management Guide.

\n \n

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a\n cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad\n Request error. For more information, see Transfer\n Acceleration.

\n
\n
\n
Authentication and authorization
\n
\n

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including\n x-amz-copy-source, must be signed. For more information, see REST Authentication.

\n

\n Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the \n temporary security credentials through the CreateSession API operation.

\n

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

\n
\n
Permissions
\n
\n

You must have\n read access to the source object and write\n access to the destination bucket.

\n
    \n
  • \n

    \n General purpose bucket permissions -\n You must have permissions in an IAM policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object is in a general purpose bucket, you must have\n \n s3:GetObject\n \n permission to read the source object that is being copied.

      \n
    • \n
    • \n

      If the destination bucket is a general purpose bucket, you must have\n \n s3:PutObject\n \n permission to write the object copy to the destination bucket.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions -\n You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination\n bucket types in a CopyObject operation.

    \n
      \n
    • \n

      If the source object that you want to copy is in a\n directory bucket, you must have the \n s3express:CreateSession\n permission in\n the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

      \n
    • \n
    • \n

      If the copy destination is a directory bucket, you must have the \n s3express:CreateSession\n permission in the\n Action element of a policy to write the object\n to the destination. The s3express:SessionMode condition\n key can't be set to ReadOnly on the copy destination bucket.

      \n
    • \n
    \n

    If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

    \n

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Response and special errors
\n
\n

When the request is an HTTP 1.1 request, the response is chunk encoded. When\n the request is not an HTTP 1.1 request, the response would not contain the\n Content-Length. You always need to read the entire response body\n to check if the copy succeeds.

\n
    \n
  • \n

    If the copy is successful, you receive a response with information about the copied\n object.

    \n
  • \n
  • \n

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3\n is copying the files. A 200 OK response can contain either a success or an error.

    \n
      \n
    • \n

      If the error occurs before the copy action starts, you receive a\n standard Amazon S3 error.

      \n
    • \n
    • \n

      If the error occurs during the copy operation, the error response is\n embedded in the 200 OK response. For example, in a cross-region copy, you \n may encounter throttling and receive a 200 OK response. \n For more information, see Resolve \n the Error 200 response when copying objects to Amazon S3. \n The 200 OK status code means the copy was accepted, but \n it doesn't mean the copy is complete. Another example is \n when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. \n You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      \n

      If you call this API operation directly, make\n sure to design your application to parse the content of the response and handle it\n appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the\n embedded error and apply error handling per your configuration settings (including\n automatically retrying the request as appropriate). If the condition persists, the SDKs\n throw an exception (or, for the SDKs that don't use exceptions, they return an \n error).

      \n
    • \n
    \n
  • \n
\n
\n
Charge
\n
\n

The copy request charge is based on the storage class and Region that you specify for\n the destination object. The request can also result in a data retrieval charge for the\n source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see\n Amazon S3 pricing.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CopyObject:

\n ", "smithy.api#examples": [ { "title": "To copy an object", @@ -18765,7 +18774,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse). For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.\n",
+            "smithy.api#documentation": "The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).",
             "smithy.api#httpHeader": "x-amz-server-side-encryption"
           }
         },
@@ -18786,21 +18795,21 @@
         "SSEKMSKeyId": {
           "target": "com.amazonaws.s3#SSEKMSKeyId",
           "traits": {
-            "smithy.api#documentation": "If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object. This functionality is not supported for directory buckets.\n",
+            "smithy.api#documentation": "If present, indicates the ID of the KMS key that was used for object encryption.",
             "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id"
           }
         },
         "SSEKMSEncryptionContext": {
           "target": "com.amazonaws.s3#SSEKMSEncryptionContext",
           "traits": {
-            "smithy.api#documentation": "If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs. This functionality is not supported for directory buckets.\n",
+            "smithy.api#documentation": "If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.",
             "smithy.api#httpHeader": "x-amz-server-side-encryption-context"
           }
         },
         "BucketKeyEnabled": {
           "target": "com.amazonaws.s3#BucketKeyEnabled",
           "traits": {
-            "smithy.api#documentation": "Indicates whether the copied object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS). This functionality is not supported for directory buckets.\n",
+            "smithy.api#documentation": "Indicates whether the copied object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).",
             "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled"
           }
         },
@@ -18987,7 +18996,7 @@
         "ServerSideEncryption": {
           "target": "com.amazonaws.s3#ServerSideEncryption",
           "traits": {
-            "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response.

\n

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket.\n When copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a default encryption\n configuration that uses server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or\n server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses\n the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.

\n

When you perform a CopyObject operation, if you want to use a\n different type of encryption setting for the target object, you can specify \n appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a\n KMS key, or a customer-provided key. If the encryption setting in\n your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence.

\n

With server-side\n encryption, Amazon S3 encrypts your data as it writes your data to disks in its data\n centers and decrypts the data when you access it. For more information about server-side encryption, see Using\n Server-Side Encryption in the\n Amazon S3 User Guide.

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", + "smithy.api#documentation": "

The server-side encryption algorithm used when storing this object in Amazon S3. Unrecognized or unsupported values won’t write a destination object and will receive a 400 Bad Request response.

\n

Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket.\n When copying an object, if you don't specify encryption information in your copy\n request, the encryption setting of the target object is set to the default\n encryption configuration of the destination bucket. By default, all buckets have a\n base level of encryption configuration that uses server-side encryption with Amazon S3\n managed keys (SSE-S3). If the destination bucket has a different default encryption\n configuration, Amazon S3 uses\n the corresponding encryption key to encrypt the target\n object copy.

\n

With server-side\n encryption, Amazon S3 encrypts your data as it writes your data to disks in its data\n centers and decrypts the data when you access it. For more information about server-side encryption, see Using\n Server-Side Encryption in the\n Amazon S3 User Guide.

\n

\n General purpose buckets \n

\n
    \n
  • \n

    For general purpose buckets, there are the following supported options for server-side encryption: server-side encryption with Key Management Service (KMS) keys\n (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), and \n server-side encryption with customer-provided encryption keys (SSE-C). Amazon S3 uses\n the corresponding KMS key, or a customer-provided key to encrypt the target\n object copy.

    \n
  • \n
  • \n

    When you perform a CopyObject operation, if you want to use a\n different type of encryption setting for the target object, you can specify \n appropriate encryption-related headers to encrypt the target object with an Amazon S3 managed key, a\n KMS key, or a customer-provided key. If the encryption setting in\n your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence.

    \n
  • \n
\n

\n Directory buckets \n

\n
    \n
  • \n

    For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

    \n
  • \n
  • \n

    To encrypt new object copies to a directory bucket with SSE-KMS, we recommend you specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). \n The Amazon Web Services managed key (aws/s3) isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. After you specify a customer managed key for SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS configuration. \n Then, when you perform a CopyObject operation and want to specify server-side encryption settings for new object copies with SSE-KMS in the encryption-related request headers, you must ensure the encryption key is the same customer managed key that you specified for the directory bucket's default encryption configuration. \n

    \n
  • \n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -19029,21 +19038,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.

\n \n

This functionality is not supported when the destination bucket is a directory bucket.

\n
", + "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an\n object protected by KMS will fail if they're not made via SSL or using SigV4. For\n information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see\n Specifying the\n Signature Version in Request Authentication in the\n Amazon S3 User Guide.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the \n x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS \n symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. \n If you want to specify the \n x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS \n customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs. This value must be explicitly added to specify encryption context for \n CopyObject requests.

\n \n

This functionality is not supported when the destination bucket is a directory bucket.

\n
", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for the destination object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs.

\n

\n General purpose buckets - This value must be explicitly added to specify encryption context for \n CopyObject requests if you want an additional encryption context for your destination object. The additional encryption context of the source object won't be copied to the destination object. For more information, see Encryption context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the\n object.

\n

Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3\n Bucket Key.

\n

For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.

\n \n

This functionality is not supported when the destination bucket is a directory bucket.

\n
", + "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the\n object.

\n

Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS. Specifying this header with a COPY action doesn’t affect bucket-level settings for S3\n Bucket Key.

\n

For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.

\n \n

\n Directory buckets - S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

\n
", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -19139,13 +19148,13 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { @@ -19183,13 +19192,13 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { @@ -19435,7 +19444,7 @@ "target": "com.amazonaws.s3#CreateMultipartUploadOutput" }, "traits": { - "smithy.api#documentation": "

This action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

\n \n

After you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.

\n
\n

If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart \n upload must be completed within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.

\n \n
    \n
  • \n

    \n Directory buckets - S3 Lifecycle is not supported by directory buckets.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
\n
Request signing
\n
\n

For request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.

\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - To\n perform a multipart upload with encryption using an Key Management Service (KMS)\n KMS key, the requester must have permission to the\n kms:Decrypt and kms:GenerateDataKey actions on\n the key. The requester must also have permissions for the\n kms:GenerateDataKey action for the\n CreateMultipartUpload API. Then, the requester needs\n permissions for the kms:Decrypt action on the\n UploadPart and UploadPartCopy APIs. These\n permissions are required because Amazon S3 must decrypt and read data from the\n encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API and permissions and Protecting data\n using server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Encryption
\n
\n
    \n
  • \n

    \n General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request.

    \n
      \n
    • \n

      Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) –\n If you want Amazon Web Services to manage the keys used to encrypt data, specify the\n following headers in the request.

      \n
        \n
      • \n

        \n x-amz-server-side-encryption\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-aws-kms-key-id\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-context\n

        \n
      • \n
      \n \n
        \n
      • \n

        If you specify x-amz-server-side-encryption:aws:kms, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to\n protect the data.

        \n
      • \n
      • \n

        To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt and kms:GenerateDataKey*\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.

        \n
      • \n
      • \n

        If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key\n policy and your IAM user or role.

        \n
      • \n
      • \n

        All GET and PUT requests for an object\n protected by KMS fail if you don't make them by using Secure Sockets\n Layer (SSL), Transport Layer Security (TLS), or Signature Version\n 4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.

        \n
      • \n
      \n
      \n

      For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.

      \n
    • \n
    • \n

      Use customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.

      \n
        \n
      • \n

        \n x-amz-server-side-encryption-customer-algorithm\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-customer-key\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-customer-key-MD5\n

        \n
      • \n
      \n

      For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C) in the Amazon S3 User Guide.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory buckets -For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CreateMultipartUpload:

\n ", + "smithy.api#documentation": "

This action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

\n \n

After you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.

\n
\n

If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart \n upload must be completed within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle\n Configuration.

\n \n
    \n
  • \n

    \n Directory buckets - S3 Lifecycle is not supported by directory buckets.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
\n
Request signing
\n
\n

For request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.

\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - To\n perform a multipart upload with encryption using an Key Management Service (KMS)\n KMS key, the requester must have permission to the\n kms:Decrypt and kms:GenerateDataKey actions on\n the key. The requester must also have permissions for the\n kms:GenerateDataKey action for the\n CreateMultipartUpload API. Then, the requester needs\n permissions for the kms:Decrypt action on the\n UploadPart and UploadPartCopy APIs. These\n permissions are required because Amazon S3 must decrypt and read data from the\n encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API and permissions and Protecting data\n using server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Encryption
\n
\n
    \n
  • \n

    \n General purpose buckets - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it\n writes it to disks in its data centers and decrypts it when you access it. Amazon S3\n automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a\n multipart upload, if you don't specify encryption information in your request, the\n encryption setting of the uploaded parts is set to the default encryption configuration of\n the destination bucket. By default, all buckets have a base level of encryption\n configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the\n destination bucket has a default encryption configuration that uses server-side encryption\n with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C),\n Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the uploaded\n parts. When you perform a CreateMultipartUpload operation, if you want to use a different\n type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the\n object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a customer-provided key). When the encryption\n setting in your request is different from the default encryption configuration of the\n destination bucket, the encryption setting in your request takes precedence. If you choose\n to provide your own encryption key, the request headers you provide in UploadPart\n and UploadPartCopy requests must match the headers you used in the CreateMultipartUpload request.

    \n
      \n
    • \n

      Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key\n (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) –\n If you want Amazon Web Services to manage the keys used to encrypt data, specify the\n following headers in the request.

      \n
        \n
      • \n

        \n x-amz-server-side-encryption\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-aws-kms-key-id\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-context\n

        \n
      • \n
      \n \n
        \n
      • \n

        If you specify x-amz-server-side-encryption:aws:kms, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id,\n Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to\n protect the data.

        \n
      • \n
      • \n

        To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester\n must have permission to the kms:Decrypt and kms:GenerateDataKey*\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions and Protecting data using\n server-side encryption with Amazon Web Services KMS in the\n Amazon S3 User Guide.

        \n
      • \n
      • \n

        If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key,\n then you must have these permissions on the key policy. If your IAM user or role is in a different account from the key, then you must have the permissions on both the key\n policy and your IAM user or role.

        \n
      • \n
      • \n

        All GET and PUT requests for an object\n protected by KMS fail if you don't make them by using Secure Sockets\n Layer (SSL), Transport Layer Security (TLS), or Signature Version\n 4. For information about configuring any of the officially supported Amazon Web Services\n SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.

        \n
      • \n
      \n
      \n

      For more information about server-side encryption with KMS keys\n (SSE-KMS), see Protecting Data\n Using Server-Side Encryption with KMS keys in the Amazon S3 User Guide.

      \n
    • \n
    • \n

      Use customer-provided encryption keys (SSE-C) – If you want to manage\n your own encryption keys, provide all the following headers in the\n request.

      \n
        \n
      • \n

        \n x-amz-server-side-encryption-customer-algorithm\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-customer-key\n

        \n
      • \n
      • \n

        \n x-amz-server-side-encryption-customer-key-MD5\n

        \n
      • \n
      \n

      For more information about server-side encryption with customer-provided\n encryption keys (SSE-C), see \n Protecting data using server-side encryption with customer-provided\n encryption keys (SSE-C) in the Amazon S3 User Guide.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

    \n

    In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. \n You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. \n

    \n \n

    When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. \n So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n the encryption request headers must match the default encryption configuration of the directory bucket.\n\n

    \n
    \n \n

    For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, \n the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.

    \n
    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to CreateMultipartUpload:

\n ", "smithy.api#examples": [ { "title": "To initiate a multipart upload", @@ -19497,7 +19506,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms).

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -19518,21 +19527,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

If present, indicates the ID of the KMS key that was used for object encryption.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -19667,7 +19676,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms).

\n
    \n
  • \n

    \n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. \n

    \n

    In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. \n You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. \n

    \n \n

    When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. \n So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n the encryption request headers must match the default encryption configuration of the directory bucket.\n\n

    \n
    \n
  • \n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -19709,21 +19718,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.

\n

\n General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS \n key to use. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the \n x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS \n symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. \n If you want to specify the \n x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS \n customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.

\n

Specifying this header with an object action doesn’t affect bucket-level settings for S3\n Bucket Key.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS).

\n

\n General purpose buckets - Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3\n Bucket Key.

\n

\n Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or \n the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -19794,7 +19803,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint APIs on directory buckets. \n For more information about Zonal endpoint APIs that include the Availability Zone in the request endpoint, see \n S3 Express One Zone APIs in the Amazon S3 User Guide. \n

\n

To make Zonal endpoint API requests on a directory bucket, use the CreateSession\n API operation. Specifically, you grant s3express:CreateSession permission to a\n bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the\n CreateSession API request on the bucket, which returns temporary security\n credentials that include the access key ID, secret access key, session token, and\n expiration. These credentials have associated permissions to access the Zonal endpoint APIs. After\n the session is created, you don’t need to use other policies to grant permissions to each\n Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by\n applying the temporary security credentials of the session to the request headers and\n following the SigV4 protocol for authentication. You also apply the session token to the\n x-amz-s3session-token request header for authorization. Temporary security\n credentials are scoped to the bucket and expire after 5 minutes. After the expiration time,\n any calls that you make with those credentials will fail. You must use IAM credentials\n again to make a CreateSession API request that generates a new set of\n temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond\n the original specified interval.

\n

If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid\n service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to\n initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the\n Amazon S3 User Guide.

\n \n
    \n
  • \n

    You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n \n CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject.

    \n
  • \n
  • \n

    \n \n HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket.

    \n
  • \n
\n
\n
\n
Permissions
\n
\n

To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that\n grants s3express:CreateSession permission to the bucket. In a\n policy, you can have the s3express:SessionMode condition key to\n control who can create a ReadWrite or ReadOnly session.\n For more information about ReadWrite or ReadOnly\n sessions, see \n x-amz-create-session-mode\n . For example policies, see\n Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

\n

To grant cross-account access to Zonal endpoint APIs, the bucket policy should also grant both accounts the s3express:CreateSession permission.

\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
", + "smithy.api#documentation": "

Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint API operations on directory buckets. \n For more information about Zonal endpoint API operations that include the Availability Zone in the request endpoint, see \n S3 Express One Zone APIs in the Amazon S3 User Guide. \n

\n

To make Zonal endpoint API requests on a directory bucket, use the CreateSession\n API operation. Specifically, you grant s3express:CreateSession permission to a\n bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the\n CreateSession API request on the bucket, which returns temporary security\n credentials that include the access key ID, secret access key, session token, and\n expiration. These credentials have associated permissions to access the Zonal endpoint API operations. After\n the session is created, you don’t need to use other policies to grant permissions to each\n Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by\n applying the temporary security credentials of the session to the request headers and\n following the SigV4 protocol for authentication. You also apply the session token to the\n x-amz-s3session-token request header for authorization. Temporary security\n credentials are scoped to the bucket and expire after 5 minutes. After the expiration time,\n any calls that you make with those credentials will fail. You must use IAM credentials\n again to make a CreateSession API request that generates a new set of\n temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond\n the original specified interval.

\n

If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid\n service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to\n initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the\n Amazon S3 User Guide.

\n \n
    \n
  • \n

    You must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n \n CopyObject API operation - Unlike other Zonal endpoint API operations, the CopyObject API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the CopyObject API operation on directory buckets, see CopyObject.

    \n
  • \n
  • \n

    \n \n HeadBucket API operation - Unlike other Zonal endpoint API operations, the HeadBucket API operation doesn't use the temporary security credentials returned from the CreateSession API operation for authentication and authorization. For information about authentication and authorization of the HeadBucket API operation on directory buckets, see HeadBucket.

    \n
  • \n
\n
\n
\n
Permissions
\n
\n

To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that\n grants s3express:CreateSession permission to the bucket. In a\n policy, you can have the s3express:SessionMode condition key to\n control who can create a ReadWrite or ReadOnly session.\n For more information about ReadWrite or ReadOnly\n sessions, see \n x-amz-create-session-mode\n . For example policies, see\n Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the\n Amazon S3 User Guide.

\n

To grant cross-account access to Zonal endpoint API operations, the bucket policy should also grant both accounts the s3express:CreateSession permission.

\n

If you want to encrypt objects with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key.

\n
\n
Encryption
\n
\n

For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

\n

For Zonal endpoint (object-level) API operations except CopyObject and UploadPartCopy, \nyou authenticate and authorize requests through CreateSession for low latency. \n To encrypt new objects in a directory bucket with SSE-KMS, you must specify SSE-KMS as the directory bucket's default encryption configuration with a KMS key (specifically, a customer managed key). Then, when a session is created for Zonal endpoint API operations, new objects are automatically encrypted and decrypted with SSE-KMS and S3 Bucket Keys during the session.

\n \n

\n Only 1 customer managed key is supported per directory bucket for the lifetime of the bucket. The Amazon Web Services managed key (aws/s3) isn't supported. \n After you specify SSE-KMS as your bucket's default encryption configuration with a customer managed key, you can't change the customer managed key for the bucket's SSE-KMS configuration.\n

\n
\n

In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, \n you can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) from the CreateSession request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. \n

\n \n

When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. \n Also, in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n it's not supported to override the values of the encryption settings from the CreateSession request. \n\n

\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?session", @@ -19810,6 +19819,34 @@ "com.amazonaws.s3#CreateSessionOutput": { "type": "structure", "members": { + "ServerSideEncryption": { + "target": "com.amazonaws.s3#ServerSideEncryption", + "traits": { + "smithy.api#documentation": "

The server-side encryption algorithm used when you store objects in the directory bucket.

", + "smithy.api#httpHeader": "x-amz-server-side-encryption" + } + }, + "SSEKMSKeyId": { + "target": "com.amazonaws.s3#SSEKMSKeyId", + "traits": { + "smithy.api#documentation": "

If you specify x-amz-server-side-encryption with aws:kms, this header indicates the ID of the KMS \n symmetric encryption customer managed key that was used for object encryption.

", + "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" + } + }, + "SSEKMSEncryptionContext": { + "target": "com.amazonaws.s3#SSEKMSEncryptionContext", + "traits": { + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject \n operations on this object.

", + "smithy.api#httpHeader": "x-amz-server-side-encryption-context" + } + }, + "BucketKeyEnabled": { + "target": "com.amazonaws.s3#BucketKeyEnabled", + "traits": { + "smithy.api#documentation": "

Indicates whether to use an S3 Bucket Key for server-side encryption\n with KMS keys (SSE-KMS).

", + "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" + } + }, "Credentials": { "target": "com.amazonaws.s3#SessionCredentials", "traits": { @@ -19820,7 +19857,8 @@ } }, "traits": { - "smithy.api#output": {} + "smithy.api#output": {}, + "smithy.api#xmlName": "CreateSessionResult" } }, "com.amazonaws.s3#CreateSessionRequest": { @@ -19829,7 +19867,7 @@ "SessionMode": { "target": "com.amazonaws.s3#SessionMode", "traits": { - "smithy.api#documentation": "

Specifies the mode of the session that will be created, either ReadWrite or\n ReadOnly. By default, a ReadWrite session is created. A\n ReadWrite session is capable of executing all the Zonal endpoint APIs on a\n directory bucket. A ReadOnly session is constrained to execute the following\n Zonal endpoint APIs: GetObject, HeadObject, ListObjectsV2,\n GetObjectAttributes, ListParts, and\n ListMultipartUploads.

", + "smithy.api#documentation": "

Specifies the mode of the session that will be created, either ReadWrite or\n ReadOnly. By default, a ReadWrite session is created. A\n ReadWrite session is capable of executing all the Zonal endpoint API operations on a\n directory bucket. A ReadOnly session is constrained to execute the following\n Zonal endpoint API operations: GetObject, HeadObject, ListObjectsV2,\n GetObjectAttributes, ListParts, and\n ListMultipartUploads.

", "smithy.api#httpHeader": "x-amz-create-session-mode" } }, @@ -19843,6 +19881,34 @@ "name": "Bucket" } } + }, + "ServerSideEncryption": { + "target": "com.amazonaws.s3#ServerSideEncryption", + "traits": { + "smithy.api#documentation": "

The server-side encryption algorithm to use when you store objects in the directory bucket.

\n

For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). By default, Amazon S3 encrypts data with SSE-S3. \n For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

", + "smithy.api#httpHeader": "x-amz-server-side-encryption" + } + }, + "SSEKMSKeyId": { + "target": "com.amazonaws.s3#SSEKMSKeyId", + "traits": { + "smithy.api#documentation": "

If you specify x-amz-server-side-encryption with aws:kms, you must specify the \n x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key ARN) of the KMS \n symmetric encryption customer managed key to use. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN, not the Key ID.

\n

Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

", + "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" + } + }, + "SSEKMSEncryptionContext": { + "target": "com.amazonaws.s3#SSEKMSEncryptionContext", + "traits": { + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject operations on\n this object.

\n

\n General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", + "smithy.api#httpHeader": "x-amz-server-side-encryption-context" + } + }, + "BucketKeyEnabled": { + "target": "com.amazonaws.s3#BucketKeyEnabled", + "traits": { + "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using KMS keys (SSE-KMS).

\n

S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or \n import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

", + "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" + } } }, "traits": { @@ -20077,7 +20143,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This implementation of the DELETE action resets the default encryption for the bucket as\n server-side encryption with Amazon S3 managed keys (SSE-S3). For information about the bucket\n default encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.

\n

To use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

The following operations are related to DeleteBucketEncryption:

\n ", + "smithy.api#documentation": "

This implementation of the DELETE action resets the default encryption for the bucket as\n server-side encryption with Amazon S3 managed keys (SSE-S3).

\n \n \n \n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy. \n The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
\n

The following operations are related to DeleteBucketEncryption:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?encryption", @@ -20096,7 +20162,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket containing the server-side encryption configuration to\n delete.

", + "smithy.api#documentation": "

The name of the bucket containing the server-side encryption configuration to\n delete.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -20107,7 +20173,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

\n \n

For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented.

\n
", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -20726,7 +20792,7 @@ "target": "com.amazonaws.s3#DeleteObjectOutput" }, "traits": { - "smithy.api#documentation": "

Removes an object from a bucket. The behavior depends on the bucket's versioning state:

\n
    \n
  • \n

    If bucket versioning is not enabled, the operation permanently deletes the object.

    \n
  • \n
  • \n

    If bucket versioning is enabled, the operation inserts a delete marker, which becomes the current version of the object. To permanently delete an object in a versioned bucket, you must include the object’s versionId in the request. For more information about versioning-enabled buckets, see Deleting object versions from a versioning-enabled bucket.

    \n
  • \n
  • \n

    If bucket versioning is suspended, the operation removes the object that has a null versionId, if there is one, and inserts a delete marker that becomes the current version of the object. If there isn't an object with a null versionId, and all versions of the object have a versionId, Amazon S3 does not remove the object and only inserts a delete marker. To permanently delete an object that has a versionId, you must include the object’s versionId in the request. For more information about versioning-suspended buckets, see Deleting objects from versioning-suspended buckets.

    \n
  • \n
\n \n
    \n
  • \n

    \n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n

To remove a specific version, you must use the versionId query parameter. Using this\n query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3\n sets the response header x-amz-delete-marker to true.

\n

If the object you want to delete is in a bucket where the bucket versioning\n configuration is MFA Delete enabled, you must include the x-amz-mfa request\n header in the DELETE versionId request. Requests that include\n x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3\n User Guide. To see sample\n requests that use versioning, see Sample\n Request.

\n \n

\n Directory buckets - MFA delete is not supported by directory buckets.

\n
\n

You can delete objects by explicitly calling DELETE Object or calling \n (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block\n users or accounts from removing or deleting objects from your bucket, you must deny them\n the s3:DeleteObject, s3:DeleteObjectVersion, and\n s3:PutLifeCycleConfiguration actions.

\n \n

\n Directory buckets - S3 Lifecycle is not supported by directory buckets.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The following permissions are required in your policies when your \n DeleteObjects request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:DeleteObject\n - To delete an object from a bucket, you must always have the s3:DeleteObject permission.

      \n
    • \n
    • \n

      \n \n s3:DeleteObjectVersion\n - To delete a specific version of an object from a versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following action is related to DeleteObject:

\n ", + "smithy.api#documentation": "

Removes an object from a bucket. The behavior depends on the bucket's versioning state.\n For more information, see Best\n practices to consider before deleting an object.

\n

To remove a specific version, you must use the versionId query parameter.\n Using this query parameter permanently deletes the version. If the object deleted is a\n delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. If\n the object you want to delete is in a bucket where the bucket versioning configuration is\n MFA delete enabled, you must include the x-amz-mfa request header in the\n DELETE versionId request. Requests that include x-amz-mfa must\n use HTTPS. For more information about MFA delete and to see example requests, see Using MFA\n delete and Sample\n request in the Amazon S3 User Guide.

\n \n
    \n
  • \n

    S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the\n versionId query parameter in the request.

    \n
  • \n
  • \n

    For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    MFA delete is not supported by directory buckets.

    \n
  • \n
\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The\n following permissions are required in your policies when your\n DeleteObjects request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:DeleteObject\n \n - To delete an object from a bucket, you must always have the\n s3:DeleteObject permission.

      \n \n

      You can also use PutBucketLifecycle to delete\n objects in Amazon S3.

      \n
      \n
    • \n
    • \n

      \n \n s3:DeleteObjectVersion\n - To delete a specific version of an object from a\n versioning-enabled bucket, you must have the\n s3:DeleteObjectVersion permission.

      \n
    • \n
    • \n

      If you want to block users or accounts from removing or deleting\n objects from your bucket, you must deny them the\n s3:DeleteObject, s3:DeleteObjectVersion,\n and s3:PutLifeCycleConfiguration permissions.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions -\n To grant access to this API operation on a directory bucket, we recommend\n that you use the CreateSession API operation for\n session-based authorization.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following action is related to DeleteObject:

\n ", "smithy.api#examples": [ { "title": "To delete an object (from a non-versioned bucket)", @@ -21081,7 +21147,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
    \n
  • \n

    CRC32

    \n
  • \n
  • \n

    CRC32C

    \n
  • \n
  • \n

    SHA1

    \n
  • \n
  • \n

    SHA256

    \n
  • \n
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
    \n
  • \n

    \n CRC32\n

    \n
  • \n
  • \n

    \n CRC32C\n

    \n
  • \n
  • \n

    \n SHA1\n

    \n
  • \n
  • \n

    \n SHA256\n

    \n
  • \n
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } } @@ -21573,7 +21639,7 @@ } }, "traits": { - "smithy.api#documentation": "

Optional configuration to replicate existing source bucket objects. For more\n information, see Replicating Existing Objects in the Amazon S3 User Guide.\n

" + "smithy.api#documentation": "

Optional configuration to replicate existing source bucket objects. \n

\n \n

This parameter is no longer supported. To replicate existing objects, see Replicating existing objects with S3 Batch Replication in the Amazon S3 User Guide.

\n
" } }, "com.amazonaws.s3#ExistingObjectReplicationStatus": { @@ -22035,7 +22101,7 @@ "target": "com.amazonaws.s3#GetBucketEncryptionOutput" }, "traits": { - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets\n have a default encryption configuration that uses server-side encryption with Amazon S3 managed\n keys (SSE-S3). For information about the bucket default encryption feature, see Amazon S3 Bucket\n Default Encryption in the Amazon S3 User Guide.

\n

To use this operation, you must have permission to perform the\n s3:GetEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

\n

The following operations are related to GetBucketEncryption:

\n ", + "smithy.api#documentation": "

Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets\n have a default encryption configuration that uses server-side encryption with Amazon S3 managed\n keys (SSE-S3).

\n \n \n \n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The s3:GetEncryptionConfiguration permission is required in a policy. \n The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Operations and Managing\n Access Permissions to Your Amazon S3 Resources.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
\n

The following operations are related to GetBucketEncryption:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?encryption", @@ -22068,7 +22134,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The name of the bucket from which the server-side encryption configuration is\n retrieved.

", + "smithy.api#documentation": "

The name of the bucket from which the server-side encryption configuration is\n retrieved.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -22079,7 +22145,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

\n \n

For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented.

\n
", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -22279,6 +22345,13 @@ "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Rule" } + }, + "TransitionDefaultMinimumObjectSize": { + "target": "com.amazonaws.s3#TransitionDefaultMinimumObjectSize", + "traits": { + "smithy.api#documentation": "

Indicates which default minimum object size behavior is applied to the lifecycle configuration.

\n
    \n
  • \n

    \n all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default.

    \n
  • \n
  • \n

    \n varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions of objects smaller than 128 KB.\n 

    \n
  • \n
\n

To customize the minimum object size for any transition, you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior.

", + "smithy.api#httpHeader": "x-amz-transition-default-minimum-object-size" + } } }, "traits": { @@ -23221,7 +23294,7 @@ "SHA1" ] }, - "smithy.api#documentation": "

Retrieves an object from Amazon S3.

\n

In the GetObject request, specify the full key name for the object.

\n

\n General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg, specify the object key name as\n /photos/2006/February/sample.jpg. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg in the bucket named\n examplebucket, specify the object key name as\n /examplebucket/photos/2006/February/sample.jpg. For more information about\n request types, see HTTP Host\n Header Bucket Specification in the Amazon S3 User Guide.

\n

\n Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket--use1-az5--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject, you must have the READ\n access to the object (or version). If you grant READ access to the anonymous user, the GetObject operation \n returns the object without using an authorization header. For more information, see Specifying permissions in\n a policy in the Amazon S3 User Guide.

    \n

    If you include a versionId in your request header, you must have the\n s3:GetObjectVersion permission to access a specific\n version of an object. The s3:GetObject permission is not required in this scenario.

    \n

    If you request the\n current version of an object without a specific versionId in the request header, only\n the s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in this scenario.\n

    \n

    If the object that you request doesn’t exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket\n permission.

    \n
      \n
    • \n

      If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found error.

      \n
    • \n
    • \n

      If you don’t have the s3:ListBucket permission, Amazon S3 returns an\n HTTP status code 403 Access Denied error.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Storage classes
\n
\n

If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the \n S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the \n S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this operation returns an\n InvalidObjectState error. For information about restoring archived objects,\n see Restoring\n Archived Objects in the Amazon S3 User Guide.

\n

\n Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. \nUnsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.

\n
\n
Encryption
\n
\n

Encryption request headers, like x-amz-server-side-encryption, should not\n be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses \n these types of keys, you’ll get an HTTP 400 Bad Request error.

\n
\n
Overriding response header values through the request
\n
\n

There are times when you want to override certain response header values of a\n GetObject response. For example, you might override the\n Content-Disposition response header value through your GetObject\n request.

\n

You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. \n The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. \n

\n

The response headers that you can override for the\n GetObject response are Cache-Control, Content-Disposition, \n Content-Encoding, Content-Language, Content-Type, and Expires.

\n

To override values for a set of response headers in the\n GetObject response, you can use the following query\n parameters in the request.

\n
    \n
  • \n

    \n response-cache-control\n

    \n
  • \n
  • \n

    \n response-content-disposition\n

    \n
  • \n
  • \n

    \n response-content-encoding\n

    \n
  • \n
  • \n

    \n response-content-language\n

    \n
  • \n
  • \n

    \n response-content-type\n

    \n
  • \n
  • \n

    \n response-expires\n

    \n
  • \n
\n \n

When you use these parameters, you must sign the request by using either an Authorization header or a\n presigned URL. These parameters cannot be used with an\n unsigned (anonymous) request.

\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to GetObject:

\n ", + "smithy.api#documentation": "

Retrieves an object from Amazon S3.

\n

In the GetObject request, specify the full key name for the object.

\n

\n General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg, specify the object key name as\n /photos/2006/February/sample.jpg. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg in the bucket named\n examplebucket, specify the object key name as\n /examplebucket/photos/2006/February/sample.jpg. For more information about\n request types, see HTTP Host\n Header Bucket Specification in the Amazon S3 User Guide.

\n

\n Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket--use1-az5--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - You must have the required permissions in a policy. To use GetObject, you must have the READ\n access to the object (or version). If you grant READ access to the anonymous user, the GetObject operation \n returns the object without using an authorization header. For more information, see Specifying permissions in\n a policy in the Amazon S3 User Guide.

    \n

    If you include a versionId in your request header, you must have the\n s3:GetObjectVersion permission to access a specific\n version of an object. The s3:GetObject permission is not required in this scenario.

    \n

    If you request the\n current version of an object without a specific versionId in the request header, only\n the s3:GetObject permission is required. The s3:GetObjectVersion permission is not required in this scenario.\n

    \n

    If the object that you request doesn’t exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket\n permission.

    \n
      \n
    • \n

      If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found error.

      \n
    • \n
    • \n

      If you don’t have the s3:ListBucket permission, Amazon S3 returns an\n HTTP status code 403 Access Denied error.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n

    If the object is encrypted using \n SSE-KMS, you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

    \n
  • \n
\n
\n
Storage classes
\n
\n

If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the \n S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access tier, or the \n S3 Intelligent-Tiering Deep Archive Access tier, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this operation returns an\n InvalidObjectState error. For information about restoring archived objects,\n see Restoring\n Archived Objects in the Amazon S3 User Guide.

\n

\n Directory buckets - For directory buckets, only the S3 Express One Zone storage class is supported to store newly created objects. \nUnsupported storage class values won't write a destination object and will respond with the HTTP status code 400 Bad Request.

\n
\n
Encryption
\n
\n

Encryption request headers, like x-amz-server-side-encryption, should not\n be sent for the GetObject requests, if your object uses server-side encryption with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with Key Management Service (KMS)\n keys (SSE-KMS), or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject requests for the object that uses \n these types of keys, you’ll get an HTTP 400 Bad Request error.

\n

\n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

\n
\n
Overriding response header values through the request
\n
\n

There are times when you want to override certain response header values of a\n GetObject response. For example, you might override the\n Content-Disposition response header value through your GetObject\n request.

\n

You can override values for a set of response headers. These modified response header values are included only in a successful response, that is, when the HTTP status code 200 OK is returned. \n The headers you can override using the following query parameters in the request are a subset of the headers that Amazon S3 accepts when you create an object. \n

\n

The response headers that you can override for the\n GetObject response are Cache-Control, Content-Disposition, \n Content-Encoding, Content-Language, Content-Type, and Expires.

\n

To override values for a set of response headers in the\n GetObject response, you can use the following query\n parameters in the request.

\n
    \n
  • \n

    \n response-cache-control\n

    \n
  • \n
  • \n

    \n response-content-disposition\n

    \n
  • \n
  • \n

    \n response-content-encoding\n

    \n
  • \n
  • \n

    \n response-content-language\n

    \n
  • \n
  • \n

    \n response-content-type\n

    \n
  • \n
  • \n

    \n response-expires\n

    \n
  • \n
\n \n

When you use these parameters, you must sign the request by using either an Authorization header or a\n presigned URL. These parameters cannot be used with an\n unsigned (anonymous) request.

\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following operations are related to GetObject:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}/{Key+}?x-id=GetObject", @@ -23393,7 +23466,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves all the metadata from an object without returning the object itself. This\n operation is useful if you're interested only in an object's metadata.

\n

\n GetObjectAttributes combines the functionality of HeadObject\n and ListParts. All of the data returned with each of those individual calls\n can be returned with a single call to GetObjectAttributes.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - To use\n GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation with depend on whether the\n bucket is versioned. If the bucket is versioned, you need both the\n s3:GetObjectVersion and s3:GetObjectVersionAttributes\n permissions for this operation. If the bucket is not versioned, you need the\n s3:GetObject and s3:GetObjectAttributes permissions.\n For more information, see Specifying Permissions in\n a Policy in the Amazon S3 User Guide. If the object\n that you request does not exist, the error Amazon S3 returns depends on whether you\n also have the s3:ListBucket permission.

    \n
      \n
    • \n

      If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found (\"no such key\")\n error.

      \n
    • \n
    • \n

      If you don't have the s3:ListBucket permission, Amazon S3 returns\n an HTTP status code 403 Forbidden (\"access denied\")\n error.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Encryption
\n
\n \n

Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for HEAD requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. \n If you include this header in a GET request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

\n
\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.

\n \n

\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
\n
\n
Versioning
\n
\n

\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

\n
\n
Conditional request headers
\n
\n

Consider the following when using request headers:

\n
    \n
  • \n

    If both of the If-Match and If-Unmodified-Since headers\n are present in the request as follows, then Amazon S3 returns the HTTP status code\n 200 OK and the data requested:

    \n
      \n
    • \n

      \n If-Match condition evaluates to true.

      \n
    • \n
    • \n

      \n If-Unmodified-Since condition evaluates to\n false.

      \n
    • \n
    \n

    For more information about conditional requests, see RFC 7232.

    \n
  • \n
  • \n

    If both of the If-None-Match and If-Modified-Since\n headers are present in the request as follows, then Amazon S3 returns the HTTP status code\n 304 Not Modified:

    \n
      \n
    • \n

      \n If-None-Match condition evaluates to false.

      \n
    • \n
    • \n

      \n If-Modified-Since condition evaluates to\n true.

      \n
    • \n
    \n

    For more information about conditional requests, see RFC 7232.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following actions are related to GetObjectAttributes:

\n ", + "smithy.api#documentation": "

Retrieves all the metadata from an object without returning the object itself. This\n operation is useful if you're interested only in an object's metadata.

\n

\n GetObjectAttributes combines the functionality of HeadObject\n and ListParts. All of the data returned with each of those individual calls\n can be returned with a single call to GetObjectAttributes.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - To use\n GetObjectAttributes, you must have READ access to the object. The permissions that you need to use this operation depend on whether the\n bucket is versioned. If the bucket is versioned, you need both the\n s3:GetObjectVersion and s3:GetObjectVersionAttributes\n permissions for this operation. If the bucket is not versioned, you need the\n s3:GetObject and s3:GetObjectAttributes permissions.\n For more information, see Specifying Permissions in\n a Policy in the Amazon S3 User Guide. If the object\n that you request does not exist, the error Amazon S3 returns depends on whether you\n also have the s3:ListBucket permission.

    \n
      \n
    • \n

      If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found (\"no such key\")\n error.

      \n
    • \n
    • \n

      If you don't have the s3:ListBucket permission, Amazon S3 returns\n an HTTP status code 403 Forbidden (\"access denied\")\n error.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n

    If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

    \n
  • \n
\n
\n
Encryption
\n
\n \n

Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for HEAD requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. \n If you include this header in a GET request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

\n
\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.

\n \n

\n Directory bucket permissions - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

\n
\n
\n
Versioning
\n
\n

\n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

\n
\n
Conditional request headers
\n
\n

Consider the following when using request headers:

\n
    \n
  • \n

    If both of the If-Match and If-Unmodified-Since headers\n are present in the request as follows, then Amazon S3 returns the HTTP status code\n 200 OK and the data requested:

    \n
      \n
    • \n

      \n If-Match condition evaluates to true.

      \n
    • \n
    • \n

      \n If-Unmodified-Since condition evaluates to\n false.

      \n
    • \n
    \n

    For more information about conditional requests, see RFC 7232.

    \n
  • \n
  • \n

    If both of the If-None-Match and If-Modified-Since\n headers are present in the request as follows, then Amazon S3 returns the HTTP status code\n 304 Not Modified:

    \n
      \n
    • \n

      \n If-None-Match condition evaluates to false.

      \n
    • \n
    • \n

      \n If-Modified-Since condition evaluates to\n true.

      \n
    • \n
    \n

    For more information about conditional requests, see RFC 7232.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

The following actions are related to GetObjectAttributes:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}/{Key+}?attributes", @@ -23463,7 +23536,8 @@ } }, "traits": { - "smithy.api#output": {} + "smithy.api#output": {}, + "smithy.api#xmlName": "GetObjectAttributesResponse" } }, "com.amazonaws.s3#GetObjectAttributesParts": { @@ -23804,14 +23878,14 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. For more information, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, @@ -23902,7 +23976,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3.

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -23930,14 +24004,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

If present, indicates the ID of the KMS key that was used for object encryption.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Indicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -24154,7 +24228,7 @@ "ChecksumMode": { "target": "com.amazonaws.s3#ChecksumMode", "traits": { - "smithy.api#documentation": "

To retrieve the checksum, this mode must be enabled.

\n

In addition, if you enable checksum mode and the object is uploaded with a \n checksum \n and encrypted with an Key Management Service (KMS) key, you must have permission to use the \n kms:Decrypt action to retrieve the checksum.

", + "smithy.api#documentation": "

To retrieve the checksum, this mode must be enabled.

\n

\n General purpose buckets - In addition, if you enable checksum mode and the object is uploaded with a \n checksum \n and encrypted with an Key Management Service (KMS) key, you must have permission to use the \n kms:Decrypt action to retrieve the checksum.

", "smithy.api#httpHeader": "x-amz-checksum-mode" } } @@ -24768,7 +24842,7 @@ } ], "traits": { - "smithy.api#documentation": "

The HEAD operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.

\n \n

A HEAD request has the same options as a GET operation on an\n object. The response is identical to the GET response except that there is no\n response body. Because of this, if the HEAD request generates an error, it\n returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not\n Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. \n It's not possible to retrieve the exact exception of these error codes.

\n
\n

Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.

\n
\n
Permissions
\n
\n

\n
    \n
  • \n

    \n General purpose bucket permissions - To\n use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3 in the Amazon S3\n User Guide.

    \n

    If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    \n
      \n
    • \n

      If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found error.

      \n
    • \n
    • \n

      If you don’t have the s3:ListBucket permission, Amazon S3 returns\n an HTTP status code 403 Forbidden error.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Encryption
\n
\n \n

Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for HEAD requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

\n
\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.

\n \n

\n Directory bucket permissions - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
\n
\n
Versioning
\n
\n
    \n
  • \n

    If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    \n
  • \n
  • \n

    If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

    \n
  • \n
\n \n
    \n
  • \n

    \n Directory buckets - Delete marker is not supported by directory buckets.

    \n
  • \n
  • \n

    \n Directory buckets - S3 Versioning isn't enabled and supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n to the versionId query parameter in the request.

    \n
  • \n
\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n \n

For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
\n

The following actions are related to HeadObject:

\n ", + "smithy.api#documentation": "

The HEAD operation retrieves metadata from an object without returning the\n object itself. This operation is useful if you're interested only in an object's metadata.

\n \n

A HEAD request has the same options as a GET operation on an\n object. The response is identical to the GET response except that there is no\n response body. Because of this, if the HEAD request generates an error, it\n returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not\n Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. \n It's not possible to retrieve the exact exception of these error codes.

\n
\n

Request headers are limited to 8 KB in size. For more information, see Common\n Request Headers.

\n
\n
Permissions
\n
\n

\n
    \n
  • \n

    \n General purpose bucket permissions - To\n use HEAD, you must have the s3:GetObject permission. You need the relevant read object (or version) permission for this operation.\n For more information, see Actions, resources, and condition\n keys for Amazon S3 in the Amazon S3\n User Guide. For more information about the permissions to S3 API operations by S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.

    \n

    If the object you request doesn't exist, the error that\n Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    \n
      \n
    • \n

      If you have the s3:ListBucket permission on the bucket, Amazon S3\n returns an HTTP status code 404 Not Found error.

      \n
    • \n
    • \n

      If you don’t have the s3:ListBucket permission, Amazon S3 returns\n an HTTP status code 403 Forbidden error.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n

    If you enable x-amz-checksum-mode in the request and the object is encrypted with\n Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.

    \n
  • \n
\n
\n
Encryption
\n
\n \n

Encryption request headers, like x-amz-server-side-encryption,\n should not be sent for HEAD requests if your object uses server-side\n encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side\n encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with Amazon S3\n managed encryption keys (SSE-S3). The x-amz-server-side-encryption header is used when you PUT an object to S3 and want to specify the encryption method. \n If you include this header in a HEAD request for an object that uses these types of keys, \n you’ll get an HTTP 400 Bad Request error. It's because the encryption method can't be changed when you retrieve the object.

\n
\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers to provide the encryption key for the server to be able to retrieve the object's metadata. The headers are:

\n
    \n
  • \n

    \n x-amz-server-side-encryption-customer-algorithm\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key\n

    \n
  • \n
  • \n

    \n x-amz-server-side-encryption-customer-key-MD5\n

    \n
  • \n
\n

For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the Amazon S3\n User Guide.

\n \n

\n Directory bucket - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide.

\n
\n
\n
Versioning
\n
\n
    \n
  • \n

    If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    \n
  • \n
  • \n

    If the specified version is a delete marker, the response returns a 405 Method Not Allowed error and the Last-Modified: timestamp response header.

    \n
  • \n
\n \n
    \n
  • \n

    \n Directory buckets - Delete marker is not supported by directory buckets.

    \n
  • \n
  • \n

    \n Directory buckets - S3 Versioning isn't enabled or supported for directory buckets. For this API operation, only the null value of the version ID is supported by directory buckets. You can only specify null \n for the versionId query parameter in the request.

    \n
  • \n
\n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n \n

For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n
\n
\n

The following actions are related to HeadObject:
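As a rough illustration of the behavior described above, a HeadObject call through the Soto-generated S3 client might look like the sketch below. Member names assume Soto's usual camelCase mapping of this model; the bucket, key, and SSE-C key material are placeholders, not values from this patch.

```swift
import SotoS3

/// Minimal sketch: read object metadata without downloading the body.
func headObjectExample() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)

    // Plain metadata lookup; needs s3:GetObject on a general purpose bucket.
    let head = try await s3.headObject(.init(
        bucket: "amzn-s3-demo-bucket",      // placeholder
        key: "reports/2024/summary.csv"     // placeholder
    ))
    print(head.contentLength ?? 0, head.eTag ?? "")

    // Objects stored with SSE-C need the same customer key on the HEAD request,
    // otherwise S3 cannot return the metadata.
    _ = try await s3.headObject(.init(
        bucket: "amzn-s3-demo-bucket",
        key: "private/object.bin",
        sseCustomerAlgorithm: "AES256",
        sseCustomerKey: "<base64 256-bit key>",
        sseCustomerKeyMD5: "<base64 MD5 of the key>"
    ))

    try await client.shutdown()
}
```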

\n ", "smithy.api#http": { "method": "HEAD", "uri": "/{Bucket}/{Key+}", @@ -24861,14 +24935,14 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, @@ -24959,7 +25033,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -24987,14 +25061,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key\n that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

If present, indicates the ID of the KMS key that was used for object encryption.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Indicates whether the object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -25204,7 +25278,7 @@ "ChecksumMode": { "target": "com.amazonaws.s3#ChecksumMode", "traits": { - "smithy.api#documentation": "

To retrieve the checksum, this parameter must be enabled.

\n

In addition, if you enable checksum mode and the object is uploaded with a \n checksum \n and encrypted with an Key Management Service (KMS) key, you must have permission to use the \n kms:Decrypt action to retrieve the checksum.

", + "smithy.api#documentation": "

To retrieve the checksum, this parameter must be enabled.

\n

\n General purpose buckets - If you enable checksum mode and the object is uploaded with a \n checksum \n and encrypted with a Key Management Service (KMS) key, you must have permission to use the \n kms:Decrypt action to retrieve the checksum.

\n

\n Directory buckets - If you enable ChecksumMode and the object is encrypted with\n Amazon Web Services Key Management Service (Amazon Web Services KMS), you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key to retrieve the checksum of the object.
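A hedged sketch of enabling checksum mode on a HEAD request with Soto follows; the bucket and key are placeholders and the `.enabled` case name assumes Soto's usual enum mapping. For SSE-KMS objects the caller also needs the kms:GenerateDataKey and kms:Decrypt permissions described above.

```swift
import SotoS3

/// Minimal sketch: ask S3 to return the stored checksum on a HEAD request.
func headObjectChecksumExample() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .useast1)

    let head = try await s3.headObject(.init(
        bucket: "amzn-s3-demo-bucket",   // placeholder
        checksumMode: .enabled,          // sends x-amz-checksum-mode: ENABLED
        key: "data/object.parquet"       // placeholder
    ))
    // Only the field matching the algorithm used at upload time is populated.
    print(head.checksumCRC32 ?? head.checksumCRC32C ?? head.checksumSHA1
            ?? head.checksumSHA256 ?? "no checksum stored")

    try await client.shutdown()
}
```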

", "smithy.api#httpHeader": "x-amz-checksum-mode" } } @@ -26021,7 +26095,7 @@ } }, "com.amazonaws.s3#LifecycleRuleFilter": { - "type": "union", + "type": "structure", "members": { "Prefix": { "target": "com.amazonaws.s3#Prefix", @@ -26477,6 +26551,12 @@ "traits": { "smithy.api#documentation": "

\n ContinuationToken is included in the\n response when there are more buckets that can be listed with pagination. The next ListBuckets request to Amazon S3 can be continued with this ContinuationToken. ContinuationToken is obfuscated and is not a real bucket.

" } + }, + "Prefix": { + "target": "com.amazonaws.s3#Prefix", + "traits": { + "smithy.api#documentation": "

If Prefix was sent with the request, it is included in the response.

\n

All bucket names in the response begin with the specified bucket name prefix.

" + } } }, "traits": { @@ -26500,6 +26580,20 @@ "smithy.api#documentation": "

\n ContinuationToken indicates to Amazon S3 that the list is being continued on\n this bucket with a token. ContinuationToken is obfuscated and is not a real\n key. You can use this ContinuationToken for pagination of the list results.

\n

Length Constraints: Minimum length of 0. Maximum length of 1024.

\n

Required: No.

", "smithy.api#httpQuery": "continuation-token" } + }, + "Prefix": { + "target": "com.amazonaws.s3#Prefix", + "traits": { + "smithy.api#documentation": "

Limits the response to bucket names that begin with the specified bucket name prefix.

", + "smithy.api#httpQuery": "prefix" + } + }, + "BucketRegion": { + "target": "com.amazonaws.s3#BucketRegion", + "traits": { + "smithy.api#documentation": "

Limits the response to buckets that are located in the specified Amazon Web Services Region. The Amazon Web Services Region must be expressed according to the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) Region. For a list of the valid values for all of the Amazon Web Services Regions, see Regions and Endpoints.

\n \n

Requests made to a Regional endpoint that is different from the bucket-region parameter are not supported. For example, if you want to limit the response to your buckets in Region us-west-2, the request must be made to an endpoint in Region us-west-2.
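Once this model change is regenerated into Soto, the new prefix and bucket-region filters could be used roughly as follows; the member names (`prefix`, `bucketRegion`, `continuationToken`) assume the usual camelCase mapping and the values are placeholders. Note that the client Region matches the bucket-region filter, per the restriction above.

```swift
import SotoS3

/// Minimal sketch: list buckets filtered by name prefix and Region, following
/// pagination via ContinuationToken.
func listBucketsExample() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)

    var continuationToken: String? = nil
    repeat {
        let page = try await s3.listBuckets(.init(
            bucketRegion: "us-west-2",             // only buckets in this Region
            continuationToken: continuationToken,
            prefix: "logs-"                        // only names starting with "logs-"
        ))
        for bucket in page.buckets ?? [] {
            print(bucket.name ?? "")
        }
        continuationToken = page.continuationToken
    } while continuationToken != nil

    try await client.shutdown()
}
```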

\n
", + "smithy.api#httpQuery": "bucket-region" + } } }, "traits": { @@ -26551,7 +26645,8 @@ } }, "traits": { - "smithy.api#output": {} + "smithy.api#output": {}, + "smithy.api#xmlName": "ListAllMyDirectoryBucketsResult" } }, "com.amazonaws.s3#ListDirectoryBucketsRequest": { @@ -28579,13 +28674,13 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { @@ -28930,13 +29025,13 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

" } }, "ChecksumSHA1": { @@ -29170,7 +29265,7 @@ "RestrictPublicBuckets": { "target": "com.amazonaws.s3#Setting", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE restricts access to this bucket to only Amazon Web Servicesservice principals and authorized users within this account if the bucket has\n a public policy.

\n

Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.

", + "smithy.api#documentation": "

Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting\n this element to TRUE restricts access to this bucket to only Amazon Web Services service principals and authorized users within this account if the bucket has\n a public policy.

\n

Enabling this setting doesn't affect previously stored bucket policies, except that\n public and cross-account access within any public bucket policy, including non-public\n delegation to specific accounts, is blocked.
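For context, the flag documented above is one of the four settings in PublicAccessBlockConfiguration. A hedged Soto sketch of applying all four follows; the bucket name is a placeholder and member names assume the usual camelCase mapping.

```swift
import SotoS3

/// Minimal sketch: block public access, including RestrictPublicBuckets.
func putPublicAccessBlockExample() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .useast1)

    try await s3.putPublicAccessBlock(.init(
        bucket: "amzn-s3-demo-bucket",          // placeholder
        publicAccessBlockConfiguration: .init(
            blockPublicAcls: true,
            blockPublicPolicy: true,
            ignorePublicAcls: true,
            restrictPublicBuckets: true         // maps to <RestrictPublicBuckets>
        )
    ))

    try await client.shutdown()
}
```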

", "smithy.api#xmlName": "RestrictPublicBuckets" } } @@ -29569,7 +29664,7 @@ "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action uses the encryption subresource to configure default encryption\n and Amazon S3 Bucket Keys for an existing bucket.

\n

By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.

\n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n

Also, this action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).

\n
\n

To use this operation, you must have permission to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n

The following operations are related to PutBucketEncryption:

\n ", + "smithy.api#documentation": "

This operation configures default encryption \n and Amazon S3 Bucket Keys for an existing bucket.

\n \n

\n Directory buckets - For directory buckets, you must make requests for this API operation to the Regional endpoint. These endpoints support path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. \nFor more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

\n
\n

By default, all buckets have a default encryption configuration that uses server-side\n encryption with Amazon S3 managed keys (SSE-S3).

\n \n
    \n
  • \n

    \n General purpose buckets\n

    \n
      \n
    • \n

      You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or\n dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). \n If you specify default encryption by using\n SSE-KMS, you can also configure Amazon S3 Bucket\n Keys. For information about the bucket default\n encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.\n

      \n
    • \n
    • \n

      If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID provided in PutBucketEncryption requests.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory buckets - You can optionally configure default encryption\n for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

    \n
      \n
    • \n

      We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads.

      \n
    • \n
    • \n

      Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

      \n
    • \n
    • \n

      S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets \nto directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or \n the import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

      \n
    • \n
    • \n

      When you specify an KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.

      \n
    • \n
    • \n

      For directory buckets, if you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption requests.

      \n
    • \n
    \n
  • \n
\n
\n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n

Also, this action requires Amazon Web Services Signature Version 4. For more information, see \n Authenticating Requests (Amazon Web Services Signature Version 4).

\n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The s3:PutEncryptionConfiguration permission is required in a policy. \n The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Operations and Managing\n Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation, you must have the s3express:PutEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

    \n

    To set a directory bucket default encryption with SSE-KMS, you must also have the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the target KMS key.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

\n
\n
\n

The following operations are related to PutBucketEncryption:
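A hedged sketch of the SSE-KMS default-encryption setup described above, written against the Soto-generated client: the bucket name and key ARN are placeholders, and case and member names (`.awsKms`, `kmsMasterKeyID`, `bucketKeyEnabled`) assume Soto's usual mapping of this model.

```swift
import SotoS3

/// Minimal sketch: set default bucket encryption to SSE-KMS with an S3 Bucket Key.
/// For directory buckets, only a key ID or key ARN is accepted (no alias), and the
/// caller also needs kms:GenerateDataKey and kms:Decrypt on that key.
func putBucketEncryptionExample() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)

    let configuration = S3.ServerSideEncryptionConfiguration(rules: [
        .init(
            applyServerSideEncryptionByDefault: .init(
                kmsMasterKeyID: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID",
                sseAlgorithm: .awsKms
            ),
            bucketKeyEnabled: true
        )
    ])

    try await s3.putBucketEncryption(.init(
        bucket: "amzn-s3-demo-bucket",      // general purpose or directory bucket
        serverSideEncryptionConfiguration: configuration
    ))

    try await client.shutdown()
}
```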

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?encryption", @@ -29588,7 +29683,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

Specifies default encryption for a bucket using server-side encryption with different\n key options. By default, all buckets have a default encryption configuration that uses\n server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure\n default encryption for a bucket by using server-side encryption with an Amazon Web Services KMS key\n (SSE-KMS) or a customer-provided key (SSE-C). For information about the bucket default\n encryption feature, see Amazon S3 Bucket Default Encryption\n in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Specifies default encryption for a bucket using server-side encryption with different\n key options.

\n

\n Directory buckets - When you use this operation with a directory bucket, you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name\n . Virtual-hosted-style requests aren't supported. Directory bucket names must be unique in the chosen Availability Zone. Bucket names must also follow the format \n bucket_base_name--az_id--x-s3 (for example, \n DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about bucket naming restrictions, see Directory bucket naming rules in the Amazon S3 User Guide\n

", "smithy.api#httpLabel": {}, "smithy.api#required": {}, "smithy.rules#contextParam": { @@ -29599,14 +29694,14 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the server-side encryption\n configuration.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

", + "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the server-side encryption\n configuration.

\n

For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.

\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "Content-MD5" } }, "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If you provide an individual checksum, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter.

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -29621,7 +29716,7 @@ "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

\n \n

For directory buckets, this header is not supported in this API operation. If you specify this header, the request fails with the HTTP status code \n501 Not Implemented.

\n
", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } } @@ -29759,14 +29854,14 @@ "target": "com.amazonaws.s3#PutBucketLifecycleConfigurationRequest" }, "output": { - "target": "smithy.api#Unit" + "target": "com.amazonaws.s3#PutBucketLifecycleConfigurationOutput" }, "traits": { "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm", "requestChecksumRequired": true }, - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. Keep in mind that this will overwrite an existing lifecycle configuration,\n so if you want to retain any configuration details, they must be included in the new\n lifecycle configuration. For information about lifecycle configuration, see Managing\n your storage lifecycle.

\n \n

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility.\n For the related API description, see PutBucketLifecycle.

\n
\n
\n
Rules
\n
\n

You specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. An Amazon S3\n Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.\n Each rule consists of the following:

\n
    \n
  • \n

    A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.

    \n
  • \n
  • \n

    A status indicating whether the rule is in effect.

    \n
  • \n
  • \n

    One or more lifecycle transition and expiration actions that you want\n Amazon S3 to perform on the objects identified by the filter. If the state of\n your bucket is versioning-enabled or versioning-suspended, you can have many\n versions of the same object (one current version and zero or more noncurrent\n versions). Amazon S3 provides predefined actions that you can specify for current\n and noncurrent object versions.

    \n
  • \n
\n

For more information, see Object Lifecycle\n Management and Lifecycle Configuration\n Elements.

\n
\n
Permissions
\n
\n

By default, all Amazon S3 resources are private, including buckets, objects, and\n related subresources (for example, lifecycle configuration and website\n configuration). Only the resource owner (that is, the Amazon Web Services account that created\n it) can access the resource. The resource owner can optionally grant access\n permissions to others by writing an access policy. For this operation, a user must\n get the s3:PutLifecycleConfiguration permission.

\n

You can also explicitly deny permissions. An explicit deny also supersedes any\n other permissions. If you want to block users or accounts from removing or\n deleting objects from your bucket, you must deny them permissions for the\n following actions:

\n
    \n
  • \n

    \n s3:DeleteObject\n

    \n
  • \n
  • \n

    \n s3:DeleteObjectVersion\n

    \n
  • \n
  • \n

    \n s3:PutLifecycleConfiguration\n

    \n
  • \n
\n

For more information about permissions, see Managing Access\n Permissions to Your Amazon S3 Resources.

\n
\n
\n

The following operations are related to\n PutBucketLifecycleConfiguration:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle\n configuration. Keep in mind that this will overwrite an existing lifecycle configuration,\n so if you want to retain any configuration details, they must be included in the new\n lifecycle configuration. For information about lifecycle configuration, see Managing\n your storage lifecycle.

\n
\n
Rules
\n
\n

You specify the lifecycle configuration in your request body. The lifecycle\n configuration is specified as XML consisting of one or more rules. An Amazon S3\n Lifecycle configuration can have up to 1,000 rules. This limit is not adjustable.

\n

Bucket lifecycle configuration supports specifying a lifecycle rule using an object key name prefix, one or more object tags, object size, or any combination of these. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility.\n For the related API description, see PutBucketLifecycle.

\n

A lifecycle rule consists of the following:

\n
    \n
  • \n

    A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, object size, or any combination of these.

    \n
  • \n
  • \n

    A status indicating whether the rule is in effect.

    \n
  • \n
  • \n

    One or more lifecycle transition and expiration actions that you want\n Amazon S3 to perform on the objects identified by the filter. If the state of\n your bucket is versioning-enabled or versioning-suspended, you can have many\n versions of the same object (one current version and zero or more noncurrent\n versions). Amazon S3 provides predefined actions that you can specify for current\n and noncurrent object versions.

    \n
  • \n
\n

For more information, see Object Lifecycle\n Management and Lifecycle Configuration\n Elements.

\n
\n
Permissions
\n
\n

By default, all Amazon S3 resources are private, including buckets, objects, and\n related subresources (for example, lifecycle configuration and website\n configuration). Only the resource owner (that is, the Amazon Web Services account that created\n it) can access the resource. The resource owner can optionally grant access\n permissions to others by writing an access policy. For this operation, a user must\n get the s3:PutLifecycleConfiguration permission.

\n

You can also explicitly deny permissions. An explicit deny also supersedes any\n other permissions. If you want to block users or accounts from removing or\n deleting objects from your bucket, you must deny them permissions for the\n following actions:

\n
    \n
  • \n

    \n s3:DeleteObject\n

    \n
  • \n
  • \n

    \n s3:DeleteObjectVersion\n

    \n
  • \n
  • \n

    \n s3:PutLifecycleConfiguration\n

    \n
  • \n
\n

For more information about permissions, see Managing Access\n Permissions to Your Amazon S3 Resources.

\n
\n
\n

The following operations are related to\n PutBucketLifecycleConfiguration:

\n ", "smithy.api#examples": [ { "title": "Put bucket lifecycle", @@ -29808,6 +29903,21 @@ } } }, + "com.amazonaws.s3#PutBucketLifecycleConfigurationOutput": { + "type": "structure", + "members": { + "TransitionDefaultMinimumObjectSize": { + "target": "com.amazonaws.s3#TransitionDefaultMinimumObjectSize", + "traits": { + "smithy.api#documentation": "

Indicates which default minimum object size behavior is applied to the lifecycle configuration.

\n
    \n
  • \n

    \n all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default.

    \n
  • \n
  • \n

    \n varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.\n

    \n
  • \n
\n

To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior.

", + "smithy.api#httpHeader": "x-amz-transition-default-minimum-object-size" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.s3#PutBucketLifecycleConfigurationRequest": { "type": "structure", "members": { @@ -29843,6 +29953,13 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the account ID that you provide does not match the actual owner of the bucket, the request fails with the HTTP status code 403 Forbidden (access denied).

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } + }, + "TransitionDefaultMinimumObjectSize": { + "target": "com.amazonaws.s3#TransitionDefaultMinimumObjectSize", + "traits": { + "smithy.api#documentation": "

Indicates which default minimum object size behavior is applied to the lifecycle configuration.

\n
    \n
  • \n

    \n all_storage_classes_128K - Objects smaller than 128 KB will not transition to any storage class by default.

    \n
  • \n
  • \n

    \n varies_by_storage_class - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.\n

    \n
  • \n
\n

To customize the minimum object size for any transition you can add a filter that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body of your transition rule. Custom filters always take precedence over the default transition behavior.
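Once regenerated, the new header surfaces in Soto as a request and response member; a hedged sketch is below. Because this diff also changes LifecycleRuleFilter from a union to a structure, the filter is built with a plain initializer rather than an enum case. Names such as `.variesByStorageClass` and the bucket and prefix values are illustrative.

```swift
import SotoS3

/// Minimal sketch: replace the lifecycle configuration and pick the default
/// minimum object size behavior for transitions.
func putLifecycleExample() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .useast1)

    let rule = S3.LifecycleRule(
        expiration: .init(days: 365),
        filter: .init(prefix: "logs/"),    // structure-style filter after this change
        id: "expire-logs-after-one-year",
        status: .enabled
    )

    let response = try await s3.putBucketLifecycleConfiguration(.init(
        bucket: "amzn-s3-demo-bucket",                         // placeholder
        lifecycleConfiguration: .init(rules: [rule]),
        transitionDefaultMinimumObjectSize: .variesByStorageClass
    ))
    // The new output shape echoes which default behavior is in effect.
    print(response.transitionDefaultMinimumObjectSize?.rawValue ?? "unspecified")

    try await client.shutdown()
}
```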

", + "smithy.api#httpHeader": "x-amz-transition-default-minimum-object-size" + } } }, "traits": { @@ -30222,7 +30339,7 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
    \n
  • \n

    CRC32

    \n
  • \n
  • \n

    CRC32C

    \n
  • \n
  • \n

    SHA1

    \n
  • \n
  • \n

    SHA256

    \n
  • \n
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
    \n
  • \n

    \n CRC32\n

    \n
  • \n
  • \n

    \n CRC32C\n

    \n
  • \n
  • \n

    \n SHA1\n

    \n
  • \n
  • \n

    \n SHA256\n

    \n
  • \n
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, @@ -30739,7 +30856,7 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "

Adds an object to a bucket.

\n \n
    \n
  • \n

    Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.

    \n
  • \n
  • \n

    If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All\n objects written to the bucket by any account will be owned by the bucket owner.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n

Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:

\n
    \n
  • \n

    \n S3 Object Lock - To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock in the Amazon S3 User Guide.

    \n \n

    This functionality is not supported for directory buckets.

    \n
    \n
  • \n
  • \n

    \n S3 Versioning - When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID\n of that object being stored in Amazon S3. \n You can retrieve, replace, or delete any version of the object. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets in the Amazon S3\n User Guide. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.

    \n \n

    This functionality is not supported for directory buckets.

    \n
    \n
  • \n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The following permissions are required in your policies when your \n PutObject request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:PutObject\n - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object\n to it.

      \n
    • \n
    • \n

      \n \n s3:PutObjectAcl\n - To successfully change the objects ACL of your PutObject request, you must have the s3:PutObjectAcl.

      \n
    • \n
    • \n

      \n \n s3:PutObjectTagging\n - To successfully set the tag-set with your PutObject request, you\n must have the s3:PutObjectTagging.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n
  • \n
\n
\n
Data integrity with Content-MD5
\n
\n
    \n
  • \n

    \n General purpose bucket - To ensure that data is not corrupted traversing the network, use the\n Content-MD5 header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, \n you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.

    \n
  • \n
  • \n

    \n Directory bucket - This functionality is not supported for directory buckets.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

For more information about related Amazon S3 APIs, see the following:

\n ", + "smithy.api#documentation": "

Adds an object to a bucket.

\n \n
    \n
  • \n

    Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the\n entire object to the bucket. You cannot use PutObject to only update a\n single piece of metadata for an existing object. You must put the entire object with\n updated metadata if you want to update some values.

    \n
  • \n
  • \n

    If your bucket uses the bucket owner enforced setting for Object Ownership, ACLs are disabled and no longer affect permissions. All\n objects written to the bucket by any account will be owned by the bucket owner.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name\n . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n

Amazon S3 is a distributed system. If it receives multiple write requests for the same object\n simultaneously, it overwrites all but the last object written. However, Amazon S3 provides features that can modify this behavior:

\n
    \n
  • \n

    \n S3 Object Lock - To prevent objects from\n being deleted or overwritten, you can use Amazon S3 Object\n Lock in the Amazon S3 User Guide.

    \n \n

    This functionality is not supported for directory buckets.

    \n
    \n
  • \n
  • \n

    \n S3 Versioning - When you enable\n versioning for a bucket, if Amazon S3 receives multiple write requests for the same object\n simultaneously, it stores all versions of the objects. For each write request that is made to the same object, Amazon S3 automatically generates a unique version ID\n of that object being stored in Amazon S3. \n You can retrieve, replace, or delete any version of the object. For more information about versioning, see\n Adding Objects to\n Versioning-Enabled Buckets in the Amazon S3\n User Guide. For information about returning the versioning state\n of a bucket, see GetBucketVersioning.

    \n \n

    This functionality is not supported for directory buckets.

    \n
    \n
  • \n
\n
\n
Permissions
\n
\n
    \n
  • \n

    \n General purpose bucket permissions - The following permissions are required in your policies when your \n PutObject request includes specific headers.

    \n
      \n
    • \n

      \n \n s3:PutObject\n - To successfully complete the PutObject request, you must always have the s3:PutObject permission on a bucket to add an object\n to it.

      \n
    • \n
    • \n

      \n \n s3:PutObjectAcl\n - To successfully change the objects ACL of your PutObject request, you must have the s3:PutObjectAcl.

      \n
    • \n
    • \n

      \n \n s3:PutObjectTagging\n - To successfully set the tag-set with your PutObject request, you\n must have the s3:PutObjectTagging.

      \n
    • \n
    \n
  • \n
  • \n

    \n Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the \n CreateSession\n API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. \nAmazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see \n CreateSession\n .

    \n

    If the object is encrypted with\n SSE-KMS, you must also have the\n kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

    \n
  • \n
\n
\n
Data integrity with Content-MD5
\n
\n
    \n
  • \n

    \n General purpose bucket - To ensure that data is not corrupted traversing the network, use the\n Content-MD5 header. When you use this header, Amazon S3 checks the object\n against the provided MD5 value and, if they do not match, Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 digest, \n you can calculate the MD5 while putting the object to Amazon S3 and compare the returned ETag to\n the calculated MD5 value.

    \n
  • \n
  • \n

    \n Directory bucket - This functionality is not supported for directory buckets.

    \n
  • \n
\n
\n
HTTP Host header syntax
\n
\n

\n Directory buckets - The HTTP Host header syntax is \n Bucket_name.s3express-az_id.region.amazonaws.com.

\n
\n
\n

For more information about related Amazon S3 APIs, see the following:
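A hedged Soto sketch of the upload path described above, asking the SDK to attach a CRC32 checksum and then reading the fields echoed back. Bucket, key, and body are placeholders; member names assume the usual camelCase mapping, and the body initializer assumes soto-core's ByteBuffer-backed AWSHTTPBody.

```swift
import SotoS3

/// Minimal sketch: upload a small object with an SDK-computed CRC32 checksum.
func putObjectExample() async throws {
    let client = AWSClient()
    let s3 = S3(client: client, region: .uswest2)

    let response = try await s3.putObject(.init(
        body: .init(buffer: ByteBuffer(string: "hello from Soto")),
        bucket: "amzn-s3-demo-bucket",      // placeholder
        checksumAlgorithm: .crc32,          // x-amz-sdk-checksum-algorithm: CRC32
        contentType: "text/plain",
        key: "examples/hello.txt"           // placeholder
    ))

    print(response.eTag ?? "", response.checksumCRC32 ?? "")
    // For SSE-KMS uploads, these fields report the algorithm and Bucket Key use.
    print(response.serverSideEncryption?.rawValue ?? "unknown",
          response.bucketKeyEnabled ?? false)

    try await client.shutdown()
}
```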

\n ", "smithy.api#examples": [ { "title": "To create an object.", @@ -31240,14 +31357,14 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded\n with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated\n with multipart uploads, see \n Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, @@ -31268,7 +31385,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n \n

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

\n
", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3.

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -31296,21 +31413,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If x-amz-server-side-encryption has a valid value of aws:kms\n or aws:kms:dsse, this header indicates the ID of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

If present, indicates the ID of the KMS key that was used for object encryption.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The\n value of this header is a base64-encoded UTF-8 string holding JSON with the encryption\n context key-value pairs. This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject or CopyObject\n operations on this object.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

If present, indicates the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets\n passed on to Amazon Web Services KMS for future GetObject \n operations on this object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Indicates whether the uploaded object uses an S3 Bucket Key for server-side encryption\n with Key Management Service (KMS) keys (SSE-KMS).

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -31392,7 +31509,7 @@ "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { - "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to\n RFC 1864. This header can be used as a message integrity check to verify that the data is\n the same data that was originally sent. Although it is optional, we recommend using the\n Content-MD5 mechanism as an end-to-end integrity check. For more information about REST\n request authentication, see REST Authentication.

\n \n

The Content-MD5 header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information about Amazon S3 Object Lock, see Amazon S3 Object Lock\n Overview in the Amazon S3 User Guide.

\n
\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

The base64-encoded 128-bit MD5 digest of the message (without the headers) according to\n RFC 1864. This header can be used as a message integrity check to verify that the data is\n the same data that was originally sent. Although it is optional, we recommend using the\n Content-MD5 mechanism as an end-to-end integrity check. For more information about REST\n request authentication, see REST Authentication.

\n \n

The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information, see Uploading objects to an Object Lock enabled bucket\n in the Amazon S3 User Guide.

\n
\n \n

This functionality is not supported for directory buckets.

\n
", "smithy.api#httpHeader": "Content-MD5" } }, @@ -31406,21 +31523,21 @@ "ChecksumAlgorithm": { "target": "com.amazonaws.s3#ChecksumAlgorithm", "traits": { - "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
    \n
  • \n

    CRC32

    \n
  • \n
  • \n

    CRC32C

    \n
  • \n
  • \n

    SHA1

    \n
  • \n
  • \n

    SHA256

    \n
  • \n
\n

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

\n
", + "smithy.api#documentation": "

Indicates the algorithm used to create the checksum for the object when you use the SDK. This header will not provide any\n additional functionality if you don't use the SDK. When you send this header, there must be a corresponding x-amz-checksum-algorithm\n or\n x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request.

\n

For the x-amz-checksum-algorithm\n header, replace \n algorithm\n with the supported algorithm from the following list:

\n
  • CRC32
  • CRC32C
  • SHA1
  • SHA256

For more\n information, see Checking object integrity in\n the Amazon S3 User Guide.

\n

If the individual checksum value you provide through x-amz-checksum-algorithm\n doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, Amazon S3 ignores any provided\n ChecksumAlgorithm parameter and uses the checksum algorithm that matches the provided value in x-amz-checksum-algorithm\n .

\n \n

The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any request to upload an\n object with a retention period configured using Amazon S3 Object Lock. For more\n information, see Uploading objects to an Object Lock enabled bucket\n in the Amazon S3 User Guide.

\n
\n

For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's used for performance.

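A hedged sketch of how the checksum selection above surfaces in Soto: the generated PutObjectRequest carries a checksumAlgorithm member that maps to x-amz-sdk-checksum-algorithm, and the SDK computes the matching x-amz-checksum-* value. Bucket and key names are placeholders, and the enum case spellings follow Soto's usual code generation rather than a verified listing.

```swift
import SotoS3

// Sketch: ask the SDK to compute a SHA-256 checksum for the upload.
// Names are hypothetical; verify enum case spellings against the generated S3 module.
func putWithChecksum(s3: S3) async throws {
    let request = S3.PutObjectRequest(
        body: .string("payload that Amazon S3 verifies on receipt"),
        bucket: "example-bucket",
        checksumAlgorithm: .sha256,   // CRC32, CRC32C, SHA1 and SHA256 are the supported values
        key: "data/checksummed.txt"
    )
    _ = try await s3.putObject(request)
}
```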
", "smithy.api#httpHeader": "x-amz-sdk-checksum-algorithm" } }, "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent.\n This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see\n Checking object integrity in the\n Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, @@ -31501,7 +31618,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n

\n General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in\n Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or\n DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side\n encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to\n encrypt data at rest by using server-side encryption with other key options. For more\n information, see Using Server-Side\n Encryption in the Amazon S3 User Guide.

\n

\n Directory buckets - For directory buckets, only the server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.

", + "smithy.api#documentation": "

The server-side encryption algorithm that was used when you store this object in Amazon S3 (for example,\n AES256, aws:kms, aws:kms:dsse).

\n
    \n
  • \n

    \n General purpose buckets - You have four mutually exclusive options to protect data using server-side encryption in\n Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the\n encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or\n DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side\n encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to\n encrypt data at rest by using server-side encryption with other key options. For more\n information, see Using Server-Side\n Encryption in the Amazon S3 User Guide.

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). We recommend that the bucket's default encryption uses the desired encryption configuration and you don't override the bucket default encryption in your \n CreateSession requests or PUT object requests. Then, new objects \n are automatically encrypted with the desired encryption settings. For more\n information, see Protecting data with server-side encryption in the Amazon S3 User Guide. For more information about the encryption overriding behaviors in directory buckets, see Specifying server-side encryption with KMS for new object uploads. \n

    \n

    In the Zonal endpoint API calls (except CopyObject and UploadPartCopy) using the REST API, the encryption request headers must match the encryption settings that are specified in the CreateSession request. \n You can't override the values of the encryption settings (x-amz-server-side-encryption, x-amz-server-side-encryption-aws-kms-key-id, x-amz-server-side-encryption-context, and x-amz-server-side-encryption-bucket-key-enabled) that are specified in the CreateSession request. \n You don't need to explicitly specify these encryption settings values in Zonal endpoint API calls, and \n Amazon S3 will use the encryption settings values from the CreateSession request to protect new objects in the directory bucket. \n

    \n \n

    When you use the CLI or the Amazon Web Services SDKs, for CreateSession, the session token refreshes automatically to avoid service interruptions when a session expires. The CLI or the Amazon Web Services SDKs use the bucket's default encryption configuration for the \n CreateSession request. It's not supported to override the encryption settings values in the CreateSession request. \n So in the Zonal endpoint API calls (except CopyObject and UploadPartCopy), \n the encryption request headers must match the default encryption configuration of the directory bucket.\n\n

    \n
    \n
  • \n
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -31543,21 +31660,21 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If x-amz-server-side-encryption has a valid value of aws:kms\n or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS)\n symmetric encryption customer managed key that was used for the object. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide\n x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data. If the KMS key does not exist in the same\n account that's issuing the command, you must use the full ARN and not just the ID.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object encryption. If the KMS key doesn't exist in the same\n account that's issuing the command, you must use the full Key ARN not the Key ID.

\n

\n General purpose buckets - If you specify x-amz-server-side-encryption with aws:kms or aws:kms:dsse, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the KMS \n key to use. If you specify\n x-amz-server-side-encryption:aws:kms or\n x-amz-server-side-encryption:aws:kms:dsse, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key\n (aws/s3) to protect the data.

\n

\n Directory buckets - If you specify x-amz-server-side-encryption with aws:kms, the \n x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned the ID of the KMS \n symmetric encryption customer managed key that's configured for your directory bucket's default encryption setting. \n If you want to specify the \n x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only specify it with the ID (Key ID or Key ARN) of the KMS \n customer managed key that's configured for your directory bucket's default encryption setting. Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

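To make the SSE-KMS request headers above concrete, a hedged Soto sketch follows. The bucket, key, and KMS key ARN are placeholders, and the generated member names (serverSideEncryption, ssekmsKeyId, bucketKeyEnabled) are assumed from Soto's usual casing rules.

```swift
import SotoS3

// Sketch: request SSE-KMS with a customer managed key and an S3 Bucket Key.
// All identifiers below are placeholders.
func putEncrypted(s3: S3) async throws {
    let request = S3.PutObjectRequest(
        body: .string("secret payload"),
        bucket: "example-bucket",
        bucketKeyEnabled: true,          // x-amz-server-side-encryption-bucket-key-enabled
        key: "secure/object.txt",
        serverSideEncryption: .awsKms,   // x-amz-server-side-encryption: aws:kms
        ssekmsKeyId: "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
    )
    _ = try await s3.putObject(request)
}
```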
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "SSEKMSEncryptionContext": { "target": "com.amazonaws.s3#SSEKMSEncryptionContext", "traits": { - "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context to use for object encryption. The value of\n this header is a base64-encoded UTF-8 string holding JSON with the encryption context\n key-value pairs. This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject or CopyObject operations on\n this object. This value must be explicitly added during CopyObject operations.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Specifies the Amazon Web Services KMS Encryption Context as an additional encryption context to use for object encryption. The value of\n this header is a Base64-encoded string of a UTF-8 encoded JSON, which contains the encryption context as key-value pairs. \n This value is stored as object metadata and automatically gets passed on\n to Amazon Web Services KMS for future GetObject operations on\n this object.

\n

\n General purpose buckets - This value must be explicitly added during CopyObject operations if you want an additional encryption context for your object. For more information, see Encryption context in the Amazon S3 User Guide.

\n

\n Directory buckets - You can optionally provide an explicit encryption context value. The value must match the default encryption context - the bucket Amazon Resource Name (ARN). An additional encryption context value is not supported.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-context" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS). Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS.

\n

Specifying this header with a PUT action doesn’t affect bucket-level settings for S3\n Bucket Key.

\n \n

This functionality is not supported for directory buckets.

\n
", + "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with\n server-side encryption using Key Management Service (KMS) keys (SSE-KMS).

\n

\n General purpose buckets - Setting this header to\n true causes Amazon S3 to use an S3 Bucket Key for object encryption with\n SSE-KMS. Also, specifying this header with a PUT action doesn't affect bucket-level settings for S3\n Bucket Key.

\n

Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can't be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -32185,7 +32302,7 @@ "ExistingObjectReplication": { "target": "com.amazonaws.s3#ExistingObjectReplication", "traits": { - "smithy.api#documentation": "

Optional configuration to replicate existing source bucket objects. For more\n information, see Replicating Existing Objects in the Amazon S3 User Guide.\n

" + "smithy.api#documentation": "

Optional configuration to replicate existing source bucket objects.\n

\n \n

This parameter is no longer supported. To replicate existing objects, see Replicating existing objects with S3 Batch Replication in the Amazon S3 User Guide.

\n
" } }, "Destination": { @@ -32226,7 +32343,7 @@ } }, "com.amazonaws.s3#ReplicationRuleFilter": { - "type": "union", + "type": "structure", "members": { "Prefix": { "target": "com.amazonaws.s3#Prefix", @@ -32469,7 +32586,7 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n \n

The SELECT job type for the RestoreObject operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Restores an archived copy of an object back into Amazon S3

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

This action performs the following types of requests:

\n
    \n
  • \n

    \n restore an archive - Restore an archived object

    \n
  • \n
\n

For more information about the S3 structure in the request body, see the\n following:

\n \n
\n
Permissions
\n
\n

To use this operation, you must have permissions to perform the\n s3:RestoreObject action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n
\n
Restoring objects
\n
\n

Objects that you archive to the S3 Glacier Flexible Retrieval Flexible Retrieval\n or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the\n S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive\n storage classes, you must first initiate a restore request, and then wait until a\n temporary copy of the object is available. If you want a permanent copy of the\n object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket.\n To access an archived object, you must restore the object for the duration (number\n of days) that you specify. For objects in the Archive Access or Deep Archive\n Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request,\n and then wait until the object is moved into the Frequent Access tier.

\n

To restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.

\n

When restoring an archived object, you can specify one of the following data\n access tier options in the Tier element of the request body:

\n
    \n
  • \n

    \n Expedited - Expedited retrievals allow you to quickly access\n your data stored in the S3 Glacier Flexible Retrieval Flexible Retrieval\n storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests\n for restoring archives are required. For all but the largest archived\n objects (250 MB+), data accessed using Expedited retrievals is typically\n made available within 1–5 minutes. Provisioned capacity ensures that\n retrieval capacity for Expedited retrievals is available when you need it.\n Expedited retrievals and provisioned capacity are not available for objects\n stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.

    \n
  • \n
  • \n

    \n Standard - Standard retrievals allow you to access any of\n your archived objects within several hours. This is the default option for\n retrieval requests that do not specify the retrieval option. Standard\n retrievals typically finish within 3–5 hours for objects stored in the\n S3 Glacier Flexible Retrieval Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored\n in S3 Intelligent-Tiering.

    \n
  • \n
  • \n

    \n Bulk - Bulk retrievals free for objects stored in the\n S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes,\n enabling you to retrieve large amounts, even petabytes, of data at no cost.\n Bulk retrievals typically finish within 5–12 hours for objects stored in the\n S3 Glacier Flexible Retrieval Flexible Retrieval storage class or\n S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost\n retrieval option when restoring objects from\n S3 Glacier Deep Archive. They typically finish within 48 hours for\n objects stored in the S3 Glacier Deep Archive storage class or\n S3 Intelligent-Tiering Deep Archive tier.

    \n
  • \n
\n

For more information about archive retrieval options and provisioned capacity\n for Expedited data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.

\n

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.

\n

To get the status of object restoration, you can send a HEAD\n request. Operations return the x-amz-restore header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.

\n

After restoring an archived object, you can update the restoration period by\n reissuing the request with a new period. Amazon S3 updates the restoration period\n relative to the current time and charges only for the request-there are no\n data transfer charges. You cannot update the restoration period when Amazon S3 is\n actively processing your current restore request for the object.

\n

If your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.

\n
\n
Responses
\n
\n

A successful action returns either the 200 OK or 202\n Accepted status code.

\n
    \n
  • \n

    If the object is not previously restored, then Amazon S3 returns 202\n Accepted in the response.

    \n
  • \n
  • \n

    If the object is previously restored, Amazon S3 returns 200 OK in\n the response.

    \n
  • \n
\n
    \n
  • \n

    Special errors:

    \n
      \n
    • \n

      \n Code: RestoreAlreadyInProgress\n

      \n
    • \n
    • \n

      \n Cause: Object restore is already in progress.\n

      \n
    • \n
    • \n

      \n HTTP Status Code: 409 Conflict\n

      \n
    • \n
    • \n

      \n SOAP Fault Code Prefix: Client\n

      \n
    • \n
    \n
  • \n
  • \n
      \n
    • \n

      \n Code: GlacierExpeditedRetrievalNotAvailable\n

      \n
    • \n
    • \n

      \n Cause: expedited retrievals are currently not available.\n Try again later. (Returned if there is insufficient capacity to\n process the Expedited request. This error applies only to Expedited\n retrievals and not to S3 Standard or Bulk retrievals.)\n

      \n
    • \n
    • \n

      \n HTTP Status Code: 503\n

      \n
    • \n
    • \n

      \n SOAP Fault Code Prefix: N/A\n

      \n
    • \n
    \n
  • \n
\n
\n
\n

The following operations are related to RestoreObject:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

Restores an archived copy of an object back into Amazon S3

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

This action performs the following types of requests:

\n
    \n
  • \n

    \n restore an archive - Restore an archived object

    \n
  • \n
\n

For more information about the S3 structure in the request body, see the\n following:

\n \n
\n
Permissions
\n
\n

To use this operation, you must have permissions to perform the\n s3:RestoreObject action. The bucket owner has this permission by\n default and can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the\n Amazon S3 User Guide.

\n
\n
Restoring objects
\n
\n

Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

\n

To restore a specific object version, you can provide a version ID. If you\n don't provide a version ID, Amazon S3 restores the current version.

\n

When restoring an archived object, you can specify one of the following data\n access tier options in the Tier element of the request body:

\n
    \n
  • \n

    Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

    \n
  • \n
  • \n

    Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.

    \n
  • \n
  • \n

    Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.

    \n
  • \n
\n

For more information about archive retrieval options and provisioned capacity\n for Expedited data access, see Restoring Archived\n Objects in the Amazon S3 User Guide.

\n

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster\n speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the\n Amazon S3 User Guide.

\n

To get the status of object restoration, you can send a HEAD\n request. Operations return the x-amz-restore header, which provides\n information about the restoration status, in the response. You can use Amazon S3 event\n notifications to notify you when a restore is initiated or completed. For more\n information, see Configuring Amazon S3 Event\n Notifications in the Amazon S3 User Guide.

\n

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

\n

If your bucket has a lifecycle configuration with a rule that includes an\n expiration action, the object expiration overrides the life span that you specify\n in a restore request. For example, if you restore an object copy for 10 days, but\n the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days.\n For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle\n Management in Amazon S3 User Guide.

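A hedged Soto sketch of the restore request described above, setting Days and the retrieval Tier; the bucket and key are placeholders and the shape names follow Soto's normal code generation.

```swift
import SotoS3

// Sketch: restore an archived object for one day using the Standard tier.
// Bucket and key are placeholders.
func restoreArchivedCopy(s3: S3) async throws {
    let restore = S3.RestoreRequest(
        days: 1,
        glacierJobParameters: .init(tier: .standard)
    )
    let request = S3.RestoreObjectRequest(
        bucket: "example-archive-bucket",
        key: "logs/2023-01-01.gz",
        restoreRequest: restore
    )
    // Returns 202 Accepted for a new restore, 200 OK if the object was already restored.
    _ = try await s3.restoreObject(request)
}
```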
\n
\n
Responses
\n
\n

A successful action returns either the 200 OK or 202\n Accepted status code.

\n
    \n
  • \n

    If the object is not previously restored, then Amazon S3 returns 202\n Accepted in the response.

    \n
  • \n
  • \n

    If the object is previously restored, Amazon S3 returns 200 OK in\n the response.

    \n
  • \n
\n
    \n
  • \n

    Special errors:

    \n
      \n
    • \n

      \n Code: RestoreAlreadyInProgress\n

      \n
    • \n
    • \n

      \n Cause: Object restore is already in progress.\n

      \n
    • \n
    • \n

      \n HTTP Status Code: 409 Conflict\n

      \n
    • \n
    • \n

      \n SOAP Fault Code Prefix: Client\n

      \n
    • \n
    \n
  • \n
  • \n
      \n
    • \n

      \n Code: GlacierExpeditedRetrievalNotAvailable\n

      \n
    • \n
    • \n

      \n Cause: expedited retrievals are currently not available.\n Try again later. (Returned if there is insufficient capacity to\n process the Expedited request. This error applies only to Expedited\n retrievals and not to S3 Standard or Bulk retrievals.)\n

      \n
    • \n
    • \n

      \n HTTP Status Code: 503\n

      \n
    • \n
    • \n

      \n SOAP Fault Code Prefix: N/A\n

      \n
    • \n
    \n
  • \n
\n
\n
\n

The following operations are related to RestoreObject:

\n ", "smithy.api#examples": [ { "title": "To restore an archived object", @@ -32597,7 +32714,7 @@ "Type": { "target": "com.amazonaws.s3#RestoreRequestType", "traits": { - "smithy.api#documentation": "\n

Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Type of restore request.

" + "smithy.api#documentation": "

Type of restore request.

" } }, "Tier": { @@ -32615,7 +32732,7 @@ "SelectParameters": { "target": "com.amazonaws.s3#SelectParameters", "traits": { - "smithy.api#documentation": "\n

Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Describes the parameters for Select job types.

" + "smithy.api#documentation": "

Describes the parameters for Select job types.

" } }, "OutputLocation": { @@ -32840,7 +32957,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n \n

The SelectObjectContent operation is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the operation as usual. Learn more\n

\n
\n

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.

\n

\n
\n
Permissions
\n
\n

You must have the s3:GetObject permission for this operation. Amazon S3\n Select does not support anonymous access. For more information about permissions,\n see Specifying Permissions in\n a Policy in the Amazon S3 User Guide.

\n
\n
Object Data Formats
\n
\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n
    \n
  • \n

    \n CSV, JSON, and Parquet - Objects must be in CSV,\n JSON, or Parquet format.

    \n
  • \n
  • \n

    \n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.

    \n
  • \n
  • \n

    \n GZIP or BZIP2 - CSV and JSON files can be compressed\n using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that\n Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar\n compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support\n whole-object compression for Parquet objects.

    \n
  • \n
  • \n

    \n Server-side encryption - Amazon S3 Select supports\n querying objects that are protected with server-side encryption.

    \n

    For objects that are encrypted with customer-provided encryption keys\n (SSE-C), you must use HTTPS, and you must use the headers that are\n documented in the GetObject. For more\n information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys)\n in the Amazon S3 User Guide.

    \n

    For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and\n Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently,\n so you don't need to specify anything. For more information about\n server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Working with the Response Body
\n
\n

Given the response size is unknown, Amazon S3 Select streams the response as a\n series of messages and includes a Transfer-Encoding header with\n chunked as its value in the response. For more information, see\n Appendix:\n SelectObjectContent\n Response.

\n
\n
GetObject Support
\n
\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n
    \n
  • \n

    \n Range: Although you can specify a scan range for an Amazon S3 Select\n request (see SelectObjectContentRequest - ScanRange in the request\n parameters), you cannot specify the range of bytes of an object to return.\n

    \n
  • \n
  • \n

    The GLACIER, DEEP_ARCHIVE, and\n REDUCED_REDUNDANCY storage classes, or the\n ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class: You cannot\n query objects in the GLACIER, DEEP_ARCHIVE, or\n REDUCED_REDUNDANCY storage classes, nor objects in the\n ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class. For more\n information about storage classes, see Using Amazon S3\n storage classes in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Special Errors
\n
\n

For a list of special errors for this operation, see List of SELECT Object Content Error Codes\n

\n
\n
\n

The following operations are related to SelectObjectContent:

\n ", + "smithy.api#documentation": "\n

This operation is not supported by directory buckets.

\n
\n

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This functionality is not supported for Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select, see Selecting Content from\n Objects and SELECT\n Command in the Amazon S3 User Guide.

\n

\n
\n
Permissions
\n
\n

You must have the s3:GetObject permission for this operation. Amazon S3\n Select does not support anonymous access. For more information about permissions,\n see Specifying Permissions in\n a Policy in the Amazon S3 User Guide.

\n
\n
Object Data Formats
\n
\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n
    \n
  • \n

    \n CSV, JSON, and Parquet - Objects must be in CSV,\n JSON, or Parquet format.

    \n
  • \n
  • \n

    \n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.

    \n
  • \n
  • \n

    \n GZIP or BZIP2 - CSV and JSON files can be compressed\n using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that\n Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar\n compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support\n whole-object compression for Parquet objects.

    \n
  • \n
  • \n

    \n Server-side encryption - Amazon S3 Select supports\n querying objects that are protected with server-side encryption.

    \n

    For objects that are encrypted with customer-provided encryption keys\n (SSE-C), you must use HTTPS, and you must use the headers that are\n documented in the GetObject. For more\n information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys)\n in the Amazon S3 User Guide.

    \n

    For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and\n Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently,\n so you don't need to specify anything. For more information about\n server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Working with the Response Body
\n
\n

Given the response size is unknown, Amazon S3 Select streams the response as a\n series of messages and includes a Transfer-Encoding header with\n chunked as its value in the response. For more information, see\n Appendix:\n SelectObjectContent\n Response.

\n
\n
GetObject Support
\n
\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n
    \n
  • \n

    \n Range: Although you can specify a scan range for an Amazon S3 Select\n request (see SelectObjectContentRequest - ScanRange in the request\n parameters), you cannot specify the range of bytes of an object to return.\n

    \n
  • \n
  • \n

    The GLACIER, DEEP_ARCHIVE, and\n REDUCED_REDUNDANCY storage classes, or the\n ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class: You cannot\n query objects in the GLACIER, DEEP_ARCHIVE, or\n REDUCED_REDUNDANCY storage classes, nor objects in the\n ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS access\n tiers of the INTELLIGENT_TIERING storage class. For more\n information about storage classes, see Using Amazon S3\n storage classes in the\n Amazon S3 User Guide.

    \n
  • \n
\n
\n
Special Errors
\n
\n

For a list of special errors for this operation, see List of SELECT Object Content Error Codes\n
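For orientation, a hedged Soto sketch of building a SelectObjectContent request for a CSV object follows. The bucket, key, and SQL text are placeholders, and consuming the event-stream response is intentionally omitted because it is SDK-version specific.

```swift
import SotoS3

// Sketch: query a CSV object with S3 Select. Bucket, key, and the SQL text are
// placeholders; the response arrives as a stream of Records/Stats/End messages.
func buildSelectRequest() -> S3.SelectObjectContentRequest {
    let request = S3.SelectObjectContentRequest(
        bucket: "example-bucket",
        expression: "SELECT s.name FROM S3Object s WHERE s.age > 30",
        expressionType: .sql,
        inputSerialization: .init(csv: .init(fileHeaderInfo: .use)),
        key: "people.csv",
        outputSerialization: .init(csv: .init())
    )
    // Hand this request to the SelectObjectContent operation of your Soto S3 client;
    // reading the chunked event-stream payload is version specific and omitted here.
    return request
}
```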

\n
\n
\n

The following operations are related to SelectObjectContent:

\n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}/{Key+}?select&select-type=2", @@ -32994,7 +33111,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Learn Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Request to filter the contents of an Amazon S3 object based on a simple Structured Query\n Language (SQL) statement. In the request, along with the SQL expression, you must specify a\n data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data\n into records. It returns only records that match the specified SQL expression. You must\n also specify the data serialization format for the response. For more information, see\n S3Select API Documentation.

", + "smithy.api#documentation": "

Request to filter the contents of an Amazon S3 object based on a simple Structured Query\n Language (SQL) statement. In the request, along with the SQL expression, you must specify a\n data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data\n into records. It returns only records that match the specified SQL expression. You must\n also specify the data serialization format for the response. For more information, see\n S3Select API Documentation.

", "smithy.api#input": {} } }, @@ -33018,7 +33135,7 @@ "Expression": { "target": "com.amazonaws.s3#Expression", "traits": { - "smithy.api#documentation": "\n

Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

The expression that is used to query the object.

", + "smithy.api#documentation": "

The expression that is used to query the object.

", "smithy.api#required": {} } }, @@ -33031,7 +33148,7 @@ } }, "traits": { - "smithy.api#documentation": "\n

Amazon S3 Select is no longer available to new customers. Existing customers of Amazon S3 Select can continue to use the feature as usual. Learn more\n

\n
\n

Describes the parameters for Select job types.

\n

Learn How to optimize querying your data in Amazon S3 using\n Amazon Athena, S3 Object Lambda, or client-side filtering.

" + "smithy.api#documentation": "

Describes the parameters for Select job types.

" } }, "com.amazonaws.s3#ServerSideEncryption": { @@ -33063,19 +33180,19 @@ "SSEAlgorithm": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

Server-side encryption algorithm to use for the default encryption.

", + "smithy.api#documentation": "

Server-side encryption algorithm to use for the default encryption.

\n \n

For directory buckets, there are only two supported values for server-side encryption: AES256 and aws:kms.

\n
", "smithy.api#required": {} } }, "KMSMasterKeyID": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default\n encryption. This parameter is allowed if and only if SSEAlgorithm is set to\n aws:kms or aws:kms:dsse.

\n

You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS\n key.

\n
    \n
  • \n

    Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\n

    \n
  • \n
  • \n

    Key Alias: alias/alias-name\n

    \n
  • \n
\n

If you use a key ID, you can run into a LogDestination undeliverable error when creating\n a VPC flow log.

\n

If you are using encryption with cross-account or Amazon Web Services service operations you must use\n a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.

\n \n

Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service\n Developer Guide.

\n
" + "smithy.api#documentation": "

Amazon Web Services Key Management Service (KMS) customer managed key ID to use for the default\n encryption.

\n \n
    \n
  • \n

    \n General purpose buckets - This parameter is allowed if and only if SSEAlgorithm is set to\n aws:kms or aws:kms:dsse.

    \n
  • \n
  • \n

    \n Directory buckets - This parameter is allowed if and only if SSEAlgorithm is set to\n aws:kms.

    \n
  • \n
\n
\n

You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS\n key.

\n
  • Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
  • Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
  • Key Alias: alias/alias-name
\n

If you are using encryption with cross-account or Amazon Web Services service operations, you must use\n a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.

\n \n
    \n
  • \n

    \n General purpose buckets - If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner. Also, if you use a key ID, you can run into a LogDestination undeliverable error when creating\n a VPC flow log. \n

    \n
  • \n
  • \n

    Directory buckets - When you specify a KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.

    \n
  • \n
\n
\n \n

Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service\n Developer Guide.

\n
" } } }, "traits": { - "smithy.api#documentation": "

Describes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates\n an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted\n with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more\n information, see PUT Bucket encryption in\n the Amazon S3 API Reference.

\n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n
" + "smithy.api#documentation": "

Describes the default server-side encryption to apply to new objects in the bucket. If a\n PUT Object request doesn't specify any server-side encryption, this default encryption will\n be applied. For more\n information, see PutBucketEncryption.

\n \n
    \n
  • \n

    \n General purpose buckets - If you don't specify a customer managed key at configuration, Amazon S3 automatically creates\n an Amazon Web Services KMS key (aws/s3) in your Amazon Web Services account the first time that you add an object encrypted\n with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS.

    \n
  • \n
  • \n

    \n Directory buckets - Your SSE-KMS configuration can only support 1 customer managed key per directory bucket for the lifetime of the bucket. \nThe Amazon Web Services managed key (aws/s3) isn't supported. \n

    \n
  • \n
  • \n

    \n Directory buckets - For directory buckets, there are only two supported options for server-side encryption: SSE-S3 and SSE-KMS.

    \n
  • \n
\n
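A hedged Soto sketch of applying this default encryption through PutBucketEncryption: it sets SSE-KMS with a customer managed key and enables the S3 Bucket Key. The bucket name and key ARN are placeholders, and member spellings such as kmsMasterKeyID are assumed from Soto's generated casing.

```swift
import SotoS3

// Sketch: set SSE-KMS with a customer managed key as the bucket default.
// Bucket name and KMS key ARN are placeholders.
func setDefaultEncryption(s3: S3) async throws {
    let byDefault = S3.ServerSideEncryptionByDefault(
        kmsMasterKeyID: "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
        sseAlgorithm: .awsKms
    )
    let configuration = S3.ServerSideEncryptionConfiguration(
        rules: [.init(applyServerSideEncryptionByDefault: byDefault, bucketKeyEnabled: true)]
    )
    let request = S3.PutBucketEncryptionRequest(
        bucket: "example-bucket",
        serverSideEncryptionConfiguration: configuration
    )
    _ = try await s3.putBucketEncryption(request)
}
```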
" } }, "com.amazonaws.s3#ServerSideEncryptionConfiguration": { @@ -33107,12 +33224,12 @@ "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS\n (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the\n BucketKeyEnabled element to true causes Amazon S3 to use an S3\n Bucket Key. By default, S3 Bucket Key is not enabled.

\n

For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.

" + "smithy.api#documentation": "

Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS\n (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the\n BucketKeyEnabled element to true causes Amazon S3 to use an S3\n Bucket Key.

\n \n
    \n
  • \n

    \n General purpose buckets - By default, S3 Bucket Key is not enabled. For more information, see Amazon S3 Bucket Keys in the\n Amazon S3 User Guide.

    \n
  • \n
  • \n

    Directory buckets - S3 Bucket Keys are always enabled for GET and PUT operations in a directory bucket and can't be disabled. S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through CopyObject, UploadPartCopy, the Copy operation in Batch Operations, or import jobs. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

    \n
  • \n
\n
" } } }, "traits": { - "smithy.api#documentation": "

Specifies the default server-side encryption configuration.

\n \n

If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

\n
" + "smithy.api#documentation": "

Specifies the default server-side encryption configuration.

\n \n
    \n
  • \n

    \n General purpose buckets - If you're specifying a customer managed KMS key, we recommend using a fully qualified\n KMS key ARN. If you use a KMS key alias instead, then KMS resolves the key within the\n requester’s account. This behavior can result in data that's encrypted with a KMS key\n that belongs to the requester, and not the bucket owner.

    \n
  • \n
  • \n

    Directory buckets - When you specify a KMS customer managed key for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported.

    \n
  • \n
\n
" } }, "com.amazonaws.s3#ServerSideEncryptionRules": { @@ -33164,7 +33281,7 @@ } }, "traits": { - "smithy.api#documentation": "

The established temporary security credentials of the session.

\n \n

\n Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint APIs on directory buckets.

\n
" + "smithy.api#documentation": "

The established temporary security credentials of the session.

\n \n

\n Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint API operations on directory buckets.

\n
" } }, "com.amazonaws.s3#SessionExpiration": { @@ -33680,6 +33797,23 @@ "smithy.api#documentation": "

Specifies when an object transitions to a specified storage class. For more information\n about Amazon S3 lifecycle configuration rules, see Transitioning\n Objects Using Amazon S3 Lifecycle in the Amazon S3 User Guide.

" } }, + "com.amazonaws.s3#TransitionDefaultMinimumObjectSize": { + "type": "enum", + "members": { + "varies_by_storage_class": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "varies_by_storage_class" + } + }, + "all_storage_classes_128K": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "all_storage_classes_128K" + } + } + } + }, "com.amazonaws.s3#TransitionList": { "type": "list", "member": { @@ -33768,7 +33902,7 @@ "aws.protocols#httpChecksum": { "requestAlgorithmMember": "ChecksumAlgorithm" }, - "smithy.api#documentation": "

Uploads a part in a multipart upload.

In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.

Data integrity
General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).

Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.

Encryption
  • General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).
    Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
    If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.
    • x-amz-server-side-encryption-customer-algorithm
    • x-amz-server-side-encryption-customer-key
    • x-amz-server-side-encryption-customer-key-MD5
  • Directory bucket - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

Special errors
  • Error Code: NoSuchUpload
    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
    • HTTP Status Code: 404 Not Found
    • SOAP Fault Code Prefix: Client

HTTP Host header syntax
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPart:

\n ", + "smithy.api#documentation": "

Uploads a part in a multipart upload.

In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. The Amazon Web Services CLI or SDKs create a session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession.
    If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

Data integrity
General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).

Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.

Encryption
  • General purpose bucket - Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C).
    Server-side encryption is supported by the S3 Multipart Upload operations. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
    If you request server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following request headers.
    • x-amz-server-side-encryption-customer-algorithm
    • x-amz-server-side-encryption-customer-key
    • x-amz-server-side-encryption-customer-key-MD5
    For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.
  • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms).

Special errors
  • Error Code: NoSuchUpload
    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
    • HTTP Status Code: 404 Not Found
    • SOAP Fault Code Prefix: Client

HTTP Host header syntax
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPart:

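To make the UploadPart flow described above concrete, here is a minimal Soto sketch (not part of this diff) that initiates a multipart upload, uploads a single part, and completes the upload. Member labels mirror the Smithy model; the payload type (AWSHTTPBody here) and initializer labels depend on the Soto release and are assumptions, and the bucket and key are hypothetical.

import NIOCore
import SotoS3

// Minimal sketch: upload one part of a multipart upload, then complete it so the
// stored parts stop accruing charges (see the note in the documentation above).
let client = AWSClient(credentialProvider: .default)
let s3 = S3(client: client, region: .useast1)

let bucket = "amzn-s3-demo-bucket"   // hypothetical bucket
let key = "large-object.bin"         // hypothetical key

// 1. Initiate the multipart upload and keep the returned upload ID.
let created = try await s3.createMultipartUpload(.init(bucket: bucket, key: key))
guard let uploadId = created.uploadId else { fatalError("no upload ID returned") }

// 2. Upload part number 1. Part numbers run from 1 to 10,000; re-using a number
//    overwrites the previously uploaded part.
let buffer = ByteBuffer(repeating: 0, count: 5 * 1024 * 1024) // 5 MiB of example data
let part = try await s3.uploadPart(.init(
    body: .init(buffer: buffer),
    bucket: bucket,
    key: key,
    partNumber: 1,
    uploadId: uploadId
))

// 3. Complete (or abort) the upload.
_ = try await s3.completeMultipartUpload(.init(
    bucket: bucket,
    key: key,
    multipartUpload: .init(parts: [.init(eTag: part.eTag, partNumber: 1)]),
    uploadId: uploadId
))
try await client.shutdown()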
\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}/{Key+}?x-id=UploadPart", @@ -33785,7 +33919,7 @@ "target": "com.amazonaws.s3#UploadPartCopyOutput" }, "traits": { - "smithy.api#documentation": "

Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request.

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.

For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Authentication and authorization
All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions
You must have READ access to the source object and WRITE access to the destination bucket.
  • General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation.
    • If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied.
    • If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket.
    • To perform a multipart upload with encryption using an Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
  • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation.
    • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.
    • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination.
    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

Encryption
  • General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
  • Directory buckets - For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.

Special errors
  • Error Code: NoSuchUpload
    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
    • HTTP Status Code: 404 Not Found
  • Error Code: InvalidRequest
    • Description: The specified copy source is not supported as a byte-range copy source.
    • HTTP Status Code: 400 Bad Request

HTTP Host header syntax
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPartCopy:

\n ", + "smithy.api#documentation": "

Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request.

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.

For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Authentication and authorization
All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.

The Amazon Web Services CLI or SDKs handle authentication and authorization on your behalf.

Permissions
You must have READ access to the source object and WRITE access to the destination bucket.
  • General purpose bucket permissions - You must have the permissions in a policy based on the bucket types of your source bucket and destination bucket in an UploadPartCopy operation.
    • If the source object is in a general purpose bucket, you must have the s3:GetObject permission to read the source object that is being copied.
    • If the destination bucket is a general purpose bucket, you must have the s3:PutObject permission to write the object copy to the destination bucket.
    • To perform a multipart upload with encryption using a Key Management Service key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The requester must also have permissions for the kms:GenerateDataKey action for the CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information about KMS permissions, see Protecting data using server-side encryption with KMS in the Amazon S3 User Guide. For information about the permissions required to use the multipart upload API, see Multipart upload and permissions and Multipart upload API and permissions in the Amazon S3 User Guide.
  • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in an UploadPartCopy operation.
    • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.
    • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination.
    If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.
    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

Encryption
  • General purpose buckets - For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
  • Directory buckets - For directory buckets, there are only two supported options for server-side encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) and server-side encryption with KMS keys (SSE-KMS) (aws:kms). For more information, see Protecting data with server-side encryption in the Amazon S3 User Guide.
    For directory buckets, when you perform a CreateMultipartUpload operation and an UploadPartCopy operation, the request headers you provide in the CreateMultipartUpload request must match the default encryption configuration of the destination bucket.
    S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through UploadPartCopy. In this case, Amazon S3 makes a call to KMS every time a copy request is made for a KMS-encrypted object.

Special errors
  • Error Code: NoSuchUpload
    • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
    • HTTP Status Code: 404 Not Found
  • Error Code: InvalidRequest
    • Description: The specified copy source is not supported as a byte-range copy source.
    • HTTP Status Code: 400 Bad Request

HTTP Host header syntax
Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPartCopy:

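As a companion to the UploadPart sketch above, the following minimal Soto sketch (not part of this diff) copies a byte range of an existing object into part 2 of an open multipart upload, which is the flow the UploadPartCopy documentation describes. The bucket names, key, and upload ID are hypothetical, and the generated member labels are assumed from the Smithy model (CopySource maps to the x-amz-copy-source header).

import SotoS3

// Minimal sketch: copy the first 5 MiB of an existing object into part 2.
let client = AWSClient(credentialProvider: .default)
let s3 = S3(client: client, region: .useast1)

let copied = try await s3.uploadPartCopy(.init(
    bucket: "amzn-s3-demo-destination-bucket",                // hypothetical destination bucket
    copySource: "amzn-s3-demo-source-bucket/existing-object", // x-amz-copy-source
    copySourceRange: "bytes=0-5242879",                       // x-amz-copy-source-range
    key: "large-object.bin",
    partNumber: 2,
    uploadId: "EXAMPLE-UPLOAD-ID"                             // from CreateMultipartUpload
))
print(copied.copyPartResult?.eTag ?? "no ETag returned")
try await client.shutdown()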
\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}/{Key+}?x-id=UploadPartCopy", @@ -33818,7 +33952,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -33839,14 +33973,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

This functionality is not supported for directory buckets.
", + "smithy.api#documentation": "

If present, indicates the ID of the KMS key that was used for object encryption.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

This functionality is not supported for directory buckets.
", + "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -34015,7 +34149,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).

For directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
", + "smithy.api#documentation": "

The server-side encryption algorithm used when you store this object in Amazon S3 (for example, AES256, aws:kms).

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -34029,14 +34163,14 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32 checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The base64-encoded, 32-bit CRC-32C checksum of the object. This will only be present if it was uploaded with the object. When you use an API operation on an object that was uploaded using multipart uploads, this value may not be a direct checksum value of the full object. Instead, it's a calculation based on the checksum values of each individual part. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, @@ -34071,14 +34205,14 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, indicates the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.

This functionality is not supported for directory buckets.
", + "smithy.api#documentation": "

If present, indicates the ID of the KMS key that was used for object encryption.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, "BucketKeyEnabled": { "target": "com.amazonaws.s3#BucketKeyEnabled", "traits": { - "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

This functionality is not supported for directory buckets.
", + "smithy.api#documentation": "

Indicates whether the multipart upload uses an S3 Bucket Key for server-side encryption with Key Management Service (KMS) keys (SSE-KMS).

", "smithy.api#httpHeader": "x-amz-server-side-encryption-bucket-key-enabled" } }, @@ -34139,14 +34273,14 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC-32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.

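Continuing the UploadPart sketch from earlier, the following fragment (not part of this diff) shows how a caller might supply the x-amz-checksum-crc32 value documented above so that S3 can verify the part data. The checksum string is a placeholder: in real code you would compute the CRC-32 of the part data and base64-encode the 4-byte big-endian result yourself. The variables s3, partBuffer, and uploadId are assumed to come from the earlier sketch.

// Minimal sketch: send a precomputed, base64-encoded CRC-32 with an UploadPart request.
let part = try await s3.uploadPart(.init(
    body: .init(buffer: partBuffer),   // ByteBuffer prepared earlier
    bucket: "amzn-s3-demo-bucket",     // hypothetical bucket
    checksumCRC32: "pUNXRw==",         // hypothetical base64-encoded CRC-32 of the part data
    key: "large-object.bin",
    partNumber: 1,
    uploadId: uploadId                 // from CreateMultipartUpload
))
print(part.checksumCRC32 ?? "no checksum echoed back")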
", "smithy.api#httpHeader": "x-amz-checksum-crc32c" } }, @@ -34441,14 +34575,14 @@ "ChecksumCRC32": { "target": "com.amazonaws.s3#ChecksumCRC32", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.

Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.

Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.

", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-checksum-crc32" } }, "ChecksumCRC32C": { "target": "com.amazonaws.s3#ChecksumCRC32C", "traits": { - "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.

Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.

", + "smithy.api#documentation": "

This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.

Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.

", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-checksum-crc32c" } }, diff --git a/models/sagemaker-metrics.json b/models/sagemaker-metrics.json index 0d050f622a..0c513677fc 100644 --- a/models/sagemaker-metrics.json +++ b/models/sagemaker-metrics.json @@ -29,6 +29,53 @@ ] }, "shapes": { + "com.amazonaws.sagemakermetrics#BatchGetMetrics": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemakermetrics#BatchGetMetricsRequest" + }, + "output": { + "target": "com.amazonaws.sagemakermetrics#BatchGetMetricsResponse" + }, + "traits": { + "smithy.api#documentation": "

Used to retrieve training metrics from SageMaker.

", + "smithy.api#http": { + "method": "POST", + "uri": "/BatchGetMetrics", + "code": 200 + } + } + }, + "com.amazonaws.sagemakermetrics#BatchGetMetricsRequest": { + "type": "structure", + "members": { + "MetricQueries": { + "target": "com.amazonaws.sagemakermetrics#MetricQueryList", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

Queries made to retrieve training metrics from SageMaker.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.sagemakermetrics#BatchGetMetricsResponse": { + "type": "structure", + "members": { + "MetricQueryResults": { + "target": "com.amazonaws.sagemakermetrics#MetricQueryResultList", + "traits": { + "smithy.api#documentation": "

The results of a query to retrieve training metrics from SageMaker.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.sagemakermetrics#BatchPutMetrics": { "type": "operation", "input": { @@ -38,7 +85,7 @@ "target": "com.amazonaws.sagemakermetrics#BatchPutMetricsResponse" }, "traits": { - "smithy.api#documentation": "

Used to ingest training metrics into SageMaker. These metrics can be visualized in SageMaker Studio and retrieved with the GetMetrics API.

", + "smithy.api#documentation": "

Used to ingest training metrics into SageMaker. These metrics can be visualized in SageMaker Studio.

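For orientation, here is a minimal Soto sketch (not part of this diff) that ingests a single training metric with BatchPutMetrics. Note the tightened pattern in this hunk: the trial component name must now be entirely lowercase. The client and member labels are assumed from the Smithy model, and the trial component name is hypothetical.

import Foundation
import SotoSageMakerMetrics

// Minimal sketch: write one metric data point for a trial component.
let client = AWSClient(credentialProvider: .default)
let metrics = SageMakerMetrics(client: client, region: .useast1)

let response = try await metrics.batchPutMetrics(.init(
    metricData: [
        .init(metricName: "train:loss", step: 42, timestamp: Date(), value: 0.173)
    ],
    trialComponentName: "my-trial-component"   // hypothetical; must be lowercase per the new pattern
))
if let errors = response.errors, !errors.isEmpty {
    print("Some metrics were rejected:", errors)
}
try await client.shutdown()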
", "smithy.api#http": { "method": "PUT", "uri": "/BatchPutMetrics", @@ -85,7 +132,7 @@ "target": "com.amazonaws.sagemakermetrics#ExperimentEntityName", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The name of the Trial Component to associate with the metrics.

", + "smithy.api#documentation": "

The name of the Trial Component to associate with the metrics. The Trial Component name must be entirely lowercase.

", "smithy.api#required": {} } }, @@ -126,12 +173,25 @@ "min": 1, "max": 120 }, - "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,119}$" + "smithy.api#pattern": "^[a-z0-9](-*[a-z0-9]){0,119}$" } }, "com.amazonaws.sagemakermetrics#Integer": { "type": "integer" }, + "com.amazonaws.sagemakermetrics#Long": { + "type": "long" + }, + "com.amazonaws.sagemakermetrics#Message": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": ".*" + } + }, "com.amazonaws.sagemakermetrics#MetricName": { "type": "string", "traits": { @@ -142,6 +202,233 @@ "smithy.api#pattern": "^.+$" } }, + "com.amazonaws.sagemakermetrics#MetricQuery": { + "type": "structure", + "members": { + "MetricName": { + "target": "com.amazonaws.sagemakermetrics#MetricName", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The name of the metric to retrieve.

", + "smithy.api#required": {} + } + }, + "ResourceArn": { + "target": "com.amazonaws.sagemakermetrics#SageMakerResourceArn", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The ARN of the SageMaker resource to retrieve metrics for.

", + "smithy.api#required": {} + } + }, + "MetricStat": { + "target": "com.amazonaws.sagemakermetrics#MetricStatistic", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The statistic type of the metrics to retrieve.

", + "smithy.api#required": {} + } + }, + "Period": { + "target": "com.amazonaws.sagemakermetrics#Period", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The time period of metrics to retrieve.

", + "smithy.api#required": {} + } + }, + "XAxisType": { + "target": "com.amazonaws.sagemakermetrics#XAxisType", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The x-axis type of metrics to retrieve.

", + "smithy.api#required": {} + } + }, + "Start": { + "target": "com.amazonaws.sagemakermetrics#Long", + "traits": { + "smithy.api#documentation": "

The start time of metrics to retrieve.

" + } + }, + "End": { + "target": "com.amazonaws.sagemakermetrics#Long", + "traits": { + "smithy.api#documentation": "

The end time of metrics to retrieve.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies a query to retrieve training metrics from SageMaker.

" + } + }, + "com.amazonaws.sagemakermetrics#MetricQueryList": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakermetrics#MetricQuery" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.sagemakermetrics#MetricQueryResult": { + "type": "structure", + "members": { + "Status": { + "target": "com.amazonaws.sagemakermetrics#MetricQueryResultStatus", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The status of the metric query.

", + "smithy.api#required": {} + } + }, + "Message": { + "target": "com.amazonaws.sagemakermetrics#Message", + "traits": { + "smithy.api#documentation": "

A message describing the status of the metric query.

" + } + }, + "XAxisValues": { + "target": "com.amazonaws.sagemakermetrics#XAxisValues", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The values for the x-axis of the metrics.

", + "smithy.api#required": {} + } + }, + "MetricValues": { + "target": "com.amazonaws.sagemakermetrics#MetricValues", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The metric values retrieved by the query.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The result of a query to retrieve training metrics from SageMaker.

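To show how the new query shapes added in this hunk (MetricQuery, MetricStatistic, Period, XAxisType) fit together, here is a minimal Soto sketch (not part of this diff) that reads back the metric ingested in the earlier sketch with BatchGetMetrics. The resource ARN is hypothetical, the metrics client is reused from the previous sketch, and the generated member labels and enum case names are assumed from the model.

// Minimal sketch: query the average of train:loss in five-minute buckets over time.
let query = SageMakerMetrics.MetricQuery(
    end: nil,                         // optional end time
    metricName: "train:loss",
    metricStat: .avg,
    period: .fiveMinute,
    resourceArn: "arn:aws:sagemaker:us-east-1:111122223333:training-job/my-job", // hypothetical ARN
    start: nil,                       // optional start time
    xAxisType: .timestamp
)
let result = try await metrics.batchGetMetrics(.init(metricQueries: [query]))
for queryResult in result.metricQueryResults ?? [] {
    print(queryResult.status, queryResult.metricValues)
}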
" + } + }, + "com.amazonaws.sagemakermetrics#MetricQueryResultList": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakermetrics#MetricQueryResult" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.sagemakermetrics#MetricQueryResultStatus": { + "type": "enum", + "members": { + "COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Complete" + } + }, + "TRUNCATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Truncated" + } + }, + "INTERNAL_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "InternalError" + } + }, + "VALIDATION_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ValidationError" + } + } + } + }, + "com.amazonaws.sagemakermetrics#MetricStatistic": { + "type": "enum", + "members": { + "MIN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Min" + } + }, + "MAX": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Max" + } + }, + "AVG": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Avg" + } + }, + "COUNT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Count" + } + }, + "STD_DEV": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "StdDev" + } + }, + "LAST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Last" + } + } + } + }, + "com.amazonaws.sagemakermetrics#MetricValues": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakermetrics#Double" + } + }, + "com.amazonaws.sagemakermetrics#Period": { + "type": "enum", + "members": { + "ONE_MINUTE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OneMinute" + } + }, + "FIVE_MINUTE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FiveMinute" + } + }, + "ONE_HOUR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OneHour" + } + }, + "ITERATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IterationNumber" + } + } + } + }, "com.amazonaws.sagemakermetrics#PutMetricsErrorCode": { "type": "enum", "members": { @@ -225,6 +512,9 @@ "type": "service", "version": "2022-09-30", "operations": [ + { + "target": "com.amazonaws.sagemakermetrics#BatchGetMetrics" + }, { "target": "com.amazonaws.sagemakermetrics#BatchPutMetrics" } @@ -909,6 +1199,16 @@ } } }, + "com.amazonaws.sagemakermetrics#SageMakerResourceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:[a-z\\-].*/" + } + }, "com.amazonaws.sagemakermetrics#Step": { "type": "integer", "traits": { @@ -919,6 +1219,29 @@ }, "com.amazonaws.sagemakermetrics#Timestamp": { "type": "timestamp" + }, + "com.amazonaws.sagemakermetrics#XAxisType": { + "type": "enum", + "members": { + "ITERATION_NUMBER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IterationNumber" + } + }, + "TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Timestamp" + } + } + } + }, + "com.amazonaws.sagemakermetrics#XAxisValues": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemakermetrics#Long" + } } } } diff --git a/models/sagemaker.json b/models/sagemaker.json index af0a063bf0..0efafed018 100644 --- a/models/sagemaker.json +++ b/models/sagemaker.json @@ -1487,6 +1487,54 @@ 
"smithy.api#enumValue": "ml.g6.48xlarge" } }, + "ML_G6E_XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6e.xlarge" + } + }, + "ML_G6E_2XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6e.2xlarge" + } + }, + "ML_G6E_4XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6e.4xlarge" + } + }, + "ML_G6E_8XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6e.8xlarge" + } + }, + "ML_G6E_12XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6e.12xlarge" + } + }, + "ML_G6E_16XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6e.16xlarge" + } + }, + "ML_G6E_24XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6e.24xlarge" + } + }, + "ML_G6E_48XLARGE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ml.g6e.48xlarge" + } + }, "ML_GEOSPATIAL_INTERACTIVE": { "target": "smithy.api#Unit", "traits": { @@ -2028,7 +2076,7 @@ } }, "traits": { - "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of Amazon SageMaker Studio applications.

" + "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of Amazon SageMaker Studio\n applications.

" } }, "com.amazonaws.sagemaker#AppList": { @@ -6653,7 +6701,7 @@ "type": "integer", "traits": { "smithy.api#range": { - "min": 1 + "min": 0 } } }, @@ -7570,7 +7618,13 @@ "AppLifecycleManagement": { "target": "com.amazonaws.sagemaker#AppLifecycleManagement", "traits": { - "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of CodeEditor applications.

" + "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of CodeEditor\n applications.

" + } + }, + "BuiltInLifecycleConfigArn": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", + "traits": { + "smithy.api#documentation": "

The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default \n lifecycle configuration.

" } } }, @@ -9865,6 +9919,12 @@ "smithy.api#documentation": "

The entity that creates and manages the required security groups for inter-app\n communication in VPCOnly mode. Required when\n CreateDomain.AppNetworkAccessType is VPCOnly and\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided. If setting up the domain for use with RStudio, this value must be set to\n Service.

" } }, + "TagPropagation": { + "target": "com.amazonaws.sagemaker#TagPropagation", + "traits": { + "smithy.api#documentation": "

Indicates whether custom tag propagation is supported for the domain. Defaults to DISABLED.

" + } + }, "DefaultSpaceSettings": { "target": "com.amazonaws.sagemaker#DefaultSpaceSettings", "traits": { @@ -17797,6 +17857,12 @@ "traits": { "smithy.api#documentation": "

The instance type and the Amazon Resource Name (ARN) of the SageMaker image\n created on the instance.

" } + }, + "BuiltInLifecycleConfigArn": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", + "traits": { + "smithy.api#documentation": "

The lifecycle configuration that runs before the default lifecycle configuration.

" + } } }, "traits": { @@ -19290,6 +19356,12 @@ "smithy.api#documentation": "

The entity that creates and manages the required security groups for inter-app\n communication in VPCOnly mode. Required when\n CreateDomain.AppNetworkAccessType is VPCOnly and\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided.

" } }, + "TagPropagation": { + "target": "com.amazonaws.sagemaker#TagPropagation", + "traits": { + "smithy.api#documentation": "

Indicates whether custom tag propagation is supported for the domain.

" + } + }, "DefaultSpaceSettings": { "target": "com.amazonaws.sagemaker#DefaultSpaceSettings", "traits": { @@ -30305,12 +30377,50 @@ "target": "com.amazonaws.sagemaker#AppType" } }, + "com.amazonaws.sagemaker#HiddenInstanceTypesList": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#AppInstanceType" + } + }, "com.amazonaws.sagemaker#HiddenMlToolsList": { "type": "list", "member": { "target": "com.amazonaws.sagemaker#MlTools" } }, + "com.amazonaws.sagemaker#HiddenSageMakerImage": { + "type": "structure", + "members": { + "SageMakerImageName": { + "target": "com.amazonaws.sagemaker#SageMakerImageName", + "traits": { + "smithy.api#documentation": "

The SageMaker image name that you are hiding from the Studio user interface.

" + } + }, + "VersionAliases": { + "target": "com.amazonaws.sagemaker#VersionAliasesList", + "traits": { + "smithy.api#documentation": "

The version aliases you are hiding from the Studio user interface.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The SageMaker images that are hidden from the Studio user interface. You must specify the SageMaker\n image name and version aliases.

" + } + }, + "com.amazonaws.sagemaker#HiddenSageMakerImageVersionAliasesList": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#HiddenSageMakerImage" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, "com.amazonaws.sagemaker#HolidayConfig": { "type": "list", "member": { @@ -31041,9 +31151,7 @@ "PreHumanTaskLambdaArn": { "target": "com.amazonaws.sagemaker#LambdaFunctionArn", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object is sent to a human worker. Use this function to provide input to a custom labeling job.

For built-in task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for PreHumanTaskLambdaArn. For custom labeling workflows, see Pre-annotation Lambda.

Bounding box - Finds the most similar boxes from different workers based on the Jaccard index of the boxes.
  • arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox

Image classification - Uses a variant of the Expectation Maximization approach to estimate the true class of an image based on annotations from individual workers.
  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass

Multi-label image classification - Uses a variant of the Expectation Maximization approach to estimate the true classes of an image based on annotations from individual workers.
  • arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel

Semantic segmentation - Treats each pixel in an image as a multi-class classification and treats pixel annotations from workers as "votes" for the correct label.
  • arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation\n

    \n
  • \n
\n

\n Text classification - Uses a variant of the Expectation\n Maximization approach to estimate the true class of text based on annotations\n from individual workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass\n

    \n
  • \n
\n

\n Multi-label text classification - Uses a variant of the\n Expectation Maximization approach to estimate the true classes of text based on\n annotations from individual workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
\n

\n Named entity recognition - Groups similar selections and\n calculates aggregate boundaries, resolving to most-assigned label.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition\n

    \n
  • \n
\n

\n Video Classification - Use this task type when you need workers to classify videos using\n predefined labels that you specify. Workers are shown videos and are asked to choose one\n label for each video.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoMultiClass\n

    \n
  • \n
\n

\n Video Frame Object Detection - Use this task type to\n have workers identify and locate objects in a sequence of video frames (images extracted\n from a video) using bounding boxes. For example, you can use this task to ask workers to\n identify and localize various objects in a series of video frames, such as cars, bikes,\n and pedestrians.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection\n

    \n
  • \n
\n

\n Video Frame Object Tracking - Use this task type to\n have workers track the movement of objects in a sequence of video frames (images\n extracted from a video) using bounding boxes. For example, you can use this task to ask\n workers to track the movement of objects, such as cars, bikes, and pedestrians.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking\n

    \n
  • \n
\n

\n 3D Point Cloud Modalities\n

\n

Use the following pre-annotation lambdas for 3D point cloud labeling modality tasks.\n See 3D Point Cloud Task types\n to learn more.

\n

\n 3D Point Cloud Object Detection - \n Use this task type when you want workers to classify objects in a 3D point cloud by \n drawing 3D cuboids around objects. For example, you can use this task type to ask workers \n to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
\n

\n 3D Point Cloud Object Tracking - \n Use this task type when you want workers to draw 3D cuboids around objects\n that appear in a sequence of 3D point cloud frames. \n For example, you can use this task type to ask workers to track \n the movement of vehicles across multiple point cloud frames.\n

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
\n

\n 3D Point Cloud Semantic Segmentation - \n Use this task type when you want workers to create a point-level semantic segmentation masks by \n painting objects in a 3D point cloud using different colors where each color is assigned to one of \n the classes you specify.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
\n

\n Use the following ARNs for Label Verification and Adjustment Jobs\n

\n

Use label verification and adjustment jobs to review and adjust labels. To learn more,\n see Verify and Adjust Labels .

\n

\n Bounding box verification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of verification\n judgement for bounding box labels based on annotations from individual\n workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationBoundingBox\n

    \n
  • \n
\n

\n Bounding box adjustment - Finds the most similar boxes\n from different workers based on the Jaccard index of the adjusted\n annotations.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
\n

\n Semantic segmentation verification - Uses a variant of\n the Expectation Maximization approach to estimate the true class of verification\n judgment for semantic segmentation labels based on annotations from individual\n workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation\n

    \n
  • \n
\n

\n Semantic segmentation adjustment - Treats each pixel in\n an image as a multi-class classification and treats pixel adjusted annotations\n from workers as \"votes\" for the correct label.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation\n

    \n
  • \n
\n

\n Video Frame Object Detection Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to classify and localize objects in a sequence of video frames.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection\n

    \n
  • \n
\n

\n Video Frame Object Tracking Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to track object movement across a sequence of video frames.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking\n

    \n
  • \n
\n

\n 3D point cloud object detection adjustment - Adjust\n 3D cuboids in a point cloud frame.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection\n

    \n
  • \n
\n

\n 3D point cloud object tracking adjustment - Adjust 3D\n cuboids across a sequence of point cloud frames.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking\n

    \n
  • \n
\n

\n 3D point cloud semantic segmentation adjustment -\n Adjust semantic segmentation masks in a 3D point cloud.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation\n

    \n
  • \n
", - "smithy.api#required": {} + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a Lambda function that is run before a data object\n is sent to a human worker. Use this function to provide input to a custom labeling\n job.

\n

For built-in\n task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for\n PreHumanTaskLambdaArn. For custom labeling workflows, see Pre-annotation Lambda.

\n

\n Bounding box - Finds the most similar boxes from\n different workers based on the Jaccard index of the boxes.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox\n

    \n
  • \n
\n

\n Image classification - Uses a variant of the Expectation\n Maximization approach to estimate the true class of an image based on\n annotations from individual workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass\n

    \n
  • \n
\n

\n Multi-label image classification - Uses a variant of the Expectation\n Maximization approach to estimate the true classes of an image based on\n annotations from individual workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel\n

    \n
  • \n
\n

\n Semantic segmentation - Treats each pixel in an image as\n a multi-class classification and treats pixel annotations from workers as\n \"votes\" for the correct label.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation\n

    \n
  • \n
\n

\n Text classification - Uses a variant of the Expectation\n Maximization approach to estimate the true class of text based on annotations\n from individual workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass\n

    \n
  • \n
\n

\n Multi-label text classification - Uses a variant of the\n Expectation Maximization approach to estimate the true classes of text based on\n annotations from individual workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel\n

    \n
  • \n
\n

\n Named entity recognition - Groups similar selections and\n calculates aggregate boundaries, resolving to most-assigned label.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition\n

    \n
  • \n
\n

\n Video Classification - Use this task type when you need workers to classify videos using\n predefined labels that you specify. Workers are shown videos and are asked to choose one\n label for each video.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoMultiClass\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoMultiClass\n

    \n
  • \n
\n

\n Video Frame Object Detection - Use this task type to\n have workers identify and locate objects in a sequence of video frames (images extracted\n from a video) using bounding boxes. For example, you can use this task to ask workers to\n identify and localize various objects in a series of video frames, such as cars, bikes,\n and pedestrians.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection\n

    \n
  • \n
\n

\n Video Frame Object Tracking - Use this task type to\n have workers track the movement of objects in a sequence of video frames (images\n extracted from a video) using bounding boxes. For example, you can use this task to ask\n workers to track the movement of objects, such as cars, bikes, and pedestrians.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking\n

    \n
  • \n
\n

\n 3D Point Cloud Modalities\n

\n

Use the following pre-annotation lambdas for 3D point cloud labeling modality tasks.\n See 3D Point Cloud Task types\n to learn more.

\n

\n 3D Point Cloud Object Detection - \n Use this task type when you want workers to classify objects in a 3D point cloud by \n drawing 3D cuboids around objects. For example, you can use this task type to ask workers \n to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection\n

    \n
  • \n
\n

\n 3D Point Cloud Object Tracking - \n Use this task type when you want workers to draw 3D cuboids around objects\n that appear in a sequence of 3D point cloud frames. \n For example, you can use this task type to ask workers to track \n the movement of vehicles across multiple point cloud frames.\n

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking\n

    \n
  • \n
\n

\n 3D Point Cloud Semantic Segmentation - \n Use this task type when you want workers to create a point-level semantic segmentation masks by \n painting objects in a 3D point cloud using different colors where each color is assigned to one of \n the classes you specify.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation\n

    \n
  • \n
\n

\n Use the following ARNs for Label Verification and Adjustment Jobs\n

\n

Use label verification and adjustment jobs to review and adjust labels. To learn more,\n see Verify and Adjust Labels .

\n

\n Bounding box verification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of verification\n judgement for bounding box labels based on annotations from individual\n workers.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationBoundingBox\n

    \n
  • \n
\n

\n Bounding box adjustment - Finds the most similar boxes\n from different workers based on the Jaccard index of the adjusted\n annotations.

\n
    \n
  • \n

    \n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
  • \n

    \n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox\n

    \n
  • \n
\n

Semantic segmentation verification - Uses a variant of the Expectation Maximization approach to estimate the true class of verification judgment for semantic segmentation labels based on annotations from individual workers. (A bare-bones EM sketch follows this list.)

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation
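The exact EM variant that Ground Truth runs is not spelled out here, so the following is only a bare-bones Swift sketch of the general idea under simplifying assumptions: alternate between inferring each item's verification judgment from accuracy-weighted worker votes and re-estimating each worker's accuracy against those inferred judgments. All names are hypothetical and not part of the SageMaker or Soto APIs.

import Foundation

// judgments[item][worker]: 1 = worker says the label is correct, 0 = incorrect.
func emConsolidate(judgments: [[Int]], iterations: Int = 10) -> [Double] {
    let workerCount = judgments.first?.count ?? 0
    var accuracy = [Double](repeating: 0.7, count: workerCount)   // initial guess
    var belief = [Double](repeating: 0.5, count: judgments.count) // P(label is correct)

    for _ in 0..<iterations {
        // E-step: update each item's belief from accuracy-weighted votes.
        for (i, votes) in judgments.enumerated() {
            var logOdds = 0.0
            for (w, vote) in votes.enumerated() {
                let p = min(max(accuracy[w], 1e-6), 1 - 1e-6)
                logOdds += (vote == 1) ? log(p / (1 - p)) : log((1 - p) / p)
            }
            belief[i] = 1 / (1 + exp(-logOdds))
        }
        // M-step: a worker's accuracy is their expected agreement with the beliefs.
        for w in 0..<workerCount {
            var agreement = 0.0
            for (i, votes) in judgments.enumerated() {
                agreement += (votes[w] == 1) ? belief[i] : 1 - belief[i]
            }
            accuracy[w] = agreement / Double(judgments.count)
        }
    }
    return belief
}

print(emConsolidate(judgments: [[1, 1, 0], [0, 0, 0], [1, 1, 1]]))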

Semantic segmentation adjustment - Treats each pixel in an image as a multi-class classification and treats pixel adjusted annotations from workers as "votes" for the correct label. (A per-pixel majority-vote sketch follows this list.)

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation
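As a concrete illustration of the per-pixel "voting" described above, here is a minimal Swift sketch that consolidates several worker-adjusted masks by majority vote. The real Ground Truth consolidation is more involved; the types and function below are illustrative only.

// Each mask is a flat array of class labels, one entry per pixel.
func consolidateByVote(masks: [[Int]]) -> [Int] {
    guard let pixelCount = masks.first?.count else { return [] }
    return (0..<pixelCount).map { pixel in
        var votes: [Int: Int] = [:]
        for mask in masks {
            votes[mask[pixel], default: 0] += 1
        }
        // Most frequent class wins; ties are broken arbitrarily here.
        return votes.max { $0.value < $1.value }!.key
    }
}

let workerMasks = [
    [0, 1, 1, 2],
    [0, 1, 2, 2],
    [0, 0, 1, 2],
]
print(consolidateByVote(masks: workerMasks))  // [0, 1, 1, 2]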

Video Frame Object Detection Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to classify and localize objects in a sequence of video frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection

Video Frame Object Tracking Adjustment - Use this task type when you want workers to adjust bounding boxes that workers have added to video frames to track object movement across a sequence of video frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking

3D point cloud object detection adjustment - Adjust 3D cuboids in a point cloud frame.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection

3D point cloud object tracking adjustment - Adjust 3D cuboids across a sequence of point cloud frames.

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking

3D point cloud semantic segmentation adjustment - Adjust semantic segmentation masks in a 3D point cloud. (The Region-to-account mapping is identical across these task types; a small helper sketch follows this list.)

  • arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation
  • arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation
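Every list above uses the same Region-to-account mapping; only the PRE- function name changes with the task type. The Swift sketch below, which is purely illustrative and not part of the Soto or SageMaker APIs, builds one of these ARNs from that mapping.

// Region-to-account mapping taken from the lists above.
let preHumanTaskLambdaAccounts: [String: String] = [
    "us-east-1": "432418664414", "us-east-2": "266458841044",
    "us-west-2": "081040173940", "ca-central-1": "918755190332",
    "eu-west-1": "568282634449", "eu-west-2": "487402164563",
    "eu-central-1": "203001061592", "ap-northeast-1": "477331159723",
    "ap-northeast-2": "845288260483", "ap-south-1": "565803892007",
    "ap-southeast-1": "377565633583", "ap-southeast-2": "454466003867",
]

// Builds e.g. "arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox".
func preHumanTaskLambdaArn(region: String, task: String) -> String? {
    guard let account = preHumanTaskLambdaAccounts[region] else { return nil }
    return "arn:aws:lambda:\(region):\(account):function:PRE-\(task)"
}

print(preHumanTaskLambdaArn(region: "us-west-2", task: "AdjustmentBoundingBox") ?? "unsupported Region")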
" } }, "TaskKeywords": { @@ -31099,9 +31207,7 @@ "AnnotationConsolidationConfig": { "target": "com.amazonaws.sagemaker#AnnotationConsolidationConfig", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

Configures how labels are consolidated across human workers.

", - "smithy.api#required": {} + "smithy.api#documentation": "

Configures how labels are consolidated across human workers.

" } }, "PublicWorkforceTaskPrice": { @@ -32371,7 +32477,7 @@ "IdleTimeoutInMinutes": { "target": "com.amazonaws.sagemaker#IdleTimeoutInMinutes", "traits": { - "smithy.api#documentation": "

The time that SageMaker waits after the application becomes idle before shutting it down.

" + "smithy.api#documentation": "

The time that SageMaker waits after the application becomes idle before shutting it\n down.

" } }, "MinIdleTimeoutInMinutes": { @@ -32772,6 +32878,16 @@ "smithy.api#pattern": "^(^\\d+$)|(^\\d+.\\d+$)|(^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$)$" } }, + "com.amazonaws.sagemaker#ImageVersionAliasPattern": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)$" + } + }, "com.amazonaws.sagemaker#ImageVersionArn": { "type": "string", "traits": { @@ -35449,6 +35565,12 @@ "traits": { "smithy.api#documentation": "

The configuration parameters that specify the IAM roles assumed by the execution role of \n SageMaker (assumable roles) and the cluster instances or job execution environments \n (execution roles or runtime roles) to manage and access resources required for running Amazon EMR\n clusters or Amazon EMR Serverless applications.

" } + }, + "BuiltInLifecycleConfigArn": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", + "traits": { + "smithy.api#documentation": "

The lifecycle configuration that runs before the default lifecycle configuration. It can override changes made in the default \n lifecycle configuration.

" + } } }, "traits": { @@ -36099,9 +36221,7 @@ "PreHumanTaskLambdaArn": { "target": "com.amazonaws.sagemaker#LambdaFunctionArn", "traits": { - "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a Lambda function. The function is run before each\n data object is sent to a worker.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a Lambda function. The function is run before each\n data object is sent to a worker.

" } }, "AnnotationConsolidationLambdaArn": { @@ -44640,6 +44760,12 @@ "traits": { "smithy.api#enumValue": "InferenceOptimization" } + }, + "PERFORMANCE_EVALUATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PerformanceEvaluation" + } } } }, @@ -57236,6 +57362,12 @@ "traits": { "smithy.api#documentation": "

Configuration information for hub access.

" } + }, + "ManifestS3Uri": { + "target": "com.amazonaws.sagemaker#S3ModelUri", + "traits": { + "smithy.api#documentation": "

The Amazon S3 URI of the manifest file. The manifest file is a CSV file that stores the artifact locations.

" + } } }, "traits": { @@ -59376,6 +59508,17 @@ } } }, + "com.amazonaws.sagemaker#SageMakerImageName": { + "type": "enum", + "members": { + "sagemaker_distribution": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "sagemaker_distribution" + } + } + } + }, "com.amazonaws.sagemaker#SageMakerImageVersionAlias": { "type": "string", "traits": { @@ -60856,7 +60999,7 @@ } }, "traits": { - "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of Amazon SageMaker Studio applications in a space.

" + "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of Amazon SageMaker Studio\n applications in a space.

" } }, "com.amazonaws.sagemaker#SpaceArn": { @@ -60878,7 +61021,7 @@ "AppLifecycleManagement": { "target": "com.amazonaws.sagemaker#SpaceAppLifecycleManagement", "traits": { - "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of CodeEditor applications in a space.

" + "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of CodeEditor applications in\n a space.

" } } }, @@ -60963,7 +61106,7 @@ "IdleTimeoutInMinutes": { "target": "com.amazonaws.sagemaker#IdleTimeoutInMinutes", "traits": { - "smithy.api#documentation": "

The time that SageMaker waits after the application becomes idle before shutting it down.

" + "smithy.api#documentation": "

The time that SageMaker waits after the application becomes idle before shutting it\n down.

" } } }, @@ -60986,7 +61129,7 @@ "AppLifecycleManagement": { "target": "com.amazonaws.sagemaker#SpaceAppLifecycleManagement", "traits": { - "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of JupyterLab applications in a space.

" + "smithy.api#documentation": "

Settings that are used to configure and manage the lifecycle of JupyterLab applications in\n a space.

" } } }, @@ -62608,6 +62751,18 @@ "traits": { "smithy.api#documentation": "

The Applications supported in Studio that are hidden from the Studio left navigation\n pane.

" } + }, + "HiddenInstanceTypes": { + "target": "com.amazonaws.sagemaker#HiddenInstanceTypesList", + "traits": { + "smithy.api#documentation": "

\n The instance types you are hiding from the Studio user interface.\n

" + } + }, + "HiddenSageMakerImageVersionAliases": { + "target": "com.amazonaws.sagemaker#HiddenSageMakerImageVersionAliasesList", + "traits": { + "smithy.api#documentation": "

\n The version aliases you are hiding from the Studio user interface.\n

" + } } }, "traits": { @@ -62869,6 +63024,23 @@ } } }, + "com.amazonaws.sagemaker#TagPropagation": { + "type": "enum", + "members": { + "ENABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ENABLED" + } + }, + "DISABLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DISABLED" + } + } + } + }, "com.amazonaws.sagemaker#TagValue": { "type": "string", "traits": { @@ -68025,6 +68197,12 @@ "traits": { "smithy.api#documentation": "

Specifies the VPC used for non-EFS traffic.

\n
    \n
  • \n

    \n PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon SageMaker, which allows direct internet access.

    \n
  • \n
  • \n

    \n VpcOnly - All Studio traffic is through the specified VPC and\n subnets.

    \n
  • \n
\n

This configuration can only be modified if there are no apps in the\n InService, Pending, or Deleting state. The\n configuration cannot be updated if\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is already\n set or DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn is\n provided as part of the same request.

" } + }, + "TagPropagation": { + "target": "com.amazonaws.sagemaker#TagPropagation", + "traits": { + "smithy.api#documentation": "

Indicates whether custom tag propagation is supported for the domain. Defaults to DISABLED.

" + } } }, "traits": { @@ -70402,7 +70580,7 @@ "AutoMountHomeEFS": { "target": "com.amazonaws.sagemaker#AutoMountHomeEFS", "traits": { - "smithy.api#documentation": "

Indicates whether auto-mounting of an EFS volume is supported for the user profile. The DefaultAsDomain value is only supported for user profiles. Do not use the DefaultAsDomain value when setting this parameter for a domain.

" + "smithy.api#documentation": "

Indicates whether auto-mounting of an EFS volume is supported for the user profile. The\n DefaultAsDomain value is only supported for user profiles. Do not use the\n DefaultAsDomain value when setting this parameter for a domain.

" } } }, @@ -70603,6 +70781,18 @@ } } }, + "com.amazonaws.sagemaker#VersionAliasesList": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#ImageVersionAliasPattern" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.sagemaker#VersionId": { "type": "string", "traits": { diff --git a/models/securityhub.json b/models/securityhub.json index e431233ae1..ea5821d1a0 100644 --- a/models/securityhub.json +++ b/models/securityhub.json @@ -55,7 +55,7 @@ } ], "traits": { - "smithy.api#documentation": "

Accepts the invitation to be a member account and be monitored by the Security Hub administrator\n account that the invitation was sent from.

\n

This operation is only used by member accounts that are not added through\n Organizations.

\n

When the member account accepts the invitation, permission is granted to the administrator\n account to view findings generated in the member account.

", + "smithy.api#documentation": "\n

We recommend using Organizations instead of Security Hub invitations to manage your member accounts. \n For information, see Managing Security Hub administrator and member accounts with Organizations \n in the Security Hub User Guide.

\n
\n

Accepts the invitation to be a member account and be monitored by the Security Hub administrator\n account that the invitation was sent from.

\n

This operation is only used by member accounts that are not added through\n Organizations.

\n

When the member account accepts the invitation, permission is granted to the administrator\n account to view findings generated in the member account.

", "smithy.api#examples": [ { "title": "To accept an invitation be a member account", @@ -674,7 +674,7 @@ "Type": { "target": "com.amazonaws.securityhub#AutomationRulesActionType", "traits": { - "smithy.api#documentation": "

\n Specifies that the rule action should update the Types finding field. The Types \n finding field classifies findings in the format of namespace/category/classifier. For more information, see\n Types taxonomy for ASFF in \n the Security Hub User Guide.\n

" + "smithy.api#documentation": "

\n Specifies the type of action that Security Hub takes when a finding matches the defined criteria of a rule.\n

" } }, "FindingFieldsUpdate": { @@ -685,7 +685,7 @@ } }, "traits": { - "smithy.api#documentation": "

\n One or more actions to update finding fields if a finding matches the defined criteria \n of the rule.\n

" + "smithy.api#documentation": "

\n One or more actions that Security Hub takes when a finding matches the defined criteria \n of a rule.\n

" } }, "com.amazonaws.securityhub#AutomationRulesActionType": { @@ -958,7 +958,7 @@ "ResourceId": { "target": "com.amazonaws.securityhub#StringFilterList", "traits": { - "smithy.api#documentation": "

\n The identifier for the given resource type. For Amazon Web Services resources that are identified by \n Amazon Resource Names (ARNs), this is the ARN. For Amazon Web Services resources that lack ARNs, \n this is the identifier as defined by the Amazon Web Servicesservice that created the resource. \n For non-Amazon Web Services resources, this is a unique identifier that is associated with the \n resource.\n

\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 100 items.\n \t

" + "smithy.api#documentation": "

\n The identifier for the given resource type. For Amazon Web Services resources that are identified by \n Amazon Resource Names (ARNs), this is the ARN. For Amazon Web Services resources that lack ARNs, \n this is the identifier as defined by the Amazon Web Services service that created the resource. \n For non-Amazon Web Services resources, this is a unique identifier that is associated with the \n resource.\n

\n

\n \t\tArray Members: Minimum number of 1 item. Maximum number of 100 items.\n \t

" } }, "ResourcePartition": { @@ -2986,7 +2986,7 @@ "Lifecycle": { "target": "com.amazonaws.securityhub#AwsBackupBackupPlanLifecycleDetails", "traits": { - "smithy.api#documentation": "

Defines when a protected resource is transitioned to cold storage and when it expires.\n Backup transitions and expires backups automatically according to the\n lifecycle that you define. If you do not specify a lifecycle, Backup applies\n the lifecycle policy of the source backup to the destination backup.

\n

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days.

" + "smithy.api#documentation": "

Defines when a protected resource is transitioned to cold storage and when it expires.\n Backup transitions and expires backups automatically according to the\n lifecycle that you define. If you don't specify a lifecycle, Backup applies\n the lifecycle policy of the source backup to the destination backup.

\n

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days.

" } } }, @@ -3054,7 +3054,7 @@ "Lifecycle": { "target": "com.amazonaws.securityhub#AwsBackupBackupPlanLifecycleDetails", "traits": { - "smithy.api#documentation": "

Defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. If you do not specify a lifecycle, Backup applies the lifecycle policy of the source backup to the destination backup.

\n

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days.

" + "smithy.api#documentation": "

Defines when a protected resource is transitioned to cold storage and when it expires. Backup transitions and expires backups automatically according to the lifecycle that you define. If you don't specify a lifecycle, Backup applies the lifecycle policy of the source backup to the destination backup.

\n

Backups transitioned to cold storage must be stored in cold storage for a minimum of 90 days.

" } } }, @@ -3086,7 +3086,7 @@ "EncryptionKeyArn": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The unique ARN associated with the server-side encryption key. You can specify a key to encrypt your backups from services that support \nfull Backup management. If you do not specify a key, Backup creates an KMS key for you by default.\n

" + "smithy.api#documentation": "

The unique ARN associated with the server-side encryption key. You can specify a key to encrypt your backups from services that support \nfull Backup management. If you don't specify a key, Backup creates an KMS key for you by default.\n

" } }, "Notifications": { @@ -9342,7 +9342,7 @@ "SchedulingStrategy": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The scheduling strategy to use for the service.

\n

The REPLICA scheduling strategy places and maintains the desired number of tasks across the cluster. By default, the service scheduler spreads tasks across Availability Zones. Task placement strategies and constraints are used to customize task placement decisions.

\n

The DAEMON scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that are specified in the cluster. The service scheduler also evaluates the task placement constraints for running tasks and stops tasks that do not meet the placement constraints.

\n

Valid values: REPLICA | DAEMON\n

" + "smithy.api#documentation": "

The scheduling strategy to use for the service.

\n

The REPLICA scheduling strategy places and maintains the desired number of tasks across the cluster. By default, the service scheduler spreads tasks across Availability Zones. Task placement strategies and constraints are used to customize task placement decisions.

\n

The DAEMON scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that are specified in the cluster. The service scheduler also evaluates the task placement constraints for running tasks and stops tasks that don't meet the placement constraints.

\n

Valid values: REPLICA | DAEMON\n

" } }, "ServiceArn": { @@ -15149,7 +15149,7 @@ "DBName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The meaning of this parameter differs according to the database engine you use.

\n

\n MySQL, MariaDB, SQL Server, PostgreSQL\n

\n

Contains the name of the initial database of this instance that was provided at create\n time, if one was specified when the DB instance was created. This same name is returned for\n the life of the DB instance.

\n

\n Oracle\n

\n

Contains the Oracle System ID (SID) of the created DB instance. Not shown when the\n returned parameters do not apply to an Oracle DB instance.

" + "smithy.api#documentation": "

The meaning of this parameter differs according to the database engine you use.

\n

\n MySQL, MariaDB, SQL Server, PostgreSQL\n

\n

Contains the name of the initial database of this instance that was provided at create\n time, if one was specified when the DB instance was created. This same name is returned for\n the life of the DB instance.

\n

\n Oracle\n

\n

Contains the Oracle System ID (SID) of the created DB instance. Not shown when the\n returned parameters don't apply to an Oracle DB instance.

" } }, "DeletionProtection": { @@ -18983,7 +18983,7 @@ "WorkflowStatus": { "target": "com.amazonaws.securityhub#StringFilterList", "traits": { - "smithy.api#documentation": "

The status of the investigation into a finding. Allowed values are the following.

\n
    \n
  • \n

    \n NEW - The initial state of a finding, before it is reviewed.

    \n

    Security Hub also resets the workflow status from NOTIFIED or\n RESOLVED to NEW in the following cases:

    \n
      \n
    • \n

      \n RecordState changes from ARCHIVED to ACTIVE.

      \n
    • \n
    • \n

      \n Compliance.Status changes from PASSED to either WARNING,\n FAILED, or NOT_AVAILABLE.

      \n
    • \n
    \n
  • \n
  • \n

    \n NOTIFIED - Indicates that the resource owner has been notified about\n the security issue. Used when the initial reviewer is not the resource owner, and\n needs intervention from the resource owner.

    \n

    If one of the following occurs, the workflow status is changed automatically from\n NOTIFIED to NEW:

    \n
      \n
    • \n

      \n RecordState changes from ARCHIVED to\n ACTIVE.

      \n
    • \n
    • \n

      \n Compliance.Status changes from PASSED to FAILED,\n WARNING, or NOT_AVAILABLE.

      \n
    • \n
    \n
  • \n
  • \n

    \n SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is\n needed.

    \n

    The workflow status of a SUPPRESSED finding does not change if\n RecordState changes from ARCHIVED to\n ACTIVE.

    \n
  • \n
  • \n

    \n RESOLVED - The finding was reviewed and remediated and is now\n considered resolved.

    \n

    The finding remains RESOLVED unless one of the following occurs:

    \n
      \n
    • \n

      \n RecordState changes from ARCHIVED to\n ACTIVE.

      \n
    • \n
    • \n

      \n Compliance.Status changes from PASSED to FAILED,\n WARNING, or NOT_AVAILABLE.

      \n
    • \n
    \n

    In those cases, the workflow status is automatically reset to NEW.

    \n

    For findings from controls, if Compliance.Status is PASSED,\n then Security Hub automatically sets the workflow status to RESOLVED.

    \n
  • \n
" + "smithy.api#documentation": "

The status of the investigation into a finding. Allowed values are the following.

\n
    \n
  • \n

    \n NEW - The initial state of a finding, before it is reviewed.

    \n

    Security Hub also resets the workflow status from NOTIFIED or\n RESOLVED to NEW in the following cases:

    \n
      \n
    • \n

      \n RecordState changes from ARCHIVED to ACTIVE.

      \n
    • \n
    • \n

      \n Compliance.Status changes from PASSED to either WARNING,\n FAILED, or NOT_AVAILABLE.

      \n
    • \n
    \n
  • \n
  • \n

    \n NOTIFIED - Indicates that the resource owner has been notified about\n the security issue. Used when the initial reviewer is not the resource owner, and\n needs intervention from the resource owner.

    \n

    If one of the following occurs, the workflow status is changed automatically from\n NOTIFIED to NEW:

    \n
      \n
    • \n

      \n RecordState changes from ARCHIVED to\n ACTIVE.

      \n
    • \n
    • \n

      \n Compliance.Status changes from PASSED to FAILED,\n WARNING, or NOT_AVAILABLE.

      \n
    • \n
    \n
  • \n
  • \n

    \n SUPPRESSED - Indicates that you reviewed the finding and don't believe that any action is\n needed.

    \n

    The workflow status of a SUPPRESSED finding does not change if\n RecordState changes from ARCHIVED to\n ACTIVE.

    \n
  • \n
  • \n

    \n RESOLVED - The finding was reviewed and remediated and is now\n considered resolved.

    \n

    The finding remains RESOLVED unless one of the following occurs:

    \n
      \n
    • \n

      \n RecordState changes from ARCHIVED to\n ACTIVE.

      \n
    • \n
    • \n

      \n Compliance.Status changes from PASSED to FAILED,\n WARNING, or NOT_AVAILABLE.

      \n
    • \n
    \n

    In those cases, the workflow status is automatically reset to NEW.

    \n

    For findings from controls, if Compliance.Status is PASSED,\n then Security Hub automatically sets the workflow status to RESOLVED.

    \n
  • \n
" } }, "RecordState": { @@ -19082,7 +19082,7 @@ "ComplianceSecurityControlId": { "target": "com.amazonaws.securityhub#StringFilterList", "traits": { - "smithy.api#documentation": "

\n The unique identifier of a control across standards. Values for this field typically consist of an \n Amazon Web Servicesservice and a number, such as APIGateway.5.\n

" + "smithy.api#documentation": "

\n The unique identifier of a control across standards. Values for this field typically consist of an \n Amazon Web Services service and a number, such as APIGateway.5.\n

" } }, "ComplianceAssociatedStandardsId": { @@ -20188,13 +20188,13 @@ "OverrideAction": { "target": "com.amazonaws.securityhub#WafOverrideAction", "traits": { - "smithy.api#documentation": "

Use the OverrideAction to test your RuleGroup.

\n

Any rule in a RuleGroup can potentially block a request. If you set the OverrideAction to\n None, the RuleGroup blocks a request if any individual rule in the RuleGroup\n matches the request and is configured to block that request.

\n

However, if you first want to test the RuleGroup,\n set the OverrideAction to Count. The RuleGroup\n then overrides any block action specified by individual rules contained within the group.\n Instead of blocking matching requests, those requests are counted.

\n

\n ActivatedRule|OverrideAction applies only when updating or\n adding a RuleGroup\n to a web ACL. In this case you do not use ActivatedRule\n Action. For all other update requests,\n ActivatedRule\n Action is used instead of ActivatedRule\n OverrideAction.

" + "smithy.api#documentation": "

Use the OverrideAction to test your RuleGroup.

\n

Any rule in a RuleGroup can potentially block a request. If you set the OverrideAction to\n None, the RuleGroup blocks a request if any individual rule in the RuleGroup\n matches the request and is configured to block that request.

\n

However, if you first want to test the RuleGroup,\n set the OverrideAction to Count. The RuleGroup\n then overrides any block action specified by individual rules contained within the group.\n Instead of blocking matching requests, those requests are counted.

\n

\n ActivatedRule|OverrideAction applies only when updating or\n adding a RuleGroup\n to a web ACL. In this case you don't use ActivatedRule\n Action. For all other update requests,\n ActivatedRule\n Action is used instead of ActivatedRule\n OverrideAction.

" } }, "Priority": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

Specifies the order in which the rules in a web\n ACL are evaluated. Rules with a lower value for Priority are\n evaluated before rules with a higher value. The value must be a unique integer. If you add\n multiple rules to a web ACL, the values do not need to be consecutive.

" + "smithy.api#documentation": "

Specifies the order in which the rules in a web\n ACL are evaluated. Rules with a lower value for Priority are\n evaluated before rules with a higher value. The value must be a unique integer. If you add\n multiple rules to a web ACL, the values don't need to be consecutive.

" } }, "RuleId": { @@ -21632,7 +21632,7 @@ } ], "traits": { - "smithy.api#documentation": "

Used by Security Hub customers to update information about their investigation into a finding.\n Requested by administrator accounts or member accounts. Administrator accounts can update findings for\n their account and their member accounts. Member accounts can update findings for their\n account.

\n

Updates from BatchUpdateFindings do not affect the value of\n UpdatedAt for a finding.

\n

Administrator and member accounts can use BatchUpdateFindings to update the\n following finding fields and objects.

\n
    \n
  • \n

    \n Confidence\n

    \n
  • \n
  • \n

    \n Criticality\n

    \n
  • \n
  • \n

    \n Note\n

    \n
  • \n
  • \n

    \n RelatedFindings\n

    \n
  • \n
  • \n

    \n Severity\n

    \n
  • \n
  • \n

    \n Types\n

    \n
  • \n
  • \n

    \n UserDefinedFields\n

    \n
  • \n
  • \n

    \n VerificationState\n

    \n
  • \n
  • \n

    \n Workflow\n

    \n
  • \n
\n

You can configure IAM policies to restrict access to fields and field values. For\n example, you might not want member accounts to be able to suppress findings or change the\n finding severity. See Configuring access to BatchUpdateFindings in the\n Security Hub User Guide.

", + "smithy.api#documentation": "

Used by Security Hub customers to update information about their investigation into a finding.\n Requested by administrator accounts or member accounts. Administrator accounts can update findings for\n their account and their member accounts. Member accounts can update findings for their\n account.

\n

Updates from BatchUpdateFindings don't affect the value of\n UpdatedAt for a finding.

\n

Administrator and member accounts can use BatchUpdateFindings to update the\n following finding fields and objects.

\n
    \n
  • \n

    \n Confidence\n

    \n
  • \n
  • \n

    \n Criticality\n

    \n
  • \n
  • \n

    \n Note\n

    \n
  • \n
  • \n

    \n RelatedFindings\n

    \n
  • \n
  • \n

    \n Severity\n

    \n
  • \n
  • \n

    \n Types\n

    \n
  • \n
  • \n

    \n UserDefinedFields\n

    \n
  • \n
  • \n

    \n VerificationState\n

    \n
  • \n
  • \n

    \n Workflow\n

    \n
  • \n
\n

You can configure IAM policies to restrict access to fields and field values. For\n example, you might not want member accounts to be able to suppress findings or change the\n finding severity. See Configuring access to BatchUpdateFindings in the\n Security Hub User Guide.

", "smithy.api#examples": [ { "title": "To update Security Hub findings", @@ -22198,7 +22198,7 @@ "SecurityControlId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n Typically provides the unique identifier of a control across standards. For Security Hub controls, this field consists of an \n Amazon Web Servicesservice and a unique number, such as APIGateway.5.\n

" + "smithy.api#documentation": "

\n Typically provides the unique identifier of a control across standards. For Security Hub controls, this field consists of an \n Amazon Web Services service and a unique number, such as APIGateway.5.\n

" } }, "AssociatedStandards": { @@ -23058,7 +23058,7 @@ } ], "traits": { - "smithy.api#documentation": "

Used to enable finding aggregation. Must be called from the aggregation Region.

\n

For more details about cross-Region replication, see Configuring finding aggregation in the Security Hub User Guide.\n

", + "smithy.api#documentation": "\n

The aggregation Region is now called the home Region.

\n
\n

Used to enable cross-Region aggregation. This operation can be invoked from the home Region only.

\n

For information about how cross-Region aggregation works, see Understanding cross-Region aggregation in Security Hub in the Security Hub User Guide.\n

", "smithy.api#examples": [ { "title": "To enable cross-Region aggregation", @@ -23102,7 +23102,7 @@ "Regions": { "target": "com.amazonaws.securityhub#StringList", "traits": { - "smithy.api#documentation": "

If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region.

\n

If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region.\n

\n

An InvalidInputException error results if you populate this field while RegionLinkingMode is \n NO_REGIONS.

" + "smithy.api#documentation": "

If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that don't replicate and send findings to the home Region.

\n

If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do replicate and send findings to the home Region.\n

\n

An InvalidInputException error results if you populate this field while RegionLinkingMode is \n NO_REGIONS.

" } } }, @@ -23116,13 +23116,13 @@ "FindingAggregatorArn": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The ARN of the finding aggregator. You use the finding aggregator ARN to retrieve details for, update, and stop finding aggregation.

" + "smithy.api#documentation": "

The ARN of the finding aggregator. You use the finding aggregator ARN to retrieve details for, update, and stop cross-Region aggregation.

" } }, "FindingAggregationRegion": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The aggregation Region.

" + "smithy.api#documentation": "

The home Region. Findings generated in linked Regions are replicated and sent to the home Region.

" } }, "RegionLinkingMode": { @@ -23280,7 +23280,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a member association in Security Hub between the specified accounts and the account\n used to make the request, which is the administrator account. If you are integrated with\n Organizations, then the administrator account is designated by the organization management account.

\n

\n CreateMembers is always used to add accounts that are not organization\n members.

\n

For accounts that are managed using Organizations, CreateMembers is only used\n in the following cases:

\n
    \n
  • \n

    Security Hub is not configured to automatically add new organization accounts.

    \n
  • \n
  • \n

    The account was disassociated or deleted in Security Hub.

    \n
  • \n
\n

This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you\n can use the EnableSecurityHub operation.

\n

For accounts that are not organization members, you create the account association and\n then send an invitation to the member account. To send the invitation, you use the\n InviteMembers operation. If the account owner accepts\n the invitation, the account becomes a member account in Security Hub.

\n

Accounts that are managed using Organizations do not receive an invitation. They\n automatically become a member account in Security Hub.

\n
    \n
  • \n

    If the organization account does not have Security Hub enabled, then Security Hub and the default standards are automatically enabled. Note that Security Hub cannot be enabled automatically for the organization management account. The organization management account must enable Security Hub before the administrator account enables it as a member account.

    \n
  • \n
  • \n

    For organization accounts that already have Security Hub enabled, Security Hub does not make any other changes to those accounts. It does not change their enabled standards or controls.

    \n
  • \n
\n

A permissions policy is added that permits the administrator account to view the findings\n generated in the member account.

\n

To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

", + "smithy.api#documentation": "

Creates a member association in Security Hub between the specified accounts and the account\n used to make the request, which is the administrator account. If you are integrated with\n Organizations, then the administrator account is designated by the organization management account.

\n

\n CreateMembers is always used to add accounts that are not organization\n members.

\n

For accounts that are managed using Organizations, CreateMembers is only used\n in the following cases:

\n
    \n
  • \n

    Security Hub is not configured to automatically add new organization accounts.

    \n
  • \n
  • \n

    The account was disassociated or deleted in Security Hub.

    \n
  • \n
\n

This action can only be used by an account that has Security Hub enabled. To enable Security Hub, you\n can use the EnableSecurityHub operation.

\n

For accounts that are not organization members, you create the account association and\n then send an invitation to the member account. To send the invitation, you use the\n InviteMembers operation. If the account owner accepts\n the invitation, the account becomes a member account in Security Hub.

\n

Accounts that are managed using Organizations don't receive an invitation. They\n automatically become a member account in Security Hub.

\n
    \n
  • \n

    If the organization account does not have Security Hub enabled, then Security Hub and the default standards are automatically enabled. Note that Security Hub cannot be enabled automatically for the organization management account. The organization management account must enable Security Hub before the administrator account enables it as a member account.

    \n
  • \n
  • \n

    For organization accounts that already have Security Hub enabled, Security Hub does not make any other changes to those accounts. It does not change their enabled standards or controls.

    \n
  • \n
\n

A permissions policy is added that permits the administrator account to view the findings\n generated in the member account.

\n

To remove the association between the administrator and member accounts, use the DisassociateFromMasterAccount or DisassociateMembers operation.

", "smithy.api#examples": [ { "title": "To add a member account", @@ -23560,7 +23560,7 @@ } ], "traits": { - "smithy.api#documentation": "

Declines invitations to become a member account.

\n

A prospective member account uses this operation to decline an invitation to become a member.

\n

This operation is only called by member accounts that aren't part of an organization.\n Organization accounts don't receive invitations.

", + "smithy.api#documentation": "\n

We recommend using Organizations instead of Security Hub invitations to manage your member accounts. \n For information, see Managing Security Hub administrator and member accounts with Organizations \n in the Security Hub User Guide.

\n
\n

Declines invitations to become a Security Hub member account.

\n

A prospective member account uses this operation to decline an invitation to become a member.

\n

Only member accounts that aren't part of an Amazon Web Services organization should use this operation.\n Organization accounts don't receive invitations.

", "smithy.api#examples": [ { "title": "To decline invitation to become a member account", @@ -23789,7 +23789,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes a finding aggregator. When you delete the finding aggregator, you stop finding aggregation.

\n

When you stop finding aggregation, findings that were already aggregated to the aggregation Region are still visible from the aggregation Region. New findings and finding updates are not aggregated.\n

", + "smithy.api#documentation": "\n

The aggregation Region is now called the home Region.

\n
\n

Deletes a finding aggregator. When you delete the finding aggregator, you stop cross-Region aggregation. Finding replication stops \noccurring from the linked Regions to the home Region.

\n

When you stop cross-Region aggregation, findings that were already replicated and sent to the home Region are still visible from \n the home Region. However, new findings and finding updates are no longer replicated and sent to the home Region.\n

", "smithy.api#examples": [ { "title": "To delete a finding aggregator", @@ -23933,7 +23933,7 @@ } ], "traits": { - "smithy.api#documentation": "

Deletes invitations received by the Amazon Web Services account to become a member account.

\n

A Security Hub administrator account can use this operation to delete invitations sent to one or more member accounts.

\n

This operation is only used to delete invitations that are sent to member accounts that aren't part of an organization.\n Organization accounts don't receive invitations.

", + "smithy.api#documentation": "\n

We recommend using Organizations instead of Security Hub invitations to manage your member accounts. \n For information, see Managing Security Hub administrator and member accounts with Organizations \n in the Security Hub User Guide.

\n
\n

Deletes invitations to become a Security Hub member account.

\n

A Security Hub administrator account can use this operation to delete invitations sent to one or more prospective member accounts.

\n

This operation is only used to delete invitations that are sent to prospective member accounts that aren't part of an Amazon Web Services organization.\n Organization accounts don't receive invitations.

", "smithy.api#examples": [ { "title": "To delete a custom insight", @@ -24370,7 +24370,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns information about product integrations in Security Hub.

\n

You can optionally provide an integration ARN. If you provide an integration ARN, then\n the results only include that integration.

\n

If you do not provide an integration ARN, then the results include all of the available\n product integrations.

", + "smithy.api#documentation": "

Returns information about product integrations in Security Hub.

\n

You can optionally provide an integration ARN. If you provide an integration ARN, then\n the results only include that integration.

\n

If you don't provide an integration ARN, then the results include all of the available\n product integrations.

", "smithy.api#examples": [ { "title": "To get information about Security Hub integrations", @@ -25291,7 +25291,7 @@ "EnableDefaultStandards": { "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "

Whether to enable the security standards that Security Hub has designated as automatically\n enabled. If you do not provide a value for EnableDefaultStandards, it is set\n to true. To not enable the automatically enabled standards, set\n EnableDefaultStandards to false.

" + "smithy.api#documentation": "

Whether to enable the security standards that Security Hub has designated as automatically\n enabled. If you don't provide a value for EnableDefaultStandards, it is set\n to true. To not enable the automatically enabled standards, set\n EnableDefaultStandards to false.

" } }, "ControlFindingGenerator": { @@ -25428,7 +25428,7 @@ } }, "traits": { - "smithy.api#documentation": "

A finding aggregator. A finding aggregator contains the configuration for finding aggregation.

" + "smithy.api#documentation": "

A finding aggregator is a Security Hub resource that specifies cross-Region aggregation settings, including the \nhome Region and any linked Regions.

" } }, "com.amazonaws.securityhub#FindingAggregatorList": { @@ -25458,7 +25458,7 @@ "UpdateSource": { "target": "com.amazonaws.securityhub#FindingHistoryUpdateSource", "traits": { - "smithy.api#documentation": "

Identifies the source of the event that changed the finding. For example, an integrated\n Amazon Web Servicesservice or third-party partner integration may call \n BatchImportFindings\n , or an Security Hub customer\n may call \n BatchUpdateFindings\n .

" + "smithy.api#documentation": "

Identifies the source of the event that changed the finding. For example, an integrated\n Amazon Web Services service or third-party partner integration may call \n BatchImportFindings\n , or an Security Hub customer\n may call \n BatchUpdateFindings\n .

" } }, "Updates": { @@ -25516,7 +25516,7 @@ "Type": { "target": "com.amazonaws.securityhub#FindingHistoryUpdateSourceType", "traits": { - "smithy.api#documentation": "

\n Describes the type of finding change event, such as a call to \n BatchImportFindings\n (by an integrated Amazon Web Servicesservice or third party partner integration) or \n BatchUpdateFindings\n (by a Security Hub customer). \n

" + "smithy.api#documentation": "

\n Describes the type of finding change event, such as a call to \n BatchImportFindings\n (by an integrated Amazon Web Services service or third party partner integration) or \n BatchUpdateFindings\n (by a Security Hub customer). \n

" } }, "Identity": { @@ -26222,7 +26222,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the current finding aggregation configuration.

", + "smithy.api#documentation": "\n

The aggregation Region is now called the home Region.

\n
\n

Returns the current configuration in the calling account for cross-Region aggregation. A finding aggregator is a resource that establishes \nthe home Region and any linked Regions.

", "smithy.api#examples": [ { "title": "To get cross-Region aggregation details", @@ -26276,7 +26276,7 @@ "FindingAggregationRegion": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The aggregation Region.

" + "smithy.api#documentation": "

The home Region. Findings generated in linked Regions are replicated and sent to the home Region.

" } }, "RegionLinkingMode": { @@ -26415,7 +26415,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns a list of findings that match the specified criteria.

\n

If finding aggregation is enabled, then when you call GetFindings from the aggregation Region, the results include all of the matching findings from both the aggregation Region and the linked Regions.

", + "smithy.api#documentation": "

Returns a list of findings that match the specified criteria.

\n

If cross-Region aggregation is enabled, then when you call GetFindings from the home Region, the results include all of the matching findings from both the home Region and linked Regions.

", "smithy.api#examples": [ { "title": "To get a list of findings", @@ -26761,7 +26761,7 @@ "InsightArns": { "target": "com.amazonaws.securityhub#ArnList", "traits": { - "smithy.api#documentation": "

The ARNs of the insights to describe. If you do not provide any insight ARNs, then\n GetInsights returns all of your custom insights. It does not return any\n managed insights.

" + "smithy.api#documentation": "

The ARNs of the insights to describe. If you don't provide any insight ARNs, then\n GetInsights returns all of your custom insights. It does not return any\n managed insights.

" } }, "NextToken": { @@ -26826,7 +26826,7 @@ } ], "traits": { - "smithy.api#documentation": "

Returns the count of all Security Hub membership invitations that were sent to the\n current member account, not including the currently accepted invitation.

", + "smithy.api#documentation": "\n

We recommend using Organizations instead of Security Hub invitations to manage your member accounts. \n For information, see Managing Security Hub administrator and member accounts with Organizations \n in the Security Hub User Guide.

\n
\n

Returns the count of all Security Hub membership invitations that were sent to the\n calling member account, not including the currently accepted invitation.

", "smithy.api#examples": [ { "title": "To get a count of membership invitations", @@ -27463,7 +27463,7 @@ } ], "traits": { - "smithy.api#documentation": "

Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that\n the invitation is sent from.

\n

This operation is only used to invite accounts that do not belong to an organization.\n Organization accounts do not receive invitations.

\n

Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

\n

When the account owner enables Security Hub and accepts the invitation to become a member\n account, the administrator account can view the findings generated from the member account.

", + "smithy.api#documentation": "\n

We recommend using Organizations instead of Security Hub invitations to manage your member accounts. \n For information, see Managing Security Hub administrator and member accounts with Organizations \n in the Security Hub User Guide.

\n
\n

Invites other Amazon Web Services accounts to become member accounts for the Security Hub administrator account that\n the invitation is sent from.

\n

This operation is only used to invite accounts that don't belong to an Amazon Web Services organization.\n Organization accounts don't receive invitations.

\n

Before you can use this action to invite a member, you must first use the CreateMembers action to create the member account in Security Hub.

\n

When the account owner enables Security Hub and accepts the invitation to become a member\n account, the administrator account can view the findings generated in the member account.

", "smithy.api#examples": [ { "title": "To invite accounts to become members", @@ -28078,7 +28078,7 @@ } ], "traits": { - "smithy.api#documentation": "

If finding aggregation is enabled, then ListFindingAggregators returns the ARN of the finding aggregator. You can run this operation from any Region.

", + "smithy.api#documentation": "

If cross-Region aggregation is enabled, then ListFindingAggregators returns the Amazon Resource Name (ARN) \nof the finding aggregator. You can run this operation from any Amazon Web Services Region.

", "smithy.api#examples": [ { "title": "To update the enablement status of a standard control", @@ -28170,7 +28170,7 @@ } ], "traits": { - "smithy.api#documentation": "

Lists all Security Hub membership invitations that were sent to the current Amazon Web Services account.

\n

This operation is only used by accounts that are managed by invitation.\n Accounts that are managed using the integration with Organizations do not receive invitations.

", + "smithy.api#documentation": "\n

We recommend using Organizations instead of Security Hub invitations to manage your member accounts. \n For information, see Managing Security Hub administrator and member accounts with Organizations \n in the Security Hub User Guide.

\n
\n

Lists all Security Hub membership invitations that were sent to the calling account.

\n

Only accounts that are managed by invitation can use this operation.\n Accounts that are managed using the integration with Organizations don't receive invitations.

", "smithy.api#http": { "method": "GET", "uri": "/invitations", @@ -29732,7 +29732,7 @@ "SecurityHub": { "target": "com.amazonaws.securityhub#SecurityHubPolicy", "traits": { - "smithy.api#documentation": "

\n The Amazon Web Servicesservice that the configuration policy applies to.\n

" + "smithy.api#documentation": "

\n The Amazon Web Services service that the configuration policy applies to.\n

" } } }, @@ -30927,7 +30927,7 @@ "DestinationPrefixListId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

\n The prefix of the destination Amazon Web Servicesservice.\n

" + "smithy.api#documentation": "

\n The prefix of the destination Amazon Web Services service.\n

" } }, "EgressOnlyInternetGatewayId": { @@ -31534,7 +31534,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

\n The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Servicesservice name and a \n number, such as APIGateway.3.\n

", + "smithy.api#documentation": "

\n The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Services service name and a \n number, such as APIGateway.3.\n

", "smithy.api#required": {} } }, @@ -31642,7 +31642,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

\n The unique identifier of a security control across standards. Values for this field typically consist of an \n Amazon Web Servicesservice name and a number (for example, APIGateway.3). This parameter differs from \n SecurityControlArn, which is a unique Amazon Resource Name (ARN) assigned to a control. The \n ARN references the security control ID (for example, arn:aws:securityhub:eu-central-1:123456789012:security-control/APIGateway.3).\n

", + "smithy.api#documentation": "

\n The unique identifier of a security control across standards. Values for this field typically consist of an \n Amazon Web Services service name and a number (for example, APIGateway.3). This parameter differs from \n SecurityControlArn, which is a unique Amazon Resource Name (ARN) assigned to a control. The \n ARN references the security control ID (for example, arn:aws:securityhub:eu-central-1:123456789012:security-control/APIGateway.3).\n

", "smithy.api#required": {} } }, @@ -32038,7 +32038,7 @@ "name": "securityhub" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps \n you assess your Amazon Web Services environment against security industry standards and best practices.

\n

Security Hub collects security data across Amazon Web Services accounts, Amazon Web Servicesservices, and \n supported third-party products and helps you analyze your security trends and identify the highest priority security \n issues.

\n

To help you manage the security state of your organization, Security Hub supports multiple security standards. \n These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, \n and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data \n Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes \n several security controls, each of which represents a security best practice. Security Hub runs checks against \n security controls and generates control findings to help you assess your compliance against security best practices.

\n

In addition to generating control findings, Security Hub also receives findings from other Amazon Web Servicesservices, \n such as Amazon GuardDuty and Amazon Inspector, and \n supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You \n can also send Security Hub findings to other Amazon Web Servicesservices and supported third-party products.

\n

Security Hub offers automation features that help you triage and remediate security issues. For example, \n you can use automation rules to automatically update critical findings when a security check fails. You can also leverage the integration with \n Amazon EventBridge to trigger automatic responses to specific findings.

\n

This guide, the Security Hub API Reference, provides\n information about the Security Hub API. This includes supported resources, HTTP methods, parameters,\n and schemas. If you're new to Security Hub, you might find it helpful to also review the \n Security Hub User Guide\n . The\n user guide explains key concepts and provides procedures\n that demonstrate how to use Security Hub features. It also provides information about topics such as\n integrating Security Hub with other Amazon Web Servicesservices.

\n

In addition to interacting with Security Hub by making calls to the Security Hub API, you can\n use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools \n and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell,\n Java, Go, Python, C++, and .NET. These tools and SDKs provide convenient, programmatic access to\n Security Hub and other Amazon Web Servicesservices . They also handle tasks such as signing requests, \n managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools\n and SDKs, see Tools to Build on Amazon Web Services.

\n

With the exception of operations that are related to central configuration, Security Hub API requests are executed only in\n the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change\n that results from the operation is applied only to that Region. To make the same change in\n other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, \nAPI requests for enabling Security Hub, standards, and controls are executed in the home Region and all linked Regions. For a list of \ncentral configuration operations, see the Central configuration \nterms and concepts section of the Security Hub User Guide.

\n

The following throttling limits apply to Security Hub API operations.

\n
    \n
  • \n

    \n BatchEnableStandards - RateLimit of 1 request per\n second. BurstLimit of 1 request per second.

    \n
  • \n
  • \n

    \n GetFindings - RateLimit of 3 requests per second.\n BurstLimit of 6 requests per second.

    \n
  • \n
  • \n

    \n BatchImportFindings - RateLimit of 10 requests per second.\n BurstLimit of 30 requests per second.

    \n
  • \n
  • \n

    \n BatchUpdateFindings - RateLimit of 10 requests per second.\n BurstLimit of 30 requests per second.

    \n
  • \n
  • \n

    \n UpdateStandardsControl - RateLimit of 1 request per\n second. BurstLimit of 5 requests per second.

    \n
  • \n
  • \n

    All other operations - RateLimit of 10 requests per second.\n BurstLimit of 30 requests per second.

    \n
  • \n
", + "smithy.api#documentation": "

Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps \n you assess your Amazon Web Services environment against security industry standards and best practices.

\n

Security Hub collects security data across Amazon Web Services accounts, Amazon Web Services services, and \n supported third-party products and helps you analyze your security trends and identify the highest priority security \n issues.

\n

To help you manage the security state of your organization, Security Hub supports multiple security standards. \n These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, \n and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data \n Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes \n several security controls, each of which represents a security best practice. Security Hub runs checks against \n security controls and generates control findings to help you assess your compliance against security best practices.

\n

In addition to generating control findings, Security Hub also receives findings from other Amazon Web Services services, \n such as Amazon GuardDuty and Amazon Inspector, and \n supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You \n can also send Security Hub findings to other Amazon Web Services services and supported third-party products.

\n

Security Hub offers automation features that help you triage and remediate security issues. For example, \n you can use automation rules to automatically update critical findings when a security check fails. You can also leverage the integration with \n Amazon EventBridge to trigger automatic responses to specific findings.

\n

This guide, the Security Hub API Reference, provides\n information about the Security Hub API. This includes supported resources, HTTP methods, parameters,\n and schemas. If you're new to Security Hub, you might find it helpful to also review the \n Security Hub User Guide\n . The\n user guide explains key concepts and provides procedures\n that demonstrate how to use Security Hub features. It also provides information about topics such as\n integrating Security Hub with other Amazon Web Services services.

\n

In addition to interacting with Security Hub by making calls to the Security Hub API, you can\n use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools \n and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell,\n Java, Go, Python, C++, and .NET. These tools and SDKs provide convenient, programmatic access to\n Security Hub and other Amazon Web Services services . They also handle tasks such as signing requests, \n managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools\n and SDKs, see Tools to Build on Amazon Web Services.

\n

With the exception of operations that are related to central configuration, Security Hub API requests are executed only in\n the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change\n that results from the operation is applied only to that Region. To make the same change in\n other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, \nAPI requests for enabling Security Hub, standards, and controls are executed in the home Region and all linked Regions. For a list of \ncentral configuration operations, see the Central configuration \nterms and concepts section of the Security Hub User Guide.

\n

The following throttling limits apply to Security Hub API operations.

\n
    \n
  • \n

    \n BatchEnableStandards - RateLimit of 1 request per\n second. BurstLimit of 1 request per second.

    \n
  • \n
  • \n

    \n GetFindings - RateLimit of 3 requests per second.\n BurstLimit of 6 requests per second.

    \n
  • \n
  • \n

    \n BatchImportFindings - RateLimit of 10 requests per second.\n BurstLimit of 30 requests per second.

    \n
  • \n
  • \n

    \n BatchUpdateFindings - RateLimit of 10 requests per second.\n BurstLimit of 30 requests per second.

    \n
  • \n
  • \n

    \n UpdateStandardsControl - RateLimit of 1 request per\n second. BurstLimit of 5 requests per second.

    \n
  • \n
  • \n

    All other operations - RateLimit of 10 requests per second.\n BurstLimit of 30 requests per second.

    \n
  • \n
", "smithy.api#title": "AWS SecurityHub", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -33123,13 +33123,13 @@ "Label": { "target": "com.amazonaws.securityhub#SeverityLabel", "traits": { - "smithy.api#documentation": "

The severity value of the finding. The allowed values are the following.

\n
    \n
  • \n

    \n INFORMATIONAL - No issue was found.

    \n
  • \n
  • \n

    \n LOW - The issue does not require action on its own.

    \n
  • \n
  • \n

    \n MEDIUM - The issue must be addressed but not urgently.

    \n
  • \n
  • \n

    \n HIGH - The issue must be addressed as a priority.

    \n
  • \n
  • \n

    \n CRITICAL - The issue must be remediated immediately to avoid it\n escalating.

    \n
  • \n
\n

If you provide Normalized and do not provide Label, then\n Label is set automatically as follows.

\n
    \n
  • \n

    0 - INFORMATIONAL\n

    \n
  • \n
  • \n

    1–39 - LOW\n

    \n
  • \n
  • \n

    40–69 - MEDIUM\n

    \n
  • \n
  • \n

    70–89 - HIGH\n

    \n
  • \n
  • \n

    90–100 - CRITICAL\n

    \n
  • \n
" + "smithy.api#documentation": "

The severity value of the finding. The allowed values are the following.

\n
    \n
  • \n

    \n INFORMATIONAL - No issue was found.

    \n
  • \n
  • \n

    \n LOW - The issue does not require action on its own.

    \n
  • \n
  • \n

    \n MEDIUM - The issue must be addressed but not urgently.

    \n
  • \n
  • \n

    \n HIGH - The issue must be addressed as a priority.

    \n
  • \n
  • \n

    \n CRITICAL - The issue must be remediated immediately to avoid it\n escalating.

    \n
  • \n
\n

If you provide Normalized and don't provide Label, then\n Label is set automatically as follows.

\n
    \n
  • \n

    0 - INFORMATIONAL\n

    \n
  • \n
  • \n

    1–39 - LOW\n

    \n
  • \n
  • \n

    40–69 - MEDIUM\n

    \n
  • \n
  • \n

    70–89 - HIGH\n

    \n
  • \n
  • \n

    90–100 - CRITICAL\n

    \n
  • \n
" } }, "Normalized": { "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "

Deprecated. The normalized severity of a finding.\n Instead of providing Normalized, provide Label.

\n

The value of Normalized can be an integer between 0 and 100.

\n

If you provide Label and do not provide Normalized, then\n Normalized is set automatically as follows.

\n
    \n
  • \n

    \n INFORMATIONAL - 0

    \n
  • \n
  • \n

    \n LOW - 1

    \n
  • \n
  • \n

    \n MEDIUM - 40

    \n
  • \n
  • \n

    \n HIGH - 70

    \n
  • \n
  • \n

    \n CRITICAL - 90

    \n
  • \n
" + "smithy.api#documentation": "

Deprecated. The normalized severity of a finding.\n Instead of providing Normalized, provide Label.

\n

The value of Normalized can be an integer between 0 and 100.

\n

If you provide Label and don't provide Normalized, then\n Normalized is set automatically as follows.

\n
    \n
  • \n

    \n INFORMATIONAL - 0

    \n
  • \n
  • \n

    \n LOW - 1

    \n
  • \n
  • \n

    \n MEDIUM - 40

    \n
  • \n
  • \n

    \n HIGH - 70

    \n
  • \n
  • \n

    \n CRITICAL - 90

    \n
  • \n
" } }, "Original": { @@ -33213,7 +33213,7 @@ "Normalized": { "target": "com.amazonaws.securityhub#RatioScale", "traits": { - "smithy.api#documentation": "

The normalized severity for the finding. This attribute is to be deprecated in favor of\n Label.

\n

If you provide Normalized and do not provide Label,\n Label is set automatically as follows.

\n
    \n
  • \n

    0 - INFORMATIONAL\n

    \n
  • \n
  • \n

    1–39 - LOW\n

    \n
  • \n
  • \n

    40–69 - MEDIUM\n

    \n
  • \n
  • \n

    70–89 - HIGH\n

    \n
  • \n
  • \n

    90–100 - CRITICAL\n

    \n
  • \n
" + "smithy.api#documentation": "

The normalized severity for the finding. This attribute is to be deprecated in favor of\n Label.

\n

If you provide Normalized and don't provide Label,\n Label is set automatically as follows.

\n
    \n
  • \n

    0 - INFORMATIONAL\n

    \n
  • \n
  • \n

    1–39 - LOW\n

    \n
  • \n
  • \n

    40–69 - MEDIUM\n

    \n
  • \n
  • \n

    70–89 - HIGH\n

    \n
  • \n
  • \n

    90–100 - CRITICAL\n

    \n
  • \n
" } }, "Product": { @@ -33492,7 +33492,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

\n The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Servicesservice \n name and a number, such as APIGateway.3.\n

", + "smithy.api#documentation": "

\n The unique identifier of a security control across standards. Values for this field typically consist of an Amazon Web Services service \n name and a number, such as APIGateway.3.\n

", "smithy.api#required": {} } }, @@ -33610,7 +33610,7 @@ "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

\n A unique standard-agnostic identifier for a control. Values for this field typically consist of an \n Amazon Web Servicesservice and a number, such as APIGateway.5. This field doesn't reference a specific standard.\n

", + "smithy.api#documentation": "

\n A unique standard-agnostic identifier for a control. Values for this field typically consist of an \n Amazon Web Services service and a number, such as APIGateway.5. This field doesn't reference a specific standard.\n

", "smithy.api#required": {} } }, @@ -35361,7 +35361,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the finding aggregation configuration. Used to update the Region linking mode and the list of included or excluded Regions. You cannot use UpdateFindingAggregator to change the aggregation Region.

\n

You must run UpdateFindingAggregator from the current aggregation Region.\n

", + "smithy.api#documentation": "\n

The aggregation Region is now called the home Region.

\n
\n

Updates cross-Region aggregation settings. You can use this operation to update the Region linking mode and the list \n of included or excluded Amazon Web Services Regions. However, you can't use this operation to change the home Region.

\n

You can invoke this operation from the current home Region only.\n
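[Illustrative sketch, not part of the model diff: an assumed Soto call that updates the Region linking mode from the home Region. The ARN and Region names are placeholders; the home Region itself cannot be changed by this operation.]

// Assumes an already-configured `securityHub` service object (SotoSecurityHub),
// invoked from the home Region.
_ = try await securityHub.updateFindingAggregator(
    .init(
        findingAggregatorArn: "arn:aws:securityhub:us-east-1:123456789012:finding-aggregator/EXAMPLE",
        regionLinkingMode: "SPECIFIED_REGIONS",
        regions: ["us-west-2", "eu-west-1"]
    )
)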

", "smithy.api#examples": [ { "title": "To update cross-Region aggregation settings", @@ -35414,7 +35414,7 @@ "Regions": { "target": "com.amazonaws.securityhub#StringList", "traits": { - "smithy.api#documentation": "

If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region.

\n

If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do aggregate findings to the aggregation Region.

\n

An InvalidInputException error results if you populate this field while RegionLinkingMode is \n NO_REGIONS.

" + "smithy.api#documentation": "

If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that don't replicate and send findings to the home Region.

\n

If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions that do replicate and send findings to the home Region.

\n

An InvalidInputException error results if you populate this field while RegionLinkingMode is \n NO_REGIONS.

" } } }, @@ -35434,7 +35434,7 @@ "FindingAggregationRegion": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "

The aggregation Region.

" + "smithy.api#documentation": "

The home Region. Findings generated in linked Regions are replicated and sent to the home Region.

" } }, "RegionLinkingMode": { @@ -36343,7 +36343,7 @@ "Status": { "target": "com.amazonaws.securityhub#WorkflowStatus", "traits": { - "smithy.api#documentation": "

The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue.

\n

The allowed values are the following.

\n
    \n
  • \n

    \n NEW - The initial state of a finding, before it is reviewed.

    \n

    Security Hub also resets the workflow status from NOTIFIED or\n RESOLVED to NEW in the following cases:

    \n
      \n
    • \n

      \n RecordState changes from ARCHIVED to\n ACTIVE.

      \n
    • \n
    • \n

      \n ComplianceStatus changes from PASSED to either\n WARNING, FAILED, or\n NOT_AVAILABLE.

      \n
    • \n
    \n
  • \n
  • \n

    \n NOTIFIED - Indicates that you notified the resource owner about the\n security issue. Used when the initial reviewer is not the resource owner, and needs\n intervention from the resource owner.

    \n
  • \n
  • \n

    \n SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated.

    \n
  • \n
  • \n

    \n RESOLVED - The finding was reviewed and remediated and is now\n considered resolved.

    \n
  • \n
" + "smithy.api#documentation": "

The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue.

\n

The allowed values are the following.

\n
    \n
  • \n

    \n NEW - The initial state of a finding, before it is reviewed.

    \n

    Security Hub also resets the workflow status from NOTIFIED or\n RESOLVED to NEW in the following cases:

    \n
      \n
    • \n

      \n RecordState changes from ARCHIVED to\n ACTIVE.

      \n
    • \n
    • \n

      \n ComplianceStatus changes from PASSED to either\n WARNING, FAILED, or\n NOT_AVAILABLE.

      \n
    • \n
    \n
  • \n
  • \n

    \n NOTIFIED - Indicates that you notified the resource owner about the\n security issue. Used when the initial reviewer is not the resource owner, and needs\n intervention from the resource owner.

    \n
  • \n
  • \n

    \n SUPPRESSED - Indicates that you reviewed the finding and don't believe that any action is needed. The finding is no longer updated.

    \n
  • \n
  • \n

    \n RESOLVED - The finding was reviewed and remediated and is now\n considered resolved.

    \n
  • \n
" } } }, @@ -36426,7 +36426,7 @@ "Status": { "target": "com.amazonaws.securityhub#WorkflowStatus", "traits": { - "smithy.api#documentation": "

The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue.

\n

The allowed values are the following.

\n
    \n
  • \n

    \n NEW - The initial state of a finding, before it is reviewed.

    \n

    Security Hub also resets WorkFlowStatus from NOTIFIED or\n RESOLVED to NEW in the following cases:

    \n
      \n
    • \n

      The record state changes from ARCHIVED to\n ACTIVE.

      \n
    • \n
    • \n

      The compliance status changes from PASSED to either\n WARNING, FAILED, or\n NOT_AVAILABLE.

      \n
    • \n
    \n
  • \n
  • \n

    \n NOTIFIED - Indicates that you notified the resource owner about the\n security issue. Used when the initial reviewer is not the resource owner, and needs\n intervention from the resource owner.

    \n
  • \n
  • \n

    \n RESOLVED - The finding was reviewed and remediated and is now\n considered resolved.

    \n
  • \n
  • \n

    \n SUPPRESSED - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated.

    \n
  • \n
" + "smithy.api#documentation": "

The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to SUPPRESSED or RESOLVED does not prevent a new finding for the same issue.

\n

The allowed values are the following.

\n
    \n
  • \n

    \n NEW - The initial state of a finding, before it is reviewed.

    \n

    Security Hub also resets WorkFlowStatus from NOTIFIED or\n RESOLVED to NEW in the following cases:

    \n
      \n
    • \n

      The record state changes from ARCHIVED to\n ACTIVE.

      \n
    • \n
    • \n

      The compliance status changes from PASSED to either\n WARNING, FAILED, or\n NOT_AVAILABLE.

      \n
    • \n
    \n
  • \n
  • \n

    \n NOTIFIED - Indicates that you notified the resource owner about the\n security issue. Used when the initial reviewer is not the resource owner, and needs\n intervention from the resource owner.

    \n
  • \n
  • \n

    \n RESOLVED - The finding was reviewed and remediated and is now\n considered resolved.

    \n
  • \n
  • \n

    \n SUPPRESSED - Indicates that you reviewed the finding and don't believe that any action is needed. The finding is no longer updated.

    \n
  • \n
" } } }, diff --git a/models/securitylake.json b/models/securitylake.json index 91aa6e7dc0..b1029f3591 100644 --- a/models/securitylake.json +++ b/models/securitylake.json @@ -59,7 +59,7 @@ "min": 1, "max": 1011 }, - "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-za-z0-9_/.\\-]{0,63}:[A-za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-za-z0-9_/.\\-]{0,127}$" + "smithy.api#pattern": "^arn:(aws|aws-us-gov|aws-cn):securitylake:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9_/.\\-]{0,63}:[A-Za-z0-9][A-Za-z0-9_/.\\-]{0,127}$" } }, "com.amazonaws.securitylake#AwsAccountId": { @@ -79,20 +79,20 @@ "principal": { "target": "com.amazonaws.securitylake#AwsPrincipal", "traits": { - "smithy.api#documentation": "

The AWS identity principal.

", + "smithy.api#documentation": "

The Amazon Web Services identity principal.

", "smithy.api#required": {} } }, "externalId": { "target": "com.amazonaws.securitylake#ExternalId", "traits": { - "smithy.api#documentation": "

The external ID used to estalish trust relationship with the AWS identity.

", + "smithy.api#documentation": "

The external ID used to establish a trust relationship with the Amazon Web Services identity.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The AWS identity.

" + "smithy.api#documentation": "

The Amazon Web Services identity.

" } }, "com.amazonaws.securitylake#AwsLogSourceConfiguration": { @@ -114,7 +114,7 @@ "sourceName": { "target": "com.amazonaws.securitylake#AwsLogSourceName", "traits": { - "smithy.api#documentation": "

The name for a Amazon Web Services source. This must be a Regionally unique value.

", + "smithy.api#documentation": "

The name for an Amazon Web Services source.

", "smithy.api#required": {} } }, @@ -122,12 +122,12 @@ "target": "com.amazonaws.securitylake#AwsLogSourceVersion", "traits": { "smithy.api#default": "latest", - "smithy.api#documentation": "

The version for a Amazon Web Services source. This must be a Regionally unique value.

" + "smithy.api#documentation": "

The version for an Amazon Web Services source.

" } } }, "traits": { - "smithy.api#documentation": "

The Security Lake logs source configuration file describes the information needed to generate Security Lake logs.

" + "smithy.api#documentation": "

To add a natively-supported Amazon Web Services service as a log source, use these\n parameters to specify the configuration settings for the log source.

" } }, "com.amazonaws.securitylake#AwsLogSourceConfigurationList": { @@ -212,7 +212,7 @@ } }, "traits": { - "smithy.api#documentation": "

Amazon Security Lake can collect logs and events from natively-supported Amazon Web Services services.

" + "smithy.api#documentation": "

Amazon Security Lake can collect logs and events from natively-supported Amazon Web Services\n services.

" } }, "com.amazonaws.securitylake#AwsLogSourceResourceList": { @@ -315,7 +315,7 @@ "glue:CreateTable", "glue:CreateDatabase" ], - "smithy.api#documentation": "

Adds a natively supported Amazon Web Service as an Amazon Security Lake source. Enables\n source types for member accounts in required Amazon Web Services Regions, based on the\n parameters you specify. You can choose any source type in any Region for either accounts\n that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Service as a source, Security Lake starts collecting logs and events from it.

\n

You can use this API only to enable natively supported Amazon Web Services as a\n source. Use CreateCustomLogSource to enable data collection from a custom\n source.

", + "smithy.api#documentation": "

Adds a natively supported Amazon Web Services service as an Amazon Security Lake source. Enables\n source types for member accounts in required Amazon Web Services Regions, based on the\n parameters you specify. You can choose any source type in any Region for either accounts\n that are part of a trusted organization or standalone accounts. Once you add an Amazon Web Services service as a source, Security Lake starts collecting logs and events from it.

\n

You can use this API only to enable natively supported Amazon Web Services services as a\n source. Use CreateCustomLogSource to enable data collection from a custom\n source.
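[Illustrative sketch, not part of the model diff: enabling one natively supported log source with a Soto-generated Security Lake client. The enum case name, account, and Region values are assumptions for illustration and should be checked against the generated sources.]

// Assumes an already-configured `securityLake` service object (SotoSecurityLake).
// Enable one natively supported source (Route 53 here) for a given account and Region.
let response = try await securityLake.createAwsLogSource(
    .init(sources: [
        SecurityLake.AwsLogSourceConfiguration(
            accounts: ["123456789012"],
            regions: ["us-east-1"],
            sourceName: .route53 // assumed case name for the ROUTE53 source
        )
    ])
)
print(response.failed ?? []) // accounts where enabling the source failed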

", "smithy.api#http": { "method": "POST", "uri": "/v1/datalake/logsources/aws", @@ -344,7 +344,7 @@ "failed": { "target": "com.amazonaws.securitylake#AccountList", "traits": { - "smithy.api#documentation": "

Lists all accounts in which enabling a natively supported Amazon Web Service as\n a Security Lake source failed. The failure occurred as these accounts are not part of an\n organization.

" + "smithy.api#documentation": "

Lists all accounts in which enabling a natively supported Amazon Web Services service as\n a Security Lake source failed. The failure occurred as these accounts are not part of an\n organization.

" } } }, @@ -414,7 +414,7 @@ "sourceName": { "target": "com.amazonaws.securitylake#CustomLogSourceName", "traits": { - "smithy.api#documentation": "

Specify the name for a third-party custom source. This must be a Regionally unique\n value.

", + "smithy.api#documentation": "

Specify the name for a third-party custom source. This must be a Regionally unique\n value. The sourceName you enter here is used in the\n LogProviderRole name, which follows the convention\n AmazonSecurityLake-Provider-{name of the custom source}-{region}. You must\n use a CustomLogSource name that is shorter than or equal to 20 characters.\n This ensures that the LogProviderRole name is below the 64-character\n limit.

", "smithy.api#required": {} } }, @@ -433,7 +433,7 @@ "configuration": { "target": "com.amazonaws.securitylake#CustomLogSourceConfiguration", "traits": { - "smithy.api#documentation": "

The configuration for the third-party custom source.

", + "smithy.api#documentation": "

The configuration used for the third-party custom source.

", "smithy.api#required": {} } } @@ -448,7 +448,7 @@ "source": { "target": "com.amazonaws.securitylake#CustomLogSourceResource", "traits": { - "smithy.api#documentation": "

The created third-party custom source.

" + "smithy.api#documentation": "

The third-party custom source that was created.

" } } }, @@ -516,7 +516,7 @@ "lambda:CreateFunction", "lambda:CreateEventSourceMapping" ], - "smithy.api#documentation": "

Initializes an Amazon Security Lake instance with the provided (or default) configuration. You\n can enable Security Lake in Amazon Web Services Regions with customized settings before enabling\n log collection in Regions. To specify particular Regions, configure these Regions using the\n configurations parameter. If you have already enabled Security Lake in a Region\n when you call this command, the command will update the Region if you provide new\n configuration parameters. If you have not already enabled Security Lake in the Region when you\n call this API, it will set up the data lake in the Region with the specified\n configurations.

\n

When you enable Security Lake, it starts ingesting security data after the\n CreateAwsLogSource call. This includes ingesting security data from\n sources, storing data, and making data accessible to subscribers. Security Lake also enables\n all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For\n more information, see the Amazon Security Lake User\n Guide.

", + "smithy.api#documentation": "

Initializes an Amazon Security Lake instance with the provided (or default) configuration. You\n can enable Security Lake in Amazon Web Services Regions with customized settings before enabling\n log collection in Regions. To specify particular Regions, configure these Regions using the\n configurations parameter. If you have already enabled Security Lake in a Region\n when you call this command, the command will update the Region if you provide new\n configuration parameters. If you have not already enabled Security Lake in the Region when you\n call this API, it will set up the data lake in the Region with the specified\n configurations.

\n

When you enable Security Lake, it starts ingesting security data after the\n CreateAwsLogSource call and after you create subscribers using the CreateSubscriber API. This includes ingesting security data from\n sources, storing data, and making data accessible to subscribers. Security Lake also enables\n all the existing settings and resources that it stores or maintains for your Amazon Web Services account in the current Region, including security log and event data. For\n more information, see the Amazon Security Lake User\n Guide.

", "smithy.api#http": { "method": "POST", "uri": "/v1/datalake", @@ -554,7 +554,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to get instant notifications about exceptions. Subscribes to the SNS topics for exception notifications", - "smithy.api#documentation": "

Creates the specified notification subscription in Amazon Security Lake for the organization\n you specify.

", + "smithy.api#documentation": "

Creates the specified notification subscription in Amazon Security Lake for the organization\n you specify. The notification subscription is created for exceptions that cannot be resolved by Security Lake automatically.

", "smithy.api#http": { "method": "POST", "uri": "/v1/datalake/exceptions/subscription", @@ -582,7 +582,7 @@ "exceptionTimeToLive": { "target": "smithy.api#Long", "traits": { - "smithy.api#documentation": "

The expiration period and time-to-live (TTL).

", + "smithy.api#documentation": "

The expiration period and time-to-live (TTL). It is the length of time that the exception message remains.

", "smithy.api#range": { "min": 1 } @@ -749,7 +749,7 @@ "ram:UpdateResourceShare", "ram:GetResourceShares" ], - "smithy.api#documentation": "

Creates a subscription permission for accounts that are already enabled in\n Amazon Security Lake. You can create a subscriber with access to data in the current Amazon Web Services Region.

", + "smithy.api#documentation": "

Creates a subscriber for accounts that are already enabled in Amazon Security Lake. You can\n create a subscriber with access to data in the current Amazon Web Services Region.

", "smithy.api#http": { "method": "POST", "uri": "/v1/subscribers", @@ -880,7 +880,7 @@ "sources": { "target": "com.amazonaws.securitylake#LogSourceResourceList", "traits": { - "smithy.api#documentation": "

The supported Amazon Web Services from which logs and events are collected.\n Security Lake supports log and event collection for natively supported Amazon Web Services.

", + "smithy.api#documentation": "

The supported Amazon Web Services services from which logs and events are collected.\n Security Lake supports log and event collection for natively supported Amazon Web Services services.

", "smithy.api#required": {} } }, @@ -947,7 +947,7 @@ "crawlerConfiguration": { "target": "com.amazonaws.securitylake#CustomLogSourceCrawlerConfiguration", "traits": { - "smithy.api#documentation": "

The configuration for the Glue Crawler for the third-party custom source.

", + "smithy.api#documentation": "

The configuration used for the Glue Crawler for a third-party custom source.

", "smithy.api#required": {} } }, @@ -960,7 +960,7 @@ } }, "traits": { - "smithy.api#documentation": "

The configuration for the third-party custom source.

" + "smithy.api#documentation": "

The configuration used for the third-party custom source.

" } }, "com.amazonaws.securitylake#CustomLogSourceCrawlerConfiguration": { @@ -975,7 +975,7 @@ } }, "traits": { - "smithy.api#documentation": "

The configuration for the Glue Crawler for the third-party custom source.

" + "smithy.api#documentation": "

The configuration used for the Glue Crawler for a third-party custom source.

" } }, "com.amazonaws.securitylake#CustomLogSourceName": { @@ -1187,7 +1187,7 @@ "kmsKeyId": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The id of KMS encryption key used by Amazon Security Lake to encrypt the Security Lake\n object.

" + "smithy.api#documentation": "

The identifier of the KMS encryption key used by Amazon Security Lake to encrypt the Security Lake\n object.

" } } }, @@ -1364,7 +1364,7 @@ "createStatus": { "target": "com.amazonaws.securitylake#DataLakeStatus", "traits": { - "smithy.api#documentation": "

Retrieves the status of the configuration operation for an account in Amazon Security Lake.

" + "smithy.api#documentation": "

Retrieves the status of the CreateDatalake API call for an account in Amazon Security Lake.

" } }, "updateStatus": { @@ -1396,7 +1396,7 @@ "sourceName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The supported Amazon Web Services from which logs and events are collected.\n Amazon Security Lake supports log and event collection for natively supported Amazon Web Services.

" + "smithy.api#documentation": "

The supported Amazon Web Services services from which logs and events are collected.\n Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services.

" } }, "eventClasses": { @@ -1413,7 +1413,7 @@ } }, "traits": { - "smithy.api#documentation": "

Amazon Security Lake collects logs and events from supported Amazon Web Services and\n custom sources. For the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

" + "smithy.api#documentation": "

Amazon Security Lake collects logs and events from supported Amazon Web Services services and\n custom sources. For the list of supported Amazon Web Services services, see the Amazon Security Lake User Guide.

" } }, "com.amazonaws.securitylake#DataLakeSourceList": { @@ -1556,7 +1556,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to disable any source type in any region for accounts that are part of a trusted organization or standalone accounts", - "smithy.api#documentation": "

Removes a natively supported Amazon Web Service as an Amazon Security Lake source. You\n can remove a source for one or more Regions. When you remove the source, Security Lake stops\n collecting data from that source in the specified Regions and accounts, and subscribers can\n no longer consume new data from the source. However, subscribers can still consume data\n that Security Lake collected from the source before removal.

\n

You can choose any source type in any Amazon Web Services Region for either accounts that\n are part of a trusted organization or standalone accounts.

", + "smithy.api#documentation": "

Removes a natively supported Amazon Web Services service as an Amazon Security Lake source. You\n can remove a source for one or more Regions. When you remove the source, Security Lake stops\n collecting data from that source in the specified Regions and accounts, and subscribers can\n no longer consume new data from the source. However, subscribers can still consume data\n that Security Lake collected from the source before removal.

\n

You can choose any source type in any Amazon Web Services Region for either accounts that\n are part of a trusted organization or standalone accounts.

", "smithy.api#http": { "method": "POST", "uri": "/v1/datalake/logsources/aws/delete", @@ -1945,7 +1945,7 @@ "sqs:DeleteQueue", "sqs:GetQueueUrl" ], - "smithy.api#documentation": "

Deletes the specified notification subscription in Amazon Security Lake for the organization\n you specify.

", + "smithy.api#documentation": "

Deletes the specified subscription notification in Amazon Security Lake for the organization\n you specify.

", "smithy.api#http": { "method": "DELETE", "uri": "/v1/subscribers/{subscriberId}/notification", @@ -2104,7 +2104,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to query the protocol and endpoint that were provided when subscribing to SNS topics for exception notifications", - "smithy.api#documentation": "

Retrieves the details of exception notifications for the account in Amazon Security Lake.

", + "smithy.api#documentation": "

Retrieves the protocol and endpoint that were provided when subscribing to Amazon SNS topics for exception notifications.

", "smithy.api#http": { "method": "GET", "uri": "/v1/datalake/exceptions/subscription", @@ -2138,7 +2138,7 @@ "exceptionTimeToLive": { "target": "smithy.api#Long", "traits": { - "smithy.api#documentation": "

The expiration period and time-to-live (TTL).

" + "smithy.api#documentation": "

The expiration period and time-to-live (TTL). It is the length of time that the exception message remains.

" } } }, @@ -2201,7 +2201,7 @@ "autoEnableNewAccount": { "target": "com.amazonaws.securitylake#DataLakeAutoEnableNewAccountConfigurationList", "traits": { - "smithy.api#documentation": "

The configuration for new accounts.

" + "smithy.api#documentation": "

The configuration used for new accounts in Security Lake.

" } } }, @@ -2430,7 +2430,7 @@ } }, "traits": { - "smithy.api#documentation": "

The configurations for HTTPS subscriber notification.

" + "smithy.api#documentation": "

The configurations used for HTTPS subscriber notification.

" } }, "com.amazonaws.securitylake#InternalServerException": { @@ -2505,13 +2505,13 @@ "target": "com.amazonaws.securitylake#MaxResults", "traits": { "smithy.api#default": 50, - "smithy.api#documentation": "

List the maximum number of failures in Security Lake.

" + "smithy.api#documentation": "

Lists the maximum number of failures in Security Lake.

" } }, "nextToken": { "target": "com.amazonaws.securitylake#NextToken", "traits": { - "smithy.api#documentation": "

List if there are more results available. The value of nextToken is a unique pagination\n token for each page. Repeat the call using the returned token to retrieve the next page.\n Keep all other arguments unchanged.

\n

Each pagination token expires after 24 hours. Using an expired pagination token will\n return an HTTP 400 InvalidToken error.

" + "smithy.api#documentation": "

Lists if there are more results available. The value of nextToken is a unique pagination\n token for each page. Repeat the call using the returned token to retrieve the next page.\n Keep all other arguments unchanged.

\n

Each pagination token expires after 24 hours. Using an expired pagination token will\n return an HTTP 400 InvalidToken error.

" } } }, @@ -2525,13 +2525,13 @@ "exceptions": { "target": "com.amazonaws.securitylake#DataLakeExceptionList", "traits": { - "smithy.api#documentation": "

Lists the failures that cannot be retried in the current Region.

" + "smithy.api#documentation": "

Lists the failures that cannot be retried.

" } }, "nextToken": { "target": "com.amazonaws.securitylake#NextToken", "traits": { - "smithy.api#documentation": "

List if there are more results available. The value of nextToken is a unique pagination\n token for each page. Repeat the call using the returned token to retrieve the next page.\n Keep all other arguments unchanged.

\n

Each pagination token expires after 24 hours. Using an expired pagination token will\n return an HTTP 400 InvalidToken error.

" + "smithy.api#documentation": "

Lists if there are more results available. The value of nextToken is a unique pagination\n token for each page. Repeat the call using the returned token to retrieve the next page.\n Keep all other arguments unchanged.

\n

Each pagination token expires after 24 hours. Using an expired pagination token will\n return an HTTP 400 InvalidToken error.

" } } }, @@ -2637,7 +2637,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to view the enabled accounts. You can view the enabled sources in the enabled regions", - "smithy.api#documentation": "

Retrieves the log sources in the current Amazon Web Services Region.

", + "smithy.api#documentation": "

Retrieves the log sources.

", "smithy.api#http": { "method": "POST", "uri": "/v1/datalake/logsources/list", @@ -2741,7 +2741,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to list all subscribers", - "smithy.api#documentation": "

List all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list\n of subscriptions associated with a specific organization or Amazon Web Services account.

", + "smithy.api#documentation": "

Lists all subscribers for the specific Amazon Security Lake account ID. You can retrieve a list\n of subscriptions associated with a specific organization or Amazon Web Services account.

", "smithy.api#http": { "method": "GET", "uri": "/v1/subscribers", @@ -2905,7 +2905,7 @@ "awsLogSource": { "target": "com.amazonaws.securitylake#AwsLogSourceResource", "traits": { - "smithy.api#documentation": "

Amazon Security Lake supports log and event collection for natively supported Amazon Web Services. For more information, see the Amazon Security Lake User Guide.

" + "smithy.api#documentation": "

Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide.

" } }, "customLogSource": { @@ -2916,7 +2916,7 @@ } }, "traits": { - "smithy.api#documentation": "

The supported source types from which logs and events are collected in Amazon Security Lake.\n For a list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

" + "smithy.api#documentation": "

The supported source types from which logs and events are collected in Amazon Security Lake.\n For a list of supported Amazon Web Services services, see the Amazon Security Lake User Guide.

" } }, "com.amazonaws.securitylake#LogSourceResourceList": { @@ -2954,7 +2954,7 @@ "httpsNotificationConfiguration": { "target": "com.amazonaws.securitylake#HttpsNotificationConfiguration", "traits": { - "smithy.api#documentation": "

The configurations for HTTPS subscriber notification.

" + "smithy.api#documentation": "

The configurations used for HTTPS subscriber notification.

" } } }, @@ -3219,7 +3219,7 @@ ], "maxAge": 86400 }, - "smithy.api#documentation": "

Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to\n automatically centralize security data from cloud, on-premises, and custom sources into a\n data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations\n is an account management service that lets you consolidate multiple Amazon Web Services\n accounts into an organization that you create and centrally manage. With Organizations, you\n can create member accounts and invite existing accounts to join your organization.\n Security Lake helps you analyze security data for a more complete understanding of your\n security posture across the entire organization. It can also help you improve the\n protection of your workloads, applications, and data.

\n

The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you\n retain ownership over your data.

\n

Amazon Security Lake integrates with CloudTrail, a service that provides a record of\n actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls\n from the Security Lake console and code calls to the Security Lake API operations. If you create a\n trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still\n view the most recent events in the CloudTrail console in Event history. Using the\n information collected by CloudTrail you can determine the request that was made to\n Security Lake, the IP address from which the request was made, who made the request, when it\n was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide.

\n

Security Lake automates the collection of security-related log and event data from\n integrated Amazon Web Services and third-party services. It also helps you manage\n the lifecycle of data with customizable retention and replication settings. Security Lake\n converts ingested data into Apache Parquet format and a standard open-source schema called\n the Open Cybersecurity Schema Framework (OCSF).

\n

Other Amazon Web Services and third-party services can subscribe to the data that's stored in Security Lake for \n incident response and security data analytics.

", + "smithy.api#documentation": "

Amazon Security Lake is a fully managed security data lake service. You can use Security Lake to\n automatically centralize security data from cloud, on-premises, and custom sources into a\n data lake that's stored in your Amazon Web Services account. Amazon Web Services Organizations\n is an account management service that lets you consolidate multiple Amazon Web Services\n accounts into an organization that you create and centrally manage. With Organizations, you\n can create member accounts and invite existing accounts to join your organization.\n Security Lake helps you analyze security data for a more complete understanding of your\n security posture across the entire organization. It can also help you improve the\n protection of your workloads, applications, and data.

\n

The data lake is backed by Amazon Simple Storage Service (Amazon S3) buckets, and you\n retain ownership over your data.

\n

Amazon Security Lake integrates with CloudTrail, a service that provides a record of\n actions taken by a user, role, or an Amazon Web Services service. In Security Lake, CloudTrail captures API calls for Security Lake as events. The calls captured include calls\n from the Security Lake console and code calls to the Security Lake API operations. If you create a\n trail, you can enable continuous delivery of CloudTrail events to an Amazon S3 bucket, including events for Security Lake. If you don't configure a trail, you can still\n view the most recent events in the CloudTrail console in Event history. Using the\n information collected by CloudTrail you can determine the request that was made to\n Security Lake, the IP address from which the request was made, who made the request, when it\n was made, and additional details. To learn more about Security Lake information in CloudTrail, see the Amazon Security Lake User Guide.

\n

Security Lake automates the collection of security-related log and event data from\n integrated Amazon Web Services services and third-party services. It also helps you manage\n the lifecycle of data with customizable retention and replication settings. Security Lake\n converts ingested data into Apache Parquet format and a standard open-source schema called\n the Open Cybersecurity Schema Framework (OCSF).

\n

Other Amazon Web Services services and third-party services can subscribe to the data that's stored in Security Lake for \n incident response and security data analytics.

", "smithy.api#title": "Amazon Security Lake", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -3914,7 +3914,7 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "

The configurations for SQS subscriber notification.

" + "smithy.api#documentation": "

The configurations used for EventBridge subscriber notification.

" } }, "com.amazonaws.securitylake#Subscriber": { @@ -4001,7 +4001,7 @@ "sources": { "target": "com.amazonaws.securitylake#LogSourceResourceList", "traits": { - "smithy.api#documentation": "

Amazon Security Lake supports log and event collection for natively supported Amazon Web Services. For more information, see the Amazon Security Lake User Guide.

", + "smithy.api#documentation": "

Amazon Security Lake supports log and event collection for natively supported Amazon Web Services services. For more information, see the Amazon Security Lake User Guide.

", "smithy.api#required": {} } }, @@ -4421,7 +4421,7 @@ "lambda:CreateFunction", "lambda:CreateEventSourceMapping" ], - "smithy.api#documentation": "

Specifies where to store your security data and for how long. You can add a rollup\n Region to consolidate data from multiple Amazon Web Services Regions.

", + "smithy.api#documentation": "

You can use UpdateDataLake to specify where to store your security data, how it should\n be encrypted at rest, and for how long. You can add a Rollup\n Region to consolidate data from multiple Amazon Web Services Regions, replace\n default encryption (SSE-S3) with a customer managed key,\n or specify transition and expiration actions through storage Lifecycle management. The UpdateDataLake API works as an "upsert" operation that performs an insert if the specified item or record does not exist, or an update if it\n already exists. Security Lake securely stores your data at rest using Amazon Web Services encryption solutions. For more details, see Data protection in Amazon Security Lake.

\n

For example, omitting the key encryptionConfiguration from a Region that is\n included in an update call that currently uses KMS will leave that Region's KMS key in\n place, but specifying encryptionConfiguration: {kmsKeyId: 'S3_MANAGED_KEY'}\n for that same Region will reset the key to S3-managed.

\n

For more details about lifecycle management and how to update retention settings for one or more Regions after enabling Security Lake, see the Amazon Security Lake User Guide.
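[Illustrative sketch, not part of the model diff: the "upsert" behavior described above, expressed as an assumed Soto call. Member names follow Soto's usual codegen; the Region value is a placeholder.]

// Assumes an already-configured `securityLake` service object (SotoSecurityLake).
// Update a single Region's settings. Omitting encryptionConfiguration would leave the
// existing KMS key in place; "S3_MANAGED_KEY" resets that Region to SSE-S3.
_ = try await securityLake.updateDataLake(
    .init(configurations: [
        SecurityLake.DataLakeConfiguration(
            encryptionConfiguration: .init(kmsKeyId: "S3_MANAGED_KEY"),
            region: "us-east-1"
        )
    ])
)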

", "smithy.api#http": { "method": "PUT", "uri": "/v1/datalake", @@ -4489,7 +4489,7 @@ "exceptionTimeToLive": { "target": "smithy.api#Long", "traits": { - "smithy.api#documentation": "

The time-to-live (TTL) for the exception message to remain.

", + "smithy.api#documentation": "

The time-to-live (TTL) for the exception message to remain. It is the length of time that the exception message remains.

", "smithy.api#range": { "min": 1 } @@ -4513,7 +4513,7 @@ "configurations": { "target": "com.amazonaws.securitylake#DataLakeConfigurationList", "traits": { - "smithy.api#documentation": "

Specify the Region or Regions that will contribute data to the rollup region.

", + "smithy.api#documentation": "

Specifies the Region or Regions that will contribute data to the rollup region.

", "smithy.api#required": {} } }, @@ -4710,7 +4710,7 @@ "subscriberIdentity": { "target": "com.amazonaws.securitylake#AwsIdentity", "traits": { - "smithy.api#documentation": "

The AWS identity used to access your data.

" + "smithy.api#documentation": "

The Amazon Web Services identity used to access your data.

" } }, "subscriberName": { @@ -4731,7 +4731,7 @@ "sources": { "target": "com.amazonaws.securitylake#LogSourceResourceList", "traits": { - "smithy.api#documentation": "

The supported Amazon Web Services from which logs and events are collected. For\n the list of supported Amazon Web Services, see the Amazon Security Lake User Guide.

" + "smithy.api#documentation": "

The supported Amazon Web Services services from which logs and events are collected. For\n the list of supported Amazon Web Services services, see the Amazon Security Lake User Guide.

" } } }, diff --git a/models/sesv2.json b/models/sesv2.json index 98594547b2..dcffa149ff 100644 --- a/models/sesv2.json +++ b/models/sesv2.json @@ -2820,6 +2820,12 @@ "traits": { "smithy.api#documentation": "

The name of the dedicated IP pool to associate with the configuration set.

" } + }, + "MaxDeliverySeconds": { + "target": "com.amazonaws.sesv2#MaxDeliverySeconds", + "traits": { + "smithy.api#documentation": "

The maximum amount of time, in seconds, that Amazon SES API v2 will attempt delivery of email.\n If specified, the value must be greater than or equal to 300 seconds (5 minutes)\n and less than or equal to 50400 seconds (840 minutes).\n

" + } } }, "traits": { @@ -5696,6 +5702,32 @@ "smithy.api#documentation": "

An object containing additional settings for your VDM configuration as applicable to\n the Guardian.

" } }, + "com.amazonaws.sesv2#HttpsPolicy": { + "type": "enum", + "members": { + "REQUIRE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REQUIRE" + } + }, + "REQUIRE_OPEN_ONLY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REQUIRE_OPEN_ONLY" + } + }, + "OPTIONAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "OPTIONAL" + } + } + }, + "traits": { + "smithy.api#documentation": "

The HTTPS policy to use for tracking open and click events. If the value is OPTIONAL or HttpsPolicy is not\n specified, the open tracker uses HTTP and the click tracker uses the original protocol of the link.\n If the value is REQUIRE, both the open and click trackers use HTTPS. If the value is REQUIRE_OPEN_ONLY,\n the open tracker uses HTTPS and the click tracker uses the original protocol of the link.\n

" + } + }, "com.amazonaws.sesv2#Identity": { "type": "string", "traits": { @@ -7472,6 +7504,15 @@ "smithy.api#default": 0 } }, + "com.amazonaws.sesv2#MaxDeliverySeconds": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 300, + "max": 50400 + } + } + }, "com.amazonaws.sesv2#MaxItems": { "type": "integer" }, @@ -8455,6 +8496,12 @@ "traits": { "smithy.api#documentation": "

The name of the dedicated IP pool to associate with the configuration set.

" } + }, + "MaxDeliverySeconds": { + "target": "com.amazonaws.sesv2#MaxDeliverySeconds", + "traits": { + "smithy.api#documentation": "

The maximum amount of time, in seconds, that Amazon SES API v2 will attempt delivery of email.\n If specified, the value must be greater than or equal to 300 seconds (5 minutes)\n and less than or equal to 50400 seconds (840 minutes).\n

" + } } }, "traits": { @@ -8693,6 +8740,9 @@ "traits": { "smithy.api#documentation": "

The domain to use to track open and click events.

" } + }, + "HttpsPolicy": { + "target": "com.amazonaws.sesv2#HttpsPolicy" } }, "traits": { @@ -11953,6 +12003,12 @@ "smithy.api#documentation": "

The domain to use for tracking open and click events.

", "smithy.api#required": {} } + }, + "HttpsPolicy": { + "target": "com.amazonaws.sesv2#HttpsPolicy", + "traits": { + "smithy.api#documentation": "

The HTTPS policy to use for tracking open and click events.

" + } } }, "traits": { diff --git a/models/socialmessaging.json b/models/socialmessaging.json new file mode 100644 index 0000000000..ea5d80e8f2 --- /dev/null +++ b/models/socialmessaging.json @@ -0,0 +1,2689 @@ +{ + "smithy": "2.0", + "shapes": { + "com.amazonaws.socialmessaging#AccessDeniedByMetaException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.socialmessaging#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.socialmessaging#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.socialmessaging#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

You do not have sufficient access to perform this action.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.socialmessaging#Arn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:.*$" + } + }, + "com.amazonaws.socialmessaging#AssociateInProgressToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 50 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.socialmessaging#AssociateWhatsAppBusinessAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#AssociateWhatsAppBusinessAccountInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#AssociateWhatsAppBusinessAccountOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#DependencyException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:ResourceTag/${TagKey}", + "aws:TagKeys" + ], + "smithy.api#documentation": "

This is only used through the Amazon Web Services console during sign-up to associate your WhatsApp Business Account to your Amazon Web Services account.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/whatsapp/signup", + "code": 200 + } + } + }, + "com.amazonaws.socialmessaging#AssociateWhatsAppBusinessAccountInput": { + "type": "structure", + "members": { + "signupCallback": { + "target": "com.amazonaws.socialmessaging#WhatsAppSignupCallback", + "traits": { + "smithy.api#documentation": "

Contains the callback access token.

" + } + }, + "setupFinalization": { + "target": "com.amazonaws.socialmessaging#WhatsAppSetupFinalization", + "traits": { + "smithy.api#documentation": "

A JSON object that contains the phone numbers and WhatsApp Business Account to link to your account.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#AssociateWhatsAppBusinessAccountOutput": { + "type": "structure", + "members": { + "signupCallbackResult": { + "target": "com.amazonaws.socialmessaging#WhatsAppSignupCallbackResult", + "traits": { + "smithy.api#documentation": "

Contains your WhatsApp registration status.

" + } + }, + "statusCode": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The status code for the response.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#DeleteWhatsAppMessageMedia": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#DeleteWhatsAppMessageMediaInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#DeleteWhatsAppMessageMediaOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#AccessDeniedByMetaException" + }, + { + "target": "com.amazonaws.socialmessaging#DependencyException" + }, + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete a media object from the WhatsApp service. If the object is still in an Amazon S3 bucket, you should delete it from there too.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v1/whatsapp/media", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.socialmessaging#DeleteWhatsAppMessageMediaInput": { + "type": "structure", + "members": { + "mediaId": { + "target": "com.amazonaws.socialmessaging#WhatsAppMediaId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the media file to delete. Use the mediaId returned from PostWhatsAppMessageMedia.

", + "smithy.api#httpQuery": "mediaId", + "smithy.api#required": {} + } + }, + "originationPhoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the originating phone number associated with the media. Phone\n number identifiers are formatted as\n phone-number-id-01234567890123456789012345678901. Use\n GetLinkedWhatsAppBusinessAccount to find a phone number's\n id.

", + "smithy.api#httpQuery": "originationPhoneNumberId", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "OriginationPhoneNumberId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#DeleteWhatsAppMessageMediaOutput": { + "type": "structure", + "members": { + "success": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Success indicator for deleting the media file.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#DependencyException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.socialmessaging#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Thrown when performing an action because a dependency would be broken.

", + "smithy.api#error": "server", + "smithy.api#httpError": 502, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.socialmessaging#DisassociateWhatsAppBusinessAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#DisassociateWhatsAppBusinessAccountInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#DisassociateWhatsAppBusinessAccountOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#DependencyException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Disassociate a WhatsApp Business Account (WABA) from your Amazon Web Services account.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v1/whatsapp/waba/disassociate", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.socialmessaging#DisassociateWhatsAppBusinessAccountInput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The unique identifier of your WhatsApp Business Account. WABA identifiers are formatted as\n waba-01234567890123456789012345678901. Use\n ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details.

", + "smithy.api#httpQuery": "id", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "WabaId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#DisassociateWhatsAppBusinessAccountOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#ErrorMessage": { + "type": "string" + }, + "com.amazonaws.socialmessaging#EventDestinationArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:.*:[a-z-]+([/:](.*))?$" + } + }, + "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#DependencyException" + }, + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Get the details of your linked WhatsApp Business Account.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/whatsapp/waba/details", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountInput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The unique identifier, from Amazon Web Services, of the linked WhatsApp Business\n Account. WABA identifiers are formatted as\n waba-01234567890123456789012345678901. Use\n ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details.

", + "smithy.api#httpQuery": "id", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "WabaId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountOutput": { + "type": "structure", + "members": { + "account": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccount", + "traits": { + "smithy.api#documentation": "

The details of the linked WhatsApp Business Account.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountPhoneNumber": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountPhoneNumberInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountPhoneNumberOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#DependencyException" + }, + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Use your WhatsApp phone number id to get the WABA account id and phone number\n details.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/whatsapp/waba/phone/details", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountPhoneNumberInput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the phone number. Phone number\n identifiers are formatted as phone-number-id-01234567890123456789012345678901.\n Use GetLinkedWhatsAppBusinessAccount to find a phone number's\n id.

", + "smithy.api#httpQuery": "id", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "OriginationPhoneNumberId" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountPhoneNumberOutput": { + "type": "structure", + "members": { + "phoneNumber": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberDetail" + }, + "linkedWhatsAppBusinessAccountId": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The WABA identifier linked to the phone number, formatted as\n waba-01234567890123456789012345678901.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#GetWhatsAppMessageMedia": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#GetWhatsAppMessageMediaInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#GetWhatsAppMessageMediaOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#AccessDeniedByMetaException" + }, + { + "target": "com.amazonaws.socialmessaging#DependencyException" + }, + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Get a media file from the WhatsApp service. On successful completion the media file is\n retrieved from Meta and stored in the specified Amazon S3 bucket. Use either\n destinationS3File or destinationS3PresignedUrl for the\n destination. If both are used then an InvalidParameterException is\n returned.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/whatsapp/media/get", + "code": 200 + } + } + }, + "com.amazonaws.socialmessaging#GetWhatsAppMessageMediaInput": { + "type": "structure", + "members": { + "mediaId": { + "target": "com.amazonaws.socialmessaging#WhatsAppMediaId", + "traits": { + "smithy.api#documentation": "

The unique identifier for the media file.

", + "smithy.api#required": {} + } + }, + "originationPhoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the originating phone number for the WhatsApp message media.\n The phone number identifiers are formatted as\n phone-number-id-01234567890123456789012345678901. Use\n GetLinkedWhatsAppBusinessAccount to find a phone number's\n id.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "OriginationPhoneNumberId" + } + }, + "metadataOnly": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Set to True to get only the metadata for the file.

" + } + }, + "destinationS3PresignedUrl": { + "target": "com.amazonaws.socialmessaging#S3PresignedUrl", + "traits": { + "smithy.api#documentation": "

The presigned URL of the media file.

" + } + }, + "destinationS3File": { + "target": "com.amazonaws.socialmessaging#S3File", + "traits": { + "smithy.api#documentation": "

The bucketName and key of the S3 media file.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#GetWhatsAppMessageMediaOutput": { + "type": "structure", + "members": { + "mimeType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The MIME type of the media.

" + } + }, + "fileSize": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

The file size of the media, in KB.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#Headers": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.socialmessaging#InternalServiceException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.socialmessaging#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The request processing has failed because of an unknown error, exception, or\n failure.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.socialmessaging#InvalidParametersException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.socialmessaging#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

One or more parameters provided to the action are not valid.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.socialmessaging#IsoCountryCode": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Z]{2}$" + } + }, + "com.amazonaws.socialmessaging#LinkedAccountWithIncompleteSetup": { + "type": "map", + "key": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountId" + }, + "value": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountIdMetaData" + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccount": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountArn", + "traits": { + "smithy.api#documentation": "

The ARN of the linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the linked WhatsApp Business Account, formatted as waba-01234567890123456789012345678901.

", + "smithy.api#required": {} + } + }, + "wabaId": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The WhatsApp Business Account ID provided by Meta.

", + "smithy.api#required": {} + } + }, + "registrationStatus": { + "target": "com.amazonaws.socialmessaging#RegistrationStatus", + "traits": { + "smithy.api#documentation": "

The registration status of the linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "linkDate": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountLinkDate", + "traits": { + "smithy.api#documentation": "

The date the WhatsApp Business Account was linked.

", + "smithy.api#required": {} + } + }, + "wabaName": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountName", + "traits": { + "smithy.api#documentation": "

The name of the linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "eventDestinations": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountEventDestinations", + "traits": { + "smithy.api#documentation": "

The event destinations for the linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "phoneNumbers": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberSummaryList", + "traits": { + "smithy.api#documentation": "

The phone numbers associated with the Linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of your linked WhatsApp Business Account.

" + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:.*:waba/[0-9a-zA-Z]+$" + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "(^waba-.*$)|(^arn:.*:waba/[0-9a-zA-Z]+$)" + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountIdMetaData": { + "type": "structure", + "members": { + "accountName": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountName", + "traits": { + "smithy.api#documentation": "

The name of your account.

" + } + }, + "registrationStatus": { + "target": "com.amazonaws.socialmessaging#RegistrationStatus", + "traits": { + "smithy.api#documentation": "

The registration status of the linked WhatsApp Business Account.

" + } + }, + "unregisteredWhatsAppPhoneNumbers": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberDetailList", + "traits": { + "smithy.api#documentation": "

The details for unregistered WhatsApp phone numbers.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains your WhatsApp registration status and details of any unregistered WhatsApp\n phone number.

" + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountResource": { + "type": "resource", + "identifiers": { + "WabaId": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId" + } + }, + "create": { + "target": "com.amazonaws.socialmessaging#AssociateWhatsAppBusinessAccount" + }, + "read": { + "target": "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccount" + }, + "delete": { + "target": "com.amazonaws.socialmessaging#DisassociateWhatsAppBusinessAccount" + }, + "list": { + "target": "com.amazonaws.socialmessaging#ListLinkedWhatsAppBusinessAccounts" + }, + "operations": [ + { + "target": "com.amazonaws.socialmessaging#PutWhatsAppBusinessAccountEventDestinations" + } + ], + "traits": { + "aws.api#arn": { + "template": "waba/{WabaId}" + }, + "aws.iam#conditionKeys": [ + "aws:ResourceTag/${TagKey}" + ], + "aws.iam#iamResource": { + "name": "waba", + "relativeDocumentation": "managing-wabas.html" + } + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountSummary": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountArn", + "traits": { + "smithy.api#documentation": "

The ARN of the linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the linked WhatsApp Business Account, formatted as waba-01234567890123456789012345678901.

", + "smithy.api#required": {} + } + }, + "wabaId": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The WhatsApp Business Account ID provided by Meta.

", + "smithy.api#required": {} + } + }, + "registrationStatus": { + "target": "com.amazonaws.socialmessaging#RegistrationStatus", + "traits": { + "smithy.api#documentation": "

The registration status of the linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "linkDate": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountLinkDate", + "traits": { + "smithy.api#documentation": "

The date the WhatsApp Business Account was linked.

", + "smithy.api#required": {} + } + }, + "wabaName": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountName", + "traits": { + "smithy.api#documentation": "

The name of the linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "eventDestinations": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountEventDestinations", + "traits": { + "smithy.api#documentation": "

The event destinations for the linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a linked WhatsApp Business Account.

" + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountSummary" + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppPhoneNumberArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:.*:phone-number-id/[0-9a-zA-Z]+$" + } + }, + "com.amazonaws.socialmessaging#LinkedWhatsAppPhoneNumberResource": { + "type": "resource", + "identifiers": { + "OriginationPhoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId" + } + }, + "read": { + "target": "com.amazonaws.socialmessaging#GetLinkedWhatsAppBusinessAccountPhoneNumber" + }, + "operations": [ + { + "target": "com.amazonaws.socialmessaging#DeleteWhatsAppMessageMedia" + }, + { + "target": "com.amazonaws.socialmessaging#GetWhatsAppMessageMedia" + }, + { + "target": "com.amazonaws.socialmessaging#PostWhatsAppMessageMedia" + }, + { + "target": "com.amazonaws.socialmessaging#SendWhatsAppMessage" + } + ], + "traits": { + "aws.api#arn": { + "template": "phone-number-id/{OriginationPhoneNumberId}" + }, + "aws.iam#conditionKeys": [ + "aws:ResourceTag/${TagKey}" + ], + "aws.iam#iamResource": { + "name": "phone-number-id", + "relativeDocumentation": "managing-phone-numbers.html" + } + } + }, + "com.amazonaws.socialmessaging#ListLinkedWhatsAppBusinessAccounts": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#ListLinkedWhatsAppBusinessAccountsInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#ListLinkedWhatsAppBusinessAccountsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

List all WhatsApp Business Accounts linked to your Amazon Web Services account.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/whatsapp/waba/list", + "code": 200 + }, + "smithy.api#paginated": { + "items": "linkedAccounts" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.socialmessaging#ListLinkedWhatsAppBusinessAccountsInput": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.socialmessaging#NextToken", + "traits": { + "smithy.api#documentation": "

The next token for pagination.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.socialmessaging#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#ListLinkedWhatsAppBusinessAccountsOutput": { + "type": "structure", + "members": { + "linkedAccounts": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountSummaryList", + "traits": { + "smithy.api#documentation": "

A list of WhatsApp Business Accounts linked to your Amazon Web Services account.

" + } + }, + "nextToken": { + "target": "com.amazonaws.socialmessaging#NextToken", + "traits": { + "smithy.api#documentation": "

The next token for pagination.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#ListTagsForResourceInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#ListTagsForResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

List all tags associated with a resource, such as a phone number or WABA.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/tags/list", + "code": 200 + }, + "smithy.api#readonly": {}, + "smithy.test#smokeTests": [ + { + "id": "ListTagsFailure", + "params": { + "resourceArn": "arn:aws:social-messaging:us-east-1:9923825:phone-number-id/45c1973a7577" + }, + "expect": { + "failure": {} + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "vendorParams": { + "region": "us-east-1" + } + } + ] + } + }, + "com.amazonaws.socialmessaging#ListTagsForResourceInput": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.socialmessaging#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to retrieve the tags from.

", + "smithy.api#httpQuery": "resourceArn", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#ListTagsForResourceOutput": { + "type": "structure", + "members": { + "statusCode": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The status code of the response.

" + } + }, + "tags": { + "target": "com.amazonaws.socialmessaging#TagList", + "traits": { + "smithy.api#documentation": "

The tags for the resource.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.socialmessaging#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 600 + } + } + }, + "com.amazonaws.socialmessaging#PhoneNumber": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + } + } + }, + "com.amazonaws.socialmessaging#PostWhatsAppMessageMedia": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#PostWhatsAppMessageMediaInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#PostWhatsAppMessageMediaOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#AccessDeniedByMetaException" + }, + { + "target": "com.amazonaws.socialmessaging#DependencyException" + }, + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Upload a media file to the WhatsApp service. Only the specified\n originationPhoneNumberId has the permissions to send the media file when\n using SendWhatsAppMessage. You must use either sourceS3File\n or sourceS3PresignedUrl for the source. If both or neither are specified then an\n InvalidParameterException is returned.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/whatsapp/media", + "code": 200 + } + } + }, + "com.amazonaws.socialmessaging#PostWhatsAppMessageMediaInput": { + "type": "structure", + "members": { + "originationPhoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId", + "traits": { + "smithy.api#documentation": "

The ID of the phone number to associate with the WhatsApp media file. The phone number\n identifiers are formatted as phone-number-id-01234567890123456789012345678901.\n Use GetLinkedWhatsAppBusinessAccount to find a phone number's\n id.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "OriginationPhoneNumberId" + } + }, + "sourceS3PresignedUrl": { + "target": "com.amazonaws.socialmessaging#S3PresignedUrl", + "traits": { + "smithy.api#documentation": "

The source presigned URL of the media file.

" + } + }, + "sourceS3File": { + "target": "com.amazonaws.socialmessaging#S3File", + "traits": { + "smithy.api#documentation": "

The source S3 url for the media file.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#PostWhatsAppMessageMediaOutput": { + "type": "structure", + "members": { + "mediaId": { + "target": "com.amazonaws.socialmessaging#WhatsAppMediaId", + "traits": { + "smithy.api#documentation": "

The unique identifier of the posted WhatsApp media file.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#PutWhatsAppBusinessAccountEventDestinations": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#PutWhatsAppBusinessAccountEventDestinationsInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#PutWhatsAppBusinessAccountEventDestinationsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Add an event destination to log event data from WhatsApp for a WhatsApp Business Account (WABA). A WABA can only have one event destination at a time. All resources associated with the WABA use the same event destination.

", + "smithy.api#http": { + "method": "PUT", + "uri": "/v1/whatsapp/waba/eventdestinations", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.socialmessaging#PutWhatsAppBusinessAccountEventDestinationsInput": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The unique identifier of your WhatsApp Business Account. WABA identifiers are formatted as\n waba-01234567890123456789012345678901. Use\n ListLinkedWhatsAppBusinessAccounts to list all WABAs and their details.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "WabaId" + } + }, + "eventDestinations": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountEventDestinations", + "traits": { + "smithy.api#documentation": "

An array of WhatsAppBusinessAccountEventDestination event destinations.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#PutWhatsAppBusinessAccountEventDestinationsOutput": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#RegistrationStatus": { + "type": "enum", + "members": { + "COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETE" + } + }, + "INCOMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCOMPLETE" + } + } + } + }, + "com.amazonaws.socialmessaging#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.socialmessaging#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The resource was not found.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.socialmessaging#S3File": { + "type": "structure", + "members": { + "bucketName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The bucket name.

", + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^[a-z0-9][a-z0-9.-]*[a-z0-9]$", + "smithy.api#required": {} + } + }, + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The object key of the media file.

", + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information for the S3 bucket that contains media files.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.socialmessaging#S3PresignedUrl": { + "type": "structure", + "members": { + "url": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The presigned URL of the object.

", + "smithy.api#length": { + "min": 1, + "max": 2000 + }, + "smithy.api#pattern": "^https://(.*)s3(.*).amazonaws.com/(.*)$", + "smithy.api#required": {} + } + }, + "headers": { + "target": "com.amazonaws.socialmessaging#Headers", + "traits": { + "smithy.api#documentation": "

A map of headers and their values. You must specify the Content-Type header when using PostWhatsAppMessageMedia. For a list of common headers, see Common Request Headers in the Amazon S3\n API Reference.\n

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

You can use presigned URLs to grant time-limited access to objects in Amazon S3 without updating your bucket policy. For more information, see Working with presigned URLs in the Amazon S3\n User Guide.

", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.socialmessaging#SendWhatsAppMessage": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#SendWhatsAppMessageInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#SendWhatsAppMessageOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#DependencyException" + }, + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "smithy.api#documentation": "

Send a WhatsApp message. For examples of sending a message using the Amazon Web Services\n CLI, see Sending messages in the\n \n Amazon Web Services End User Messaging Social User Guide\n .

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/whatsapp/send", + "code": 200 + } + } + }, + "com.amazonaws.socialmessaging#SendWhatsAppMessageInput": { + "type": "structure", + "members": { + "originationPhoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId", + "traits": { + "smithy.api#documentation": "

The ID of the phone number used to send the WhatsApp message. If you are sending a media\n file only the originationPhoneNumberId used to upload the file can be used.\n Phone number identifiers are formatted as\n phone-number-id-01234567890123456789012345678901. Use\n GetLinkedWhatsAppBusinessAccount to find a phone number's\n id.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "OriginationPhoneNumberId" + } + }, + "message": { + "target": "com.amazonaws.socialmessaging#WhatsAppMessageBlob", + "traits": { + "smithy.api#documentation": "

The message to send through WhatsApp. The length is in KB. The message field passes through a WhatsApp\n Message object, see Messages in the WhatsApp Business Platform Cloud API\n Reference.

", + "smithy.api#required": {} + } + }, + "metaApiVersion": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The API version for the request formatted as v{VersionNumber}. For a list of supported API versions and Amazon Web Services Regions, see \n Amazon Web Services End User Messaging Social API Service Endpoints in the Amazon Web Services General Reference.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.socialmessaging#SendWhatsAppMessageOutput": { + "type": "structure", + "members": { + "messageId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The unique identifier of the message.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#SocialMessaging": { + "type": "service", + "version": "2024-01-01", + "operations": [ + { + "target": "com.amazonaws.socialmessaging#ListTagsForResource" + }, + { + "target": "com.amazonaws.socialmessaging#TagResource" + }, + { + "target": "com.amazonaws.socialmessaging#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountResource" + }, + { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppPhoneNumberResource" + } + ], + "errors": [ + { + "target": "com.amazonaws.socialmessaging#AccessDeniedException" + }, + { + "target": "com.amazonaws.socialmessaging#ValidationException" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "SocialMessaging", + "arnNamespace": "social-messaging", + "cloudTrailEventSource": "social-messaging.amazonaws.com", + "endpointPrefix": "social-messaging" + }, + "aws.auth#sigv4": { + "name": "social-messaging" + }, + "aws.iam#defineConditionKeys": { + "aws:RequestTag/${TagKey}": { + "type": "String", + "documentation": "Filters access by the tags that are passed in the request", + "externalDocumentation": "${DocHomeURL}IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requesttag" + }, + "aws:ResourceTag/${TagKey}": { + "type": "String", + "documentation": "Filters access by the tags associated with the resource", + "externalDocumentation": "${DocHomeURL}IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-resourcetag" + }, + "aws:TagKeys": { + "type": "ArrayOfString", + "documentation": "Filters access by the tag keys that are passed in the request", + "externalDocumentation": "${DocHomeURL}IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-tagkeys" + } + }, + "aws.iam#disableConditionKeyInference": {}, + "aws.iam#supportedPrincipalTypes": [ + "Root", + "IAMUser", + "IAMRole", + "FederatedUser" + ], + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "origin": "*", + "additionalAllowedHeaders": [ + "authorization", + "x-amz-date", + "x-amz-security-token", + "content-type", + "x-amz-content-sha256", + "x-amz-user-agent", + "x-amzn-platform-id", + "x-amzn-trace-id", + "content-length", + "x-api-key", + "amz-sdk-request", + "amz-sdk-invocation-id", + "Csrf-Token" + ], + "additionalExposedHeaders": [ + "x-amzn-errortype", + "x-amzn-requestid", + "x-amzn-trace-id", + "Csrf-Token" + ] + }, + "smithy.api#documentation": "

\n Amazon Web Services End User Messaging Social, also referred to as Social messaging, is a messaging service that enables\n application developers to incorporate WhatsApp into their existing workflows. The Amazon Web Services End User Messaging Social API provides information about the\n Amazon Web Services End User Messaging Social API resources, including supported HTTP methods, parameters, and schemas.

\n

The Amazon Web Services End User Messaging Social API provides programmatic access to options that are unique to the WhatsApp Business Platform.

\n

If you're new to the Amazon Web Services End User Messaging Social API, it's also helpful to review What is\n Amazon Web Services End User Messaging Social in the Amazon Web Services End User Messaging Social User Guide. The\n Amazon Web Services End User Messaging Social User Guide provides tutorials, code samples, and procedures that demonstrate how to use\n Amazon Web Services End User Messaging Social API features programmatically and how to integrate functionality into applications.\n The guide also provides key information, such as integration with other Amazon Web Services\n services, and the quotas that apply to use of the service.

\n

\n Regional availability\n

\n

The Amazon Web Services End User Messaging Social API is available across several Amazon Web Services Regions and it provides a dedicated endpoint for each of these Regions. For a list of\n all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Web Services End User Messaging endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see\n Managing\n Amazon Web Services Regions in the Amazon Web Services General\n Reference.

\n

In each Region, Amazon Web Services maintains multiple Availability Zones. These\n Availability Zones are physically isolated from each other, but are united by private,\n low-latency, high-throughput, and highly redundant network connections. These Availability\n Zones enable us to provide very high levels of availability and redundancy, while also\n minimizing latency. To learn more about the number of Availability Zones that are available\n in each Region, see Amazon Web Services Global Infrastructure.\n

", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#title": "AWS End User Messaging Social", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://social-messaging-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + 
true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://social-messaging-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://social-messaging.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://social-messaging.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://social-messaging-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://social-messaging.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://social-messaging-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": 
"https://social-messaging.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://social-messaging-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://social-messaging.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support 
DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://social-messaging.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.socialmessaging#StringList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.socialmessaging#Tag": { + "type": "structure", + "members": { + "key": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The tag key.

", + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The tag value.

", + "smithy.api#length": { + "max": 256 + } + } + } + }, + "traits": { + "smithy.api#documentation": "

The tag for a resource.

" + } + }, + "com.amazonaws.socialmessaging#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.socialmessaging#Tag" + } + }, + "com.amazonaws.socialmessaging#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#TagResourceInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#TagResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:RequestTag/${TagKey}", + "aws:ResourceTag/${TagKey}", + "aws:TagKeys" + ], + "smithy.api#documentation": "

Adds or overwrites only the specified tags for the specified resource. When you specify\n an existing tag key, the value is overwritten with the new value.
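As a reviewer's note for this PR: the TagResource and UntagResource shapes above are what the generated SotoSocialMessaging target will expose. The sketch below is illustrative only; the service struct name `SocialMessaging`, the placeholder ARN, the tag values, and the no-argument `AWSClient()` construction (Soto 7 style) are assumptions, not part of this diff.

```swift
import SotoCore
import SotoSocialMessaging

func tagAndUntagExample() async throws {
    // Soto 7-style client construction; older Soto releases also need an httpClientProvider argument.
    let client = AWSClient()
    let socialMessaging = SocialMessaging(client: client, region: .useast1)

    // Placeholder ARN for a linked WhatsApp Business Account resource.
    let resourceArn = "arn:aws:social-messaging:us-east-1:111122223333:waba/EXAMPLE"

    // Add (or overwrite) a tag on the resource.
    let tagged = try await socialMessaging.tagResource(
        .init(resourceArn: resourceArn, tags: [.init(key: "project", value: "whatsapp-integration")])
    )
    print("TagResource returned status code:", tagged.statusCode ?? 0)

    // Remove the same tag again by key.
    _ = try await socialMessaging.untagResource(
        .init(resourceArn: resourceArn, tagKeys: ["project"])
    )

    try await client.shutdown()
}
```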

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/tags/tag-resource", + "code": 200 + } + } + }, + "com.amazonaws.socialmessaging#TagResourceInput": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.socialmessaging#Arn", + "traits": { + "aws.api#data": "tagging", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to tag.

", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.socialmessaging#TagList", + "traits": { + "smithy.api#documentation": "

The tags to add to the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.socialmessaging#LinkedWhatsAppPhoneNumberResource", + "ids": { + "OriginationPhoneNumberId": "resourceArn" + } + }, + { + "resource": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountResource", + "ids": { + "WabaId": "resourceArn" + } + } + ] + } + }, + "com.amazonaws.socialmessaging#TagResourceOutput": { + "type": "structure", + "members": { + "statusCode": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The status code of the tag resource operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#ThrottledRequestException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.socialmessaging#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The request was denied due to request throttling.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.socialmessaging#TwoFactorPin": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 6 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.socialmessaging#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.socialmessaging#UntagResourceInput" + }, + "output": { + "target": "com.amazonaws.socialmessaging#UntagResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.socialmessaging#InternalServiceException" + }, + { + "target": "com.amazonaws.socialmessaging#InvalidParametersException" + }, + { + "target": "com.amazonaws.socialmessaging#ThrottledRequestException" + } + ], + "traits": { + "aws.iam#conditionKeys": [ + "aws:ResourceTag/${TagKey}", + "aws:TagKeys" + ], + "smithy.api#documentation": "

Removes the specified tags from a resource.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/tags/untag-resource", + "code": 200 + } + } + }, + "com.amazonaws.socialmessaging#UntagResourceInput": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.socialmessaging#Arn", + "traits": { + "aws.api#data": "tagging", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to remove tags from.

", + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.socialmessaging#StringList", + "traits": { + "smithy.api#documentation": "

The keys of the tags to remove from the resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.socialmessaging#LinkedWhatsAppPhoneNumberResource", + "ids": { + "OriginationPhoneNumberId": "resourceArn" + } + }, + { + "resource": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountResource", + "ids": { + "WabaId": "resourceArn" + } + } + ] + } + }, + "com.amazonaws.socialmessaging#UntagResourceOutput": { + "type": "structure", + "members": { + "statusCode": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

The status code of the untag resource operation.

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.socialmessaging#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.socialmessaging#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The request contains an invalid parameter value.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.socialmessaging#WabaPhoneNumberSetupFinalization": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumber", + "traits": { + "smithy.api#documentation": "

The unique identifier of the originating phone number associated with the media. Phone\n number identifiers are formatted as\n phone-number-id-01234567890123456789012345678901. Use\n GetLinkedWhatsAppBusinessAccount to find a phone number's\n id.

", + "smithy.api#required": {} + } + }, + "twoFactorPin": { + "target": "com.amazonaws.socialmessaging#TwoFactorPin", + "traits": { + "smithy.api#documentation": "

The PIN to use for two-step verification. To reset your PIN follow the directions in\n Updating PIN in the WhatsApp Business Platform Cloud API\n Reference.

", + "smithy.api#required": {} + } + }, + "dataLocalizationRegion": { + "target": "com.amazonaws.socialmessaging#IsoCountryCode", + "traits": { + "smithy.api#documentation": "

The two letter ISO region for the location where Meta will store data.

Asia–Pacific (APAC): Australia AU, Indonesia ID, India IN, Japan JP, Singapore SG, South Korea KR. Europe: Germany DE, Switzerland CH, United Kingdom GB. Latin America (LATAM): Brazil BR. Middle East and Africa (MEA): Bahrain BH, South Africa ZA, United Arab Emirates AE. North America (NORAM): Canada CA.
" + } + }, + "tags": { + "target": "com.amazonaws.socialmessaging#TagList", + "traits": { + "smithy.api#documentation": "

An array of key and value pair tags.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The registration details for a linked phone number.

" + } + }, + "com.amazonaws.socialmessaging#WabaPhoneNumberSetupFinalizationList": { + "type": "list", + "member": { + "target": "com.amazonaws.socialmessaging#WabaPhoneNumberSetupFinalization" + } + }, + "com.amazonaws.socialmessaging#WabaSetupFinalization": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

The ID of the linked WhatsApp Business Account, formatted as waba-01234567890123456789012345678901.

" + } + }, + "eventDestinations": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountEventDestinations", + "traits": { + "smithy.api#documentation": "

The event destinations for the linked WhatsApp Business Account.

" + } + }, + "tags": { + "target": "com.amazonaws.socialmessaging#TagList", + "traits": { + "smithy.api#documentation": "

An array of key and value pair tags.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The registration details for a linked WhatsApp Business Account.

" + } + }, + "com.amazonaws.socialmessaging#WhatsAppBusinessAccountEventDestination": { + "type": "structure", + "members": { + "eventDestinationArn": { + "target": "com.amazonaws.socialmessaging#EventDestinationArn", + "traits": { + "smithy.api#documentation": "

The ARN of the event destination.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information on the event destination.

" + } + }, + "com.amazonaws.socialmessaging#WhatsAppBusinessAccountEventDestinations": { + "type": "list", + "member": { + "target": "com.amazonaws.socialmessaging#WhatsAppBusinessAccountEventDestination" + }, + "traits": { + "smithy.api#length": { + "max": 1 + } + } + }, + "com.amazonaws.socialmessaging#WhatsAppBusinessAccountId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.socialmessaging#WhatsAppBusinessAccountLinkDate": { + "type": "timestamp" + }, + "com.amazonaws.socialmessaging#WhatsAppBusinessAccountName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 200 + } + } + }, + "com.amazonaws.socialmessaging#WhatsAppDisplayPhoneNumber": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 20 + } + } + }, + "com.amazonaws.socialmessaging#WhatsAppMediaId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[A-Za-z0-9]+$" + } + }, + "com.amazonaws.socialmessaging#WhatsAppMessageBlob": { + "type": "blob", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048000 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.socialmessaging#WhatsAppPhoneNumber": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.socialmessaging#WhatsAppPhoneNumberDetail": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppPhoneNumberArn", + "traits": { + "smithy.api#documentation": "

The ARN of the WhatsApp phone number.

", + "smithy.api#required": {} + } + }, + "phoneNumber": { + "target": "com.amazonaws.socialmessaging#PhoneNumber", + "traits": { + "smithy.api#documentation": "

The phone number for sending WhatsApp.

", + "smithy.api#required": {} + } + }, + "phoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId", + "traits": { + "smithy.api#documentation": "

The phone number ID. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901.

", + "smithy.api#required": {} + } + }, + "metaPhoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumber", + "traits": { + "smithy.api#documentation": "

The phone number ID from Meta.

", + "smithy.api#required": {} + } + }, + "displayPhoneNumberName": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberName", + "traits": { + "smithy.api#documentation": "

The display name for this phone number.

", + "smithy.api#required": {} + } + }, + "displayPhoneNumber": { + "target": "com.amazonaws.socialmessaging#WhatsAppDisplayPhoneNumber", + "traits": { + "smithy.api#documentation": "

The phone number that appears in the recipient's display.

", + "smithy.api#required": {} + } + }, + "qualityRating": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberQualityRating", + "traits": { + "smithy.api#documentation": "

The quality rating of the phone number.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of your WhatsApp phone number.

" + } + }, + "com.amazonaws.socialmessaging#WhatsAppPhoneNumberDetailList": { + "type": "list", + "member": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberDetail" + } + }, + "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "(^phone-number-id-.*$)|(^arn:.*:phone-number-id/[0-9a-zA-Z]+$)" + } + }, + "com.amazonaws.socialmessaging#WhatsAppPhoneNumberName": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 200 + } + } + }, + "com.amazonaws.socialmessaging#WhatsAppPhoneNumberQualityRating": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 10 + } + } + }, + "com.amazonaws.socialmessaging#WhatsAppPhoneNumberSummary": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppPhoneNumberArn", + "traits": { + "smithy.api#documentation": "

The full Amazon Resource Name (ARN) for the phone number.

", + "smithy.api#required": {} + } + }, + "phoneNumber": { + "target": "com.amazonaws.socialmessaging#PhoneNumber", + "traits": { + "smithy.api#documentation": "

The phone number associated with the Linked WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "phoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberId", + "traits": { + "smithy.api#documentation": "

The phone number ID. Phone number identifiers are formatted as phone-number-id-01234567890123456789012345678901.

", + "smithy.api#required": {} + } + }, + "metaPhoneNumberId": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumber", + "traits": { + "smithy.api#documentation": "

The phone number ID from Meta.

", + "smithy.api#required": {} + } + }, + "displayPhoneNumberName": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberName", + "traits": { + "smithy.api#documentation": "

The display name for this phone number.

", + "smithy.api#required": {} + } + }, + "displayPhoneNumber": { + "target": "com.amazonaws.socialmessaging#WhatsAppDisplayPhoneNumber", + "traits": { + "smithy.api#documentation": "

The phone number that appears in the recipient's display.

", + "smithy.api#required": {} + } + }, + "qualityRating": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberQualityRating", + "traits": { + "smithy.api#documentation": "

The quality rating of the phone number. This is from Meta.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of a linked phone number.

" + } + }, + "com.amazonaws.socialmessaging#WhatsAppPhoneNumberSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.socialmessaging#WhatsAppPhoneNumberSummary" + } + }, + "com.amazonaws.socialmessaging#WhatsAppSetupFinalization": { + "type": "structure", + "members": { + "associateInProgressToken": { + "target": "com.amazonaws.socialmessaging#AssociateInProgressToken", + "traits": { + "smithy.api#documentation": "

An Amazon Web Services access token generated by WhatsAppSignupCallback and used by WhatsAppSetupFinalization.

", + "smithy.api#required": {} + } + }, + "phoneNumbers": { + "target": "com.amazonaws.socialmessaging#WabaPhoneNumberSetupFinalizationList", + "traits": { + "smithy.api#documentation": "

An array of WabaPhoneNumberSetupFinalization objects containing the details of each phone number associated with the WhatsApp Business Account.

", + "smithy.api#required": {} + } + }, + "phoneNumberParent": { + "target": "com.amazonaws.socialmessaging#LinkedWhatsAppBusinessAccountId", + "traits": { + "smithy.api#documentation": "

Used to add a new phone number to an existing WhatsApp Business Account. This field can't be used when the waba field is present.

" + } + }, + "waba": { + "target": "com.amazonaws.socialmessaging#WabaSetupFinalization", + "traits": { + "smithy.api#documentation": "

Used to create a new WhatsApp Business Account and add a phone number. This field can't be used when the phoneNumberParent field is present.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of linking a WhatsApp Business Account to your Amazon Web Services account.

" + } + }, + "com.amazonaws.socialmessaging#WhatsAppSignupCallback": { + "type": "structure", + "members": { + "accessToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The access token for your WhatsApp Business Account. The accessToken value is provided by Meta.

", + "smithy.api#length": { + "max": 1000 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the accessToken provided by Meta during signup.

" + } + }, + "com.amazonaws.socialmessaging#WhatsAppSignupCallbackResult": { + "type": "structure", + "members": { + "associateInProgressToken": { + "target": "com.amazonaws.socialmessaging#AssociateInProgressToken", + "traits": { + "smithy.api#documentation": "

An Amazon Web Services access token generated by WhatsAppSignupCallback and used by WhatsAppSetupFinalization.

" + } + }, + "linkedAccountsWithIncompleteSetup": { + "target": "com.amazonaws.socialmessaging#LinkedAccountWithIncompleteSetup", + "traits": { + "smithy.api#documentation": "

A LinkedWhatsAppBusinessAccountIdMetaData object map containing the details of any WhatsApp Business Accounts that have incomplete setup.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the results of WhatsAppSignupCallback.

" + } + } + } +} \ No newline at end of file diff --git a/models/ssm.json b/models/ssm.json index 892e047223..1e1a78ef1e 100644 --- a/models/ssm.json +++ b/models/ssm.json @@ -3193,7 +3193,7 @@ "Values": { "target": "com.amazonaws.ssm#AttachmentsSourceValues", "traits": { - "smithy.api#documentation": "

The value of a key-value pair that identifies the location of an attachment to a document.\n The format for Value depends on the type of key you\n specify.

For the key SourceUrl, the value is an S3 bucket location. For example: \"Values\": [ \"s3://doc-example-bucket/my-folder\" ]. For the key S3FileUrl, the value is a file in an S3 bucket. For example: \"Values\": [ \"s3://doc-example-bucket/my-folder/my-file.py\" ]. For the key AttachmentReference, the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example: \"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]. However, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example: \"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ].
" + "smithy.api#documentation": "

The value of a key-value pair that identifies the location of an attachment to a document.\n The format for Value depends on the type of key you\n specify.

For the key SourceUrl, the value is an S3 bucket location. For example: \"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix\" ]. For the key S3FileUrl, the value is a file in an S3 bucket. For example: \"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix/my-file.py\" ]. For the key AttachmentReference, the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example: \"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]. However, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example: \"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ].
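The attachment values described above are passed through the generated Soto SSM client when creating a document. A minimal sketch follows, assuming the usual Soto SSM shapes; the bucket path, document name, and document content are hypothetical placeholders.

```swift
import SotoSSM

func createDocumentWithAttachment(ssm: SSM) async throws {
    // SourceUrl points at the S3 prefix that holds the attachment files.
    let attachment = SSM.AttachmentsSource(
        key: .sourceUrl,
        values: ["s3://amzn-s3-demo-bucket/my-prefix"]  // placeholder bucket and prefix
    )
    let response = try await ssm.createDocument(.init(
        attachments: [attachment],
        content: "{\"schemaVersion\": \"2.2\", \"description\": \"example\", \"mainSteps\": []}",
        documentType: .command,
        name: "ExampleDocumentWithAttachment"
    ))
    print("Created document:", response.documentDescription?.name ?? "")
}
```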
" } }, "Name": { @@ -3501,6 +3501,12 @@ "smithy.api#documentation": "

The CloudWatch alarm that was invoked by the automation.

" } }, + "TargetLocationsURL": { + "target": "com.amazonaws.ssm#TargetLocationsURL", + "traits": { + "smithy.api#documentation": "

A publicly accessible URL for a file that contains the TargetLocations body.\n Currently, only files in presigned Amazon S3 buckets are supported.

" + } + }, "AutomationSubtype": { "target": "com.amazonaws.ssm#AutomationSubtype", "traits": { @@ -3837,7 +3843,7 @@ "AutomationType": { "target": "com.amazonaws.ssm#AutomationType", "traits": { - "smithy.api#documentation": "

Use this filter with DescribeAutomationExecutions. Specify either Local or\n CrossAccount. CrossAccount is an Automation that runs in multiple Amazon Web Services Regions and\n Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and accounts in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

Use this filter with DescribeAutomationExecutions. Specify either Local or\n CrossAccount. CrossAccount is an Automation that runs in multiple Amazon Web Services Regions and\n Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the\n Amazon Web Services Systems Manager User Guide.

" } }, "AlarmConfiguration": { @@ -3852,6 +3858,12 @@ "smithy.api#documentation": "

The CloudWatch alarm that was invoked by the automation.

" } }, + "TargetLocationsURL": { + "target": "com.amazonaws.ssm#TargetLocationsURL", + "traits": { + "smithy.api#documentation": "

A publicly accessible URL for a file that contains the TargetLocations body.\n Currently, only files in presigned Amazon S3 buckets are supported.

" + } + }, "AutomationSubtype": { "target": "com.amazonaws.ssm#AutomationSubtype", "traits": { @@ -4179,7 +4191,7 @@ "ApprovedPatches": { "target": "com.amazonaws.ssm#PatchIdList", "traits": { - "smithy.api#documentation": "

A list of explicitly approved patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see About\n package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

A list of explicitly approved patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see Package\n name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" } }, "ApprovedPatchesComplianceLevel": { @@ -4191,7 +4203,7 @@ "RejectedPatches": { "target": "com.amazonaws.ssm#PatchIdList", "traits": { - "smithy.api#documentation": "

A list of explicitly rejected patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see About\n package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

A list of explicitly rejected patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see Package\n name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" } }, "RejectedPatchesAction": { @@ -4635,7 +4647,7 @@ "value": { "target": "com.amazonaws.ssm#CommandFilterValue", "traits": { - "smithy.api#documentation": "

The filter value. Valid values for each filter key are as follows:

InvokedAfter: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions occurring July 7, 2021, and later.

InvokedBefore: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions from before July 7, 2021.

Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call. The status values you can specify for ListCommands are: Pending, InProgress, Success, Cancelled, Failed, TimedOut (this includes both Delivery and Execution time outs), AccessDenied, DeliveryTimedOut, ExecutionTimedOut, Incomplete, NoInstancesInTag, and LimitExceeded. The status values you can specify for ListCommandInvocations are: Pending, InProgress, Delayed, Success, Cancelled, Failed, TimedOut (this includes both Delivery and Execution time outs), AccessDenied, DeliveryTimedOut, ExecutionTimedOut, Undeliverable, InvalidPlatform, and Terminated.

DocumentName: Specify the name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on managed nodes.

ExecutionStage: Specify one of the following values (ListCommands operations only): Executing returns a list of command executions that are currently still running; Complete returns a list of command executions that have already completed.
", + "smithy.api#documentation": "

The filter value. Valid values for each filter key are as follows:

InvokedAfter: Specify a timestamp to limit your results. For example, specify 2024-07-07T00:00:00Z to see a list of command executions occurring July 7, 2024, and later.

InvokedBefore: Specify a timestamp to limit your results. For example, specify 2024-07-07T00:00:00Z to see a list of command executions from before July 7, 2024.

Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call. The status values you can specify for ListCommands are: Pending, InProgress, Success, Cancelled, Failed, TimedOut (this includes both Delivery and Execution time outs), AccessDenied, DeliveryTimedOut, ExecutionTimedOut, Incomplete, NoInstancesInTag, and LimitExceeded. The status values you can specify for ListCommandInvocations are: Pending, InProgress, Delayed, Success, Cancelled, Failed, TimedOut (this includes both Delivery and Execution time outs), AccessDenied, DeliveryTimedOut, ExecutionTimedOut, Undeliverable, InvalidPlatform, and Terminated.

DocumentName: Specify the name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on managed nodes.

ExecutionStage: Specify one of the following values (ListCommands operations only): Executing returns a list of command executions that are currently still running; Complete returns a list of command executions that have already completed.
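For illustration, the filter keys above map onto the generated Soto SSM shapes roughly as sketched below. This is not part of the diff; the timestamp, the helper function, and the choice of filters are assumptions.

```swift
import SotoSSM

func listRecentPatchCommands(ssm: SSM) async throws {
    // Filter keys and values follow the documentation above; the timestamp is illustrative.
    let response = try await ssm.listCommands(.init(filters: [
        .init(key: .invokedAfter, value: "2024-07-07T00:00:00Z"),
        .init(key: .documentName, value: "AWS-RunPatchBaseline"),
    ]))
    for command in response.commands ?? [] {
        print(command.commandId ?? "unknown", command.status ?? .pending)
    }
}
```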
", "smithy.api#required": {} } } @@ -4954,13 +4966,13 @@ "OutputS3BucketName": { "target": "com.amazonaws.ssm#S3BucketName", "traits": { - "smithy.api#documentation": "

The S3 bucket where the responses to the command executions should be stored. This was\n requested when issuing the command. For example, in the following response:

\n

\n doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript\n

\n

\n doc-example-bucket is the name of the S3 bucket;

\n

\n ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

\n

\n i-02573cafcfEXAMPLE is the managed node ID;

\n

\n awsrunShellScript is the name of the plugin.

" + "smithy.api#documentation": "

The S3 bucket where the responses to the command executions should be stored. This was\n requested when issuing the command. For example, in the following response:

\n

\n amzn-s3-demo-bucket/my-prefix/i-02573cafcfEXAMPLE/awsrunShellScript\n

\n

\n amzn-s3-demo-bucket is the name of the S3 bucket;

\n

\n my-prefix is the name of the S3 prefix;

\n

\n i-02573cafcfEXAMPLE is the managed node ID;

\n

\n awsrunShellScript is the name of the plugin.

" } }, "OutputS3KeyPrefix": { "target": "com.amazonaws.ssm#S3KeyPrefix", "traits": { - "smithy.api#documentation": "

The S3 directory path inside the bucket where the responses to the command executions should\n be stored. This was requested when issuing the command. For example, in the following\n response:

\n

\n doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript\n

\n

\n doc-example-bucket is the name of the S3 bucket;

\n

\n ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;

\n

\n i-02573cafcfEXAMPLE is the managed node ID;

\n

\n awsrunShellScript is the name of the plugin.

" + "smithy.api#documentation": "

The S3 directory path inside the bucket where the responses to the command executions should\n be stored. This was requested when issuing the command. For example, in the following\n response:

\n

\n amzn-s3-demo-bucket/my-prefix/i-02573cafcfEXAMPLE/awsrunShellScript\n

\n

\n amzn-s3-demo-bucket is the name of the S3 bucket;

\n

\n my-prefix is the name of the S3 prefix;

\n

\n i-02573cafcfEXAMPLE is the managed node ID;

\n

\n awsrunShellScript is the name of the plugin.

" } } }, @@ -5641,7 +5653,7 @@ } ], "traits": { - "smithy.api#documentation": "

Generates an activation code and activation ID you can use to register your on-premises\n servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with\n Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and\n ID when installing SSM Agent on machines in your hybrid environment. For more information about\n requirements for managing on-premises machines using Systems Manager, see Setting up\n Amazon Web Services Systems Manager for hybrid and multicloud environments in the\n Amazon Web Services Systems Manager User Guide.

\n \n

Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are\n configured for Systems Manager are all called managed nodes.

\n
" + "smithy.api#documentation": "

Generates an activation code and activation ID you can use to register your on-premises\n servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with\n Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and\n ID when installing SSM Agent on machines in your hybrid environment. For more information about\n requirements for managing on-premises machines using Systems Manager, see Using Amazon Web Services Systems Manager in\n hybrid and multicloud environments in the Amazon Web Services Systems Manager User Guide.

\n \n

Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are\n configured for Systems Manager are all called managed nodes.

\n
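A hedged sketch of how the CreateActivation operation described above might be invoked through the Soto SSM client. The role name, instance name, and registration limit are placeholders; only iamRole is required by the model.

```swift
import SotoSSM

func createHybridActivation(ssm: SSM) async throws {
    // Placeholder names; replace with your own IAM service role and naming scheme.
    let activation = try await ssm.createActivation(.init(
        defaultInstanceName: "on-prem-web-server",
        iamRole: "service-role/ExampleSSMServiceRole",
        registrationLimit: 10
    ))
    // The code/ID pair is what you supply to SSM Agent when registering each machine.
    print("Activation ID:", activation.activationId ?? "")
    print("Activation code:", activation.activationCode ?? "")
}
```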
" } }, "com.amazonaws.ssm#CreateActivationRequest": { @@ -5662,7 +5674,7 @@ "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": { - "smithy.api#documentation": "

The name of the Identity and Access Management (IAM) role that you want to assign to\n the managed node. This IAM role must provide AssumeRole permissions for the\n Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an\n IAM service role for a hybrid and multicloud environment in the\n Amazon Web Services Systems Manager User Guide.

\n \n

You can't specify an IAM service-linked role for this parameter. You must\n create a unique role.

\n
", + "smithy.api#documentation": "

The name of the Identity and Access Management (IAM) role that you want to assign to\n the managed node. This IAM role must provide AssumeRole permissions for the\n Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud\n environments in the Amazon Web Services Systems Manager User Guide.

\n \n

You can't specify an IAM service-linked role for this parameter. You must\n create a unique role.

\n
", "smithy.api#required": {} } }, @@ -5675,7 +5687,7 @@ "ExpirationDate": { "target": "com.amazonaws.ssm#ExpirationDate", "traits": { - "smithy.api#documentation": "

The date by which this activation request should expire, in timestamp format, such as\n \"2021-07-07T00:00:00\". You can specify a date up to 30 days in advance. If you don't provide an\n expiration date, the activation code expires in 24 hours.

" + "smithy.api#documentation": "

The date by which this activation request should expire, in timestamp format, such as\n \"2024-07-07T00:00:00\". You can specify a date up to 30 days in advance. If you don't provide an\n expiration date, the activation code expires in 24 hours.

" } }, "Tags": { @@ -6025,7 +6037,7 @@ "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": { - "smithy.api#documentation": "

The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource\n groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all\n managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of\n *. For more information about choosing targets for an association, see About targets and rate controls in State Manager associations in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource\n groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all\n managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of\n *. For more information about choosing targets for an association, see Understanding targets and rate controls in State Manager associations in the\n Amazon Web Services Systems Manager User Guide.

" } }, "ScheduleExpression": { @@ -6170,7 +6182,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Amazon Web Services Systems Manager (SSM document). An SSM document defines the actions that Systems Manager performs\n on your managed nodes. For more information about SSM documents, including information about\n supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

Creates a Amazon Web Services Systems Manager (SSM document). An SSM document defines the actions that Systems Manager performs\n on your managed nodes. For more information about SSM documents, including information about\n supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the\n Amazon Web Services Systems Manager User Guide.

" } }, "com.amazonaws.ssm#CreateDocumentRequest": { @@ -6662,7 +6674,7 @@ "ApprovedPatches": { "target": "com.amazonaws.ssm#PatchIdList", "traits": { - "smithy.api#documentation": "

A list of explicitly approved patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see About\n package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

A list of explicitly approved patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see Package\n name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" } }, "ApprovedPatchesComplianceLevel": { @@ -6681,7 +6693,7 @@ "RejectedPatches": { "target": "com.amazonaws.ssm#PatchIdList", "traits": { - "smithy.api#documentation": "

A list of explicitly rejected patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see About\n package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

A list of explicitly rejected patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see Package\n name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" } }, "RejectedPatchesAction": { @@ -6757,7 +6769,7 @@ } ], "traits": { - "smithy.api#documentation": "

A resource data sync helps you view data from multiple sources in a single location.\n Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and\n SyncFromSource.

\n

You can configure Systems Manager Inventory to use the SyncToDestination type to\n synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Configuring resource data\n sync for Inventory in the Amazon Web Services Systems Manager User Guide.

\n

You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize\n operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a\n single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple\n Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more\n information, see Setting up Systems Manager\n Explorer to display data from multiple accounts and Regions in the\n Amazon Web Services Systems Manager User Guide.

\n

A resource data sync is an asynchronous operation that returns immediately. After a\n successful initial sync is completed, the system continuously syncs data. To check the status of\n a sync, use the ListResourceDataSync.

\n \n

By default, data isn't encrypted in Amazon S3. We strongly recommend that you\n enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you\n secure access to the Amazon S3 bucket by creating a restrictive bucket policy.

\n
" + "smithy.api#documentation": "

A resource data sync helps you view data from multiple sources in a single location.\n Amazon Web Services Systems Manager offers two types of resource data sync: SyncToDestination and\n SyncFromSource.

\n

You can configure Systems Manager Inventory to use the SyncToDestination type to\n synchronize Inventory data from multiple Amazon Web Services Regions to a single Amazon Simple Storage Service (Amazon S3) bucket. For more information, see Creating a\n resource data sync for Inventory in the Amazon Web Services Systems Manager User Guide.

\n

You can configure Systems Manager Explorer to use the SyncFromSource type to synchronize\n operational work items (OpsItems) and operational data (OpsData) from multiple Amazon Web Services Regions to a\n single Amazon S3 bucket. This type can synchronize OpsItems and OpsData from multiple\n Amazon Web Services accounts and Amazon Web Services Regions or EntireOrganization by using Organizations. For more\n information, see Setting up Systems Manager\n Explorer to display data from multiple accounts and Regions in the\n Amazon Web Services Systems Manager User Guide.

\n

A resource data sync is an asynchronous operation that returns immediately. After a\n successful initial sync is completed, the system continuously syncs data. To check the status of\n a sync, use the ListResourceDataSync.

\n \n

By default, data isn't encrypted in Amazon S3. We strongly recommend that you\n enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you\n secure access to the Amazon S3 bucket by creating a restrictive bucket policy.

\n
" } }, "com.amazonaws.ssm#CreateResourceDataSyncRequest": { @@ -7097,7 +7109,7 @@ "DeletionSummary": { "target": "com.amazonaws.ssm#InventoryDeletionSummary", "traits": { - "smithy.api#documentation": "

A summary of the delete operation. For more information about this summary, see Understanding the delete inventory summary in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

A summary of the delete operation. For more information about this summary, see Deleting custom inventory in the Amazon Web Services Systems Manager User Guide.

" } } }, @@ -9030,7 +9042,7 @@ "Filters": { "target": "com.amazonaws.ssm#PatchOrchestratorFilterList", "traits": { - "smithy.api#documentation": "

Each element in the array is a structure containing a key-value pair.

Supported keys for DescribeInstancePatches include the following:

Classification. Sample values: Security | SecurityUpdates

KBId. Sample values: KB4480056 | java-1.7.0-openjdk.x86_64

Severity. Sample values: Important | Medium | Low

State. Sample values: Installed | InstalledOther | InstalledPendingReboot. For lists of all State values, see Understanding patch compliance state values in the Amazon Web Services Systems Manager User Guide.
" + "smithy.api#documentation": "

Each element in the array is a structure containing a key-value pair.

Supported keys for DescribeInstancePatches include the following:

Classification. Sample values: Security | SecurityUpdates

KBId. Sample values: KB4480056 | java-1.7.0-openjdk.x86_64

Severity. Sample values: Important | Medium | Low

State. Sample values: Installed | InstalledOther | InstalledPendingReboot. For lists of all State values, see Patch compliance state values in the Amazon Web Services Systems Manager User Guide.
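As a small illustration of the filter keys listed above, the following sketch queries patch state through the Soto SSM client. The instance ID is a placeholder and the helper function is an assumption, not part of this diff.

```swift
import SotoSSM

func listInstalledPatches(ssm: SSM) async throws {
    // The "State" key and its values follow the list above.
    let response = try await ssm.describeInstancePatches(.init(
        filters: [.init(key: "State", values: ["Installed", "InstalledPendingReboot"])],
        instanceId: "i-02573cafcfEXAMPLE"  // placeholder managed node ID
    ))
    for patch in response.patches ?? [] {
        print(patch.title, patch.state)
    }
}
```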
" } }, "NextToken": { @@ -9448,7 +9460,7 @@ "Filters": { "target": "com.amazonaws.ssm#MaintenanceWindowFilterList", "traits": { - "smithy.api#documentation": "

Each entry in the array is a structure containing:

Key: a string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter.

Values: an array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2021-11-04T05:00:00Z.
" + "smithy.api#documentation": "

Each entry in the array is a structure containing:

Key: a string between 1 and 128 characters. Supported keys include ExecutedBefore and ExecutedAfter.

Values: an array of strings, each between 1 and 256 characters. Supported values are date/time strings in a valid ISO 8601 date/time format, such as 2024-11-04T05:00:00Z.
" } }, "MaxResults": { @@ -11796,6 +11808,28 @@ "smithy.api#default": 0 } }, + "com.amazonaws.ssm#ExcludeAccount": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 6, + "max": 68 + }, + "smithy.api#pattern": "^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32})|(\\d{12})$" + } + }, + "com.amazonaws.ssm#ExcludeAccounts": { + "type": "list", + "member": { + "target": "com.amazonaws.ssm#ExcludeAccount" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5000 + } + } + }, "com.amazonaws.ssm#ExecutionMode": { "type": "enum", "members": { @@ -13408,7 +13442,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up Maintenance Windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "TaskType": { @@ -13845,7 +13879,7 @@ "Name": { "target": "com.amazonaws.ssm#PSParameterName", "traits": { - "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of the parameter that you want to query. For\n parameters shared with you from another account, you must use the full ARN.

\n

To query by parameter label, use \"Name\": \"name:label\". To query by parameter\n version, use \"Name\": \"name:version\".

\n

For more information about shared parameters, see Working with shared parameters in\n the Amazon Web Services Systems Manager User Guide.

", + "smithy.api#documentation": "

The name or Amazon Resource Name (ARN) of the parameter that you want to query. For\n parameters shared with you from another account, you must use the full ARN.

\n

To query by parameter label, use \"Name\": \"name:label\". To query by parameter\n version, use \"Name\": \"name:version\".

\n

For more information about shared parameters, see Working with\n shared parameters in the Amazon Web Services Systems Manager User Guide.
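The name:label and name:version query forms described above can be exercised directly through the Soto SSM client. A minimal sketch, assuming a hypothetical parameter name; ":3" selects version 3 as described above.

```swift
import SotoSSM

func readParameterVersion(ssm: SSM) async throws {
    // Placeholder parameter name; withDecryption applies to SecureString parameters.
    let result = try await ssm.getParameter(.init(name: "/prod/db/connection-string:3", withDecryption: true))
    print(result.parameter?.value ?? "<no value>")
}
```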

", "smithy.api#required": {} } }, @@ -14815,7 +14849,7 @@ "Name": { "target": "com.amazonaws.ssm#String", "traits": { - "smithy.api#documentation": "

The name assigned to an on-premises server, edge device, or virtual machine (VM) when it is\n activated as a Systems Manager managed node. The name is specified as the DefaultInstanceName\n property using the CreateActivation command. It is applied to the managed node\n by specifying the Activation Code and Activation ID when you install SSM Agent on the node, as\n explained in Install SSM Agent for a\n hybrid and multicloud environment (Linux) and Install SSM Agent for a\n hybrid and multicloud environment (Windows). To retrieve the Name tag of an\n EC2 instance, use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.

" + "smithy.api#documentation": "

The name assigned to an on-premises server, edge device, or virtual machine (VM) when it is\n activated as a Systems Manager managed node. The name is specified as the DefaultInstanceName\n property using the CreateActivation command. It is applied to the managed node\n by specifying the Activation Code and Activation ID when you install SSM Agent on the node, as\n explained in How to\n install SSM Agent on hybrid Linux nodes and How to\n install SSM Agent on hybrid Windows Server nodes. To retrieve the Name tag\n of an EC2 instance, use the Amazon EC2 DescribeInstances operation. For information, see\n DescribeInstances in the Amazon EC2 API Reference or describe-instances in the Amazon Web Services CLI Command Reference.

" } }, "IPAddress": { @@ -15078,7 +15112,7 @@ "InstallOverrideList": { "target": "com.amazonaws.ssm#InstallOverrideList", "traits": { - "smithy.api#documentation": "

An https URL or an Amazon Simple Storage Service (Amazon S3) path-style URL to a list of\n patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML\n format and specify in the SSM document AWS-RunPatchBaseline, overrides the patches\n specified by the default patch baseline.

\n

For more information about the InstallOverrideList parameter, see About the\n AWS-RunPatchBaseline SSM document\n in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

An https URL or an Amazon Simple Storage Service (Amazon S3) path-style URL to a list of\n patches to be installed. This patch installation list, which you maintain in an S3 bucket in YAML\n format and specify in the SSM document AWS-RunPatchBaseline, overrides the patches\n specified by the default patch baseline.

\n

For more information about the InstallOverrideList parameter, see SSM Command\n document for patching: AWS-RunPatchBaseline\n in the\n Amazon Web Services Systems Manager User Guide.

" } }, "OwnerInformation": { @@ -16673,7 +16707,7 @@ "DeletionSummary": { "target": "com.amazonaws.ssm#InventoryDeletionSummary", "traits": { - "smithy.api#documentation": "

Information about the delete operation. For more information about this summary, see Understanding the delete inventory summary in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

Information about the delete operation. For more information about this summary, see Understanding the delete inventory summary in the\n Amazon Web Services Systems Manager User Guide.

" } }, "LastStatusUpdateTime": { @@ -16775,7 +16809,7 @@ "Type": { "target": "com.amazonaws.ssm#InventoryQueryOperatorType", "traits": { - "smithy.api#documentation": "

The type of filter.

\n \n

The Exists filter must be used with aggregators. For more information, see\n Aggregating inventory\n data in the Amazon Web Services Systems Manager User Guide.

\n
" + "smithy.api#documentation": "

The type of filter.

\n \n

The Exists filter must be used with aggregators. For more information, see\n Aggregating inventory data in the Amazon Web Services Systems Manager User Guide.

\n
" } } }, @@ -19516,7 +19550,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up Maintenance Windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "TimeoutSeconds": { @@ -19710,7 +19744,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up Maintenance Windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "MaxConcurrency": { @@ -20790,7 +20824,7 @@ "Status": { "target": "com.amazonaws.ssm#OpsItemStatus", "traits": { - "smithy.api#documentation": "

The OpsItem status. Status can be Open, In Progress, or\n Resolved. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The OpsItem status. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.

" } }, "OpsItemId": { @@ -21922,7 +21956,7 @@ "Status": { "target": "com.amazonaws.ssm#OpsItemStatus", "traits": { - "smithy.api#documentation": "

The OpsItem status. Status can be Open, In Progress, or\n Resolved.

" + "smithy.api#documentation": "

The OpsItem status.

" } }, "OpsItemId": { @@ -23343,7 +23377,7 @@ "State": { "target": "com.amazonaws.ssm#PatchComplianceDataState", "traits": { - "smithy.api#documentation": "

The state of the patch on the managed node, such as INSTALLED or FAILED.

\n

For descriptions of each patch state, see About patch compliance in the Amazon Web Services Systems Manager User Guide.

", + "smithy.api#documentation": "

The state of the patch on the managed node, such as INSTALLED or FAILED.

\n

For descriptions of each patch state, see About\n patch compliance in the Amazon Web Services Systems Manager User Guide.

", "smithy.api#required": {} } }, @@ -23997,13 +24031,13 @@ "target": "com.amazonaws.ssm#ApproveAfterDays", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The number of days after the release date of each patch matched by the rule that the patch\n is marked as approved in the patch baseline. For example, a value of 7 means that\n patches are approved seven days after they are released.

\n \n

This parameter is marked as not required, but your request must include a value\n for either ApproveAfterDays or ApproveUntilDate.

\n
\n

Not supported for Debian Server or Ubuntu Server.

" + "smithy.api#documentation": "

The number of days after the release date of each patch matched by the rule that the patch\n is marked as approved in the patch baseline. For example, a value of 7 means that\n patches are approved seven days after they are released.

\n

This parameter is marked as Required: No, but your request must include a value\n for either ApproveAfterDays or ApproveUntilDate.

\n

Not supported for Debian Server or Ubuntu Server.

\n \n

Use caution when setting this value for Windows Server patch baselines. Because patch\n updates that are replaced by later updates are removed, setting too broad a value for this\n parameter can result in crucial patches not being installed. For more information, see the\n Windows Server tab in the topic How security\n patches are selected in the Amazon Web Services Systems Manager User Guide.

\n
" } }, "ApproveUntilDate": { "target": "com.amazonaws.ssm#PatchStringDateTime", "traits": { - "smithy.api#documentation": "

The cutoff date for auto approval of released patches. Any patches released on or before\n this date are installed automatically.

\n

Enter dates in the format YYYY-MM-DD. For example,\n 2021-12-31.

\n \n

This parameter is marked as not required, but your request must include a value\n for either ApproveUntilDate or ApproveAfterDays.

\n
\n

Not supported for Debian Server or Ubuntu Server.

" + "smithy.api#documentation": "

The cutoff date for auto approval of released patches. Any patches released on or before\n this date are installed automatically.

\n

Enter dates in the format YYYY-MM-DD. For example,\n 2024-12-31.

\n

This parameter is marked as Required: No, but your request must include a value\n for either ApproveUntilDate or ApproveAfterDays.

\n

Not supported for Debian Server or Ubuntu Server.

\n \n

Use caution when setting this value for Windows Server patch baselines. Because patch\n updates that are replaced by later updates are removed, setting too broad a value for this\n parameter can result in crucial patches not being installed. For more information, see the\n Windows Server tab in the topic How security\n patches are selected in the Amazon Web Services Systems Manager User Guide.

\n
" } }, "EnableNonSecurity": { @@ -25082,7 +25116,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up Maintenance Windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "TaskType": { @@ -27005,7 +27039,7 @@ "value": { "target": "com.amazonaws.ssm#SessionFilterValue", "traits": { - "smithy.api#documentation": "

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.
  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.
  • Target: Specify a managed node to which session connections have been made.
  • Owner: Specify an Amazon Web Services user to see a list of sessions started by that user.
  • Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include: Connected, Connecting, Disconnected, Terminated, Terminating, Failed.
  • SessionId: Specify a session ID to return details about the session.

", + "smithy.api#documentation": "

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2024-08-29T00:00:00Z to see sessions that started August 29, 2024, and later.
  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2024-08-29T00:00:00Z to see sessions that started before August 29, 2024.
  • Target: Specify a managed node to which session connections have been made.
  • Owner: Specify an Amazon Web Services user to see a list of sessions started by that user.
  • Status: Specify a valid session status to see a list of all sessions with that status. Status values you can specify include: Connected, Connecting, Disconnected, Terminated, Terminating, Failed.
  • SessionId: Specify a session ID to return details about the session.
", "smithy.api#required": {} } } @@ -27537,7 +27571,7 @@ "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": { - "smithy.api#documentation": "

A key-value mapping to target resources. Required if you specify TargetParameterName.

" + "smithy.api#documentation": "

A key-value mapping to target resources. Required if you specify TargetParameterName.

\n

If both this parameter and the TargetLocation:Targets parameter are supplied,\n TargetLocation:Targets takes precedence.

" } }, "TargetMaps": { @@ -27549,19 +27583,19 @@ "MaxConcurrency": { "target": "com.amazonaws.ssm#MaxConcurrency", "traits": { - "smithy.api#documentation": "

The maximum number of targets allowed to run this task in parallel. You can specify a\n number, such as 10, or a percentage, such as 10%. The default value is 10.

" + "smithy.api#documentation": "

The maximum number of targets allowed to run this task in parallel. You can specify a\n number, such as 10, or a percentage, such as 10%. The default value is 10.

\n

If both this parameter and the TargetLocation:TargetsMaxConcurrency are\n supplied, TargetLocation:TargetsMaxConcurrency takes precedence.

" } }, "MaxErrors": { "target": "com.amazonaws.ssm#MaxErrors", "traits": { - "smithy.api#documentation": "

The number of errors that are allowed before the system stops running the automation on\n additional targets. You can specify either an absolute number of errors, for example 10, or a\n percentage of the target set, for example 10%. If you specify 3, for example, the system stops\n running the automation when the fourth error is received. If you specify 0, then the system stops\n running the automation on additional targets after the first error result is returned. If you run\n an automation on 50 resources and set max-errors to 10%, then the system stops running the\n automation on additional targets when the sixth error is received.

\n

Executions that are already running an automation when max-errors is reached are allowed to\n complete, but some of these executions may fail as well. If you need to ensure that there won't\n be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one\n at a time.

" + "smithy.api#documentation": "

The number of errors that are allowed before the system stops running the automation on\n additional targets. You can specify either an absolute number of errors, for example 10, or a\n percentage of the target set, for example 10%. If you specify 3, for example, the system stops\n running the automation when the fourth error is received. If you specify 0, then the system stops\n running the automation on additional targets after the first error result is returned. If you run\n an automation on 50 resources and set max-errors to 10%, then the system stops running the\n automation on additional targets when the sixth error is received.

\n

Executions that are already running an automation when max-errors is reached are allowed to\n complete, but some of these executions may fail as well. If you need to ensure that there won't\n be more than max-errors failed executions, set max-concurrency to 1 so the executions proceed one\n at a time.

\n

If this parameter and the TargetLocation:TargetsMaxErrors parameter are both\n supplied, TargetLocation:TargetsMaxErrors takes precedence.

" } }, "TargetLocations": { "target": "com.amazonaws.ssm#TargetLocations", "traits": { - "smithy.api#documentation": "

A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the\n automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple\n Amazon Web Services accounts. For more information, see Running Automation workflows in multiple Amazon Web Services Regions and Amazon Web Services accounts in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

A location is a combination of Amazon Web Services Regions and/or Amazon Web Services accounts where you want to run the\n automation. Use this operation to start an automation in multiple Amazon Web Services Regions and multiple\n Amazon Web Services accounts. For more information, see Running automations in multiple Amazon Web Services Regions and accounts in the\n Amazon Web Services Systems Manager User Guide.

" } }, "Tags": { @@ -27575,6 +27609,12 @@ "traits": { "smithy.api#documentation": "

The CloudWatch alarm you want to apply to your automation.

" } + }, + "TargetLocationsURL": { + "target": "com.amazonaws.ssm#TargetLocationsURL", + "traits": { + "smithy.api#documentation": "

Specify a publicly accessible URL for a file that contains the TargetLocations\n body. Currently, only files in presigned Amazon S3 buckets are supported.

" + } } }, "traits": { @@ -28364,6 +28404,37 @@ }, "TargetLocationAlarmConfiguration": { "target": "com.amazonaws.ssm#AlarmConfiguration" + }, + "IncludeChildOrganizationUnits": { + "target": "com.amazonaws.ssm#Boolean", + "traits": { + "smithy.api#default": false, + "smithy.api#documentation": "

Indicates whether to include child organizational units (OUs) that are children of the\n targeted OUs. The default is false.

" + } + }, + "ExcludeAccounts": { + "target": "com.amazonaws.ssm#ExcludeAccounts", + "traits": { + "smithy.api#documentation": "

Amazon Web Services accounts or organizational units to exclude as expanded targets.

" + } + }, + "Targets": { + "target": "com.amazonaws.ssm#Targets", + "traits": { + "smithy.api#documentation": "

A list of key-value mappings to target resources. If you specify values for this data type,\n you must also specify a value for TargetParameterName.

\n

This Targets parameter takes precedence over the\n StartAutomationExecution:Targets parameter if both are supplied.

" + } + }, + "TargetsMaxConcurrency": { + "target": "com.amazonaws.ssm#MaxConcurrency", + "traits": { + "smithy.api#documentation": "

The maximum number of targets allowed to run this task in parallel. This\n TargetsMaxConcurrency takes precedence over the\n StartAutomationExecution:MaxConcurrency parameter if both are supplied.

" + } + }, + "TargetsMaxErrors": { + "target": "com.amazonaws.ssm#MaxErrors", + "traits": { + "smithy.api#documentation": "

The maximum number of errors that are allowed before the system stops running the automation\n on additional targets. This TargetsMaxErrors parameter takes precedence over the\n StartAutomationExecution:MaxErrors parameter if both are supplied.

" + } } }, "traits": { @@ -28382,6 +28453,12 @@ } } }, + "com.amazonaws.ssm#TargetLocationsURL": { + "type": "string", + "traits": { + "smithy.api#pattern": "^https:\\/\\/[-a-zA-Z0-9@:%._\\+~#=]{1,253}\\.s3(\\.[a-z\\d-]{9,16})?\\.amazonaws\\.com\\/.{1,2000}$" + } + }, "com.amazonaws.ssm#TargetMap": { "type": "map", "key": { @@ -28451,7 +28528,7 @@ "code": "TargetNotConnected", "httpResponseCode": 430 }, - "smithy.api#documentation": "

The specified target managed node for the session isn't fully configured for use with Session Manager.\n For more information, see Getting started with\n Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you\n attempt to start a session on a managed node that is located in a different account or\n Region.

", + "smithy.api#documentation": "

The specified target managed node for the session isn't fully configured for use with Session Manager.\n For more information, see Setting up\n Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you\n attempt to start a session on a managed node that is located in a different account or\n Region.

", "smithy.api#error": "client" } }, @@ -29686,7 +29763,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up Maintenance Windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "TaskParameters": { @@ -29792,7 +29869,7 @@ "ServiceRoleArn": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up maintenance windows in the\n Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM service role for\n Amazon Web Services Systems Manager to assume when running a maintenance window task. If you do not specify a\n service role ARN, Systems Manager uses a service-linked role in your account. If no\n appropriate service-linked role for Systems Manager exists in your account, it is created when\n you run RegisterTaskWithMaintenanceWindow.

\n

However, for an improved security posture, we strongly recommend creating a custom\n policy and custom service role for running your maintenance window tasks. The policy\n can be crafted to provide only the permissions needed for your particular\n maintenance window tasks. For more information, see Setting up Maintenance Windows in the\n Amazon Web Services Systems Manager User Guide.

" } }, "TaskParameters": { @@ -29894,7 +29971,7 @@ "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": { - "smithy.api#documentation": "

The name of the Identity and Access Management (IAM) role that you want to assign to\n the managed node. This IAM role must provide AssumeRole permissions for the\n Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an\n IAM service role for a hybrid and multicloud environment in the\n Amazon Web Services Systems Manager User Guide.

\n \n

You can't specify an IAM service-linked role for this parameter. You must\n create a unique role.

\n
", + "smithy.api#documentation": "

The name of the Identity and Access Management (IAM) role that you want to assign to\n the managed node. This IAM role must provide AssumeRole permissions for the\n Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create the IAM service role required for Systems Manager in hybrid and multicloud\n environments in the Amazon Web Services Systems Manager User Guide.

\n \n

You can't specify an IAM service-linked role for this parameter. You must\n create a unique role.

\n
", "smithy.api#required": {} } } @@ -29987,7 +30064,7 @@ "Status": { "target": "com.amazonaws.ssm#OpsItemStatus", "traits": { - "smithy.api#documentation": "

The OpsItem status. Status can be Open, In Progress, or\n Resolved. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

The OpsItem status. For more information, see Editing OpsItem details in the Amazon Web Services Systems Manager User Guide.

" } }, "OpsItemId": { @@ -30178,7 +30255,7 @@ "ApprovedPatches": { "target": "com.amazonaws.ssm#PatchIdList", "traits": { - "smithy.api#documentation": "

A list of explicitly approved patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see About\n package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

A list of explicitly approved patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see Package\n name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" } }, "ApprovedPatchesComplianceLevel": { @@ -30197,7 +30274,7 @@ "RejectedPatches": { "target": "com.amazonaws.ssm#PatchIdList", "traits": { - "smithy.api#documentation": "

A list of explicitly rejected patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see About\n package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" + "smithy.api#documentation": "

A list of explicitly rejected patches for the baseline.

\n

For information about accepted formats for lists of approved patches and rejected patches,\n see Package\n name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide.

" } }, "RejectedPatchesAction": { diff --git a/models/supplychain.json b/models/supplychain.json index bc17b51f2b..966ea7a2c2 100644 --- a/models/supplychain.json +++ b/models/supplychain.json @@ -14,6 +14,22 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.supplychain#AscResourceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 1011 + }, + "smithy.api#pattern": "^arn:aws:scn(?::([a-z0-9-]+):([0-9]+):instance)?/([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})[-_./A-Za-z0-9]*$" + } + }, + "com.amazonaws.supplychain#AwsAccountId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9]{12}$" + } + }, "com.amazonaws.supplychain#BillOfMaterialsImportJob": { "type": "structure", "members": { @@ -233,7 +249,7 @@ "clientToken": { "target": "com.amazonaws.supplychain#ClientToken", "traits": { - "smithy.api#documentation": "

An idempotency token.

", + "smithy.api#documentation": "

An idempotency token ensures the API request completes no more than once, so retrying the request will not trigger the operation multiple times. A client token is a unique, case-sensitive string of 33 to 128 ASCII characters. To make an idempotent API request, specify a client token in the request. You should not reuse the same client token for other requests. If you retry a successful request with the same client token, the request succeeds without any further action being taken, and you receive the same API response as the original successful request.

", "smithy.api#idempotencyToken": {} } } @@ -259,164 +275,208 @@ "smithy.api#output": {} } }, - "com.amazonaws.supplychain#DataIntegrationEventData": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 1048576 + "com.amazonaws.supplychain#CreateDataIntegrationFlow": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#CreateDataIntegrationFlowRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#CreateDataIntegrationFlowResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" }, - "smithy.api#sensitive": {} - } - }, - "com.amazonaws.supplychain#DataIntegrationEventGroupId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 255 - } - } - }, - "com.amazonaws.supplychain#DataIntegrationEventResource": { - "type": "resource", - "identifiers": { - "instanceId": { - "target": "com.amazonaws.supplychain#UUID" + { + "target": "com.amazonaws.supplychain#ConflictException" }, - "eventId": { - "target": "com.amazonaws.supplychain#UUID" - } - }, - "properties": { - "eventType": { - "target": "com.amazonaws.supplychain#DataIntegrationEventType" + { + "target": "com.amazonaws.supplychain#InternalServerException" }, - "data": { - "target": "com.amazonaws.supplychain#DataIntegrationEventData" + { + "target": "com.amazonaws.supplychain#ServiceQuotaExceededException" }, - "eventGroupId": { - "target": "com.amazonaws.supplychain#DataIntegrationEventGroupId" + { + "target": "com.amazonaws.supplychain#ThrottlingException" }, - "eventTimestamp": { - "target": "smithy.api#Timestamp" + { + "target": "com.amazonaws.supplychain#ValidationException" } - }, - "create": { - "target": "com.amazonaws.supplychain#SendDataIntegrationEvent" - }, + ], "traits": { - "aws.api#arn": { - "template": "instance/{instanceId}/data-integration-events/{eventId}" - } + "smithy.api#documentation": "

Create a DataIntegrationFlow to map one or more different sources to one target using a SQL transformation query.

", + "smithy.api#examples": [ + { + "title": "Successful CreateDataIntegrationFlow for s3 to dataset flow", + "input": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow", + "sources": [ + { + "sourceType": "S3", + "sourceName": "testSourceName", + "s3Source": { + "bucketName": "aws-supply-chain-data-b8c7bb28-a576-4334-b481-6d6e8e47371f", + "prefix": "example-prefix" + } + } + ], + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT * FROM testSourceName" + } + }, + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset" + } + }, + "tags": { + "tagKey1": "tagValue1" + } + }, + "output": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow" + } + }, + { + "title": "Successful CreateDataIntegrationFlow for dataset to dataset flow", + "input": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "trading-partner", + "sources": [ + { + "sourceType": "DATASET", + "sourceName": "testSourceName1", + "datasetSource": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset1" + } + }, + { + "sourceType": "DATASET", + "sourceName": "testSourceName2", + "datasetSource": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset2" + } + } + ], + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT S1.id AS id, S1.poc_org_unit_description AS description, S1.company_id AS company_id, S1.tpartner_type AS tpartner_type, S1.geo_id AS geo_id, S1.eff_start_date AS eff_start_date, S1.eff_end_date AS eff_end_date FROM testSourceName1 AS S1 LEFT JOIN testSourceName2 as S2 ON S1.id=S2.id" + } + }, + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/asc/datasets/trading_partner" + } + }, + "tags": { + "tagKey1": "tagValue1" + } + }, + "output": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "trading-partner" + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/api/data-integration/instance/{instanceId}/data-integration-flows/{name}" + }, + "smithy.api#idempotent": {} } }, - "com.amazonaws.supplychain#DataIntegrationEventType": { - "type": "enum", + "com.amazonaws.supplychain#CreateDataIntegrationFlowRequest": { + "type": "structure", "members": { - "FORECAST": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "scn.data.forecast" - } - }, - "INVENTORY_LEVEL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "scn.data.inventorylevel" - } - }, - "INBOUND_ORDER": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "scn.data.inboundorder" - } - }, - "INBOUND_ORDER_LINE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "scn.data.inboundorderline" - } - }, - "INBOUND_ORDER_LINE_SCHEDULE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "scn.data.inboundorderlineschedule" - } - }, - "OUTBOUND_ORDER_LINE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "scn.data.outboundorderline" - } - }, - 
"OUTBOUND_SHIPMENT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "scn.data.outboundshipment" - } - }, - "PROCESS_HEADER": { - "target": "smithy.api#Unit", + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", "traits": { - "smithy.api#enumValue": "scn.data.processheader" + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "PROCESS_OPERATION": { - "target": "smithy.api#Unit", + "name": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowName", "traits": { - "smithy.api#enumValue": "scn.data.processoperation" + "smithy.api#documentation": "

Name of the DataIntegrationFlow.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} } }, - "PROCESS_PRODUCT": { - "target": "smithy.api#Unit", + "sources": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSourceList", "traits": { - "smithy.api#enumValue": "scn.data.processproduct" + "smithy.api#documentation": "

The source configurations for DataIntegrationFlow.

", + "smithy.api#required": {} } }, - "RESERVATION": { - "target": "smithy.api#Unit", + "transformation": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTransformation", "traits": { - "smithy.api#enumValue": "scn.data.reservation" + "smithy.api#documentation": "

The transformation configurations for DataIntegrationFlow.

", + "smithy.api#required": {} } }, - "SHIPMENT": { - "target": "smithy.api#Unit", + "target": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTarget", "traits": { - "smithy.api#enumValue": "scn.data.shipment" + "smithy.api#documentation": "

The target configurations for DataIntegrationFlow.

", + "smithy.api#required": {} } }, - "SHIPMENT_STOP": { - "target": "smithy.api#Unit", + "tags": { + "target": "com.amazonaws.supplychain#TagMap", "traits": { - "smithy.api#enumValue": "scn.data.shipmentstop" + "smithy.api#documentation": "

The tags of the DataIntegrationFlow to be created.

", + "smithy.api#notProperty": {} } - }, - "SHIPMENT_STOP_ORDER": { - "target": "smithy.api#Unit", + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for CreateDataIntegrationFlow.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#CreateDataIntegrationFlowResponse": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", "traits": { - "smithy.api#enumValue": "scn.data.shipmentstoporder" + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#required": {} } }, - "SUPPLY_PLAN": { - "target": "smithy.api#Unit", + "name": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowName", "traits": { - "smithy.api#enumValue": "scn.data.supplyplan" + "smithy.api#documentation": "

The name of the DataIntegrationFlow created.

", + "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for CreateDataIntegrationFlow.

", + "smithy.api#output": {} } }, - "com.amazonaws.supplychain#GalaxyPublicAPIGateway": { - "type": "service", - "version": "2024-01-01", - "resources": [ - { - "target": "com.amazonaws.supplychain#BillOfMaterialsImportJobResource" - }, - { - "target": "com.amazonaws.supplychain#DataIntegrationEventResource" - } - ], + "com.amazonaws.supplychain#CreateDataLakeDataset": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#CreateDataLakeDatasetRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#CreateDataLakeDatasetResponse" + }, "errors": [ { "target": "com.amazonaws.supplychain#AccessDeniedException" @@ -427,9 +487,6 @@ { "target": "com.amazonaws.supplychain#InternalServerException" }, - { - "target": "com.amazonaws.supplychain#ResourceNotFoundException" - }, { "target": "com.amazonaws.supplychain#ServiceQuotaExceededException" }, @@ -441,148 +498,1932 @@ } ], "traits": { - "aws.api#service": { - "sdkId": "SupplyChain", - "arnNamespace": "scn", - "endpointPrefix": "scn" - }, - "aws.auth#sigv4": { - "name": "scn" - }, - "aws.protocols#restJson1": {}, - "smithy.api#cors": { - "additionalAllowedHeaders": [ - "*", - "content-type", - "x-amz-content-sha256", - "x-amz-user-agent", - "x-amzn-platform-id", - "x-amzn-trace-id" - ], - "additionalExposedHeaders": [ - "x-amzn-errortype", - "x-amzn-requestid", - "x-amzn-trace-id" - ], - "maxAge": 86400 - }, - "smithy.api#documentation": "

\n AWS Supply Chain is a cloud-based application that works with your enterprise resource planning (ERP) and supply chain management systems. Using AWS Supply Chain, you can connect and extract your inventory, supply, and demand related data from existing ERP or supply chain systems into a single data model.\n

\n

The AWS Supply Chain API supports configuration data import for Supply Planning.

\n

\n All AWS Supply Chain API operations are Amazon-authenticated and certificate-signed. They not only require the use of the AWS SDK, but also allow for the exclusive use of AWS Identity and Access Management users and roles to help facilitate access, trust, and permission policies.\n

", - "smithy.api#title": "AWS Supply Chain", - "smithy.rules#endpointRuleSet": { - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" + "smithy.api#documentation": "

Create a data lake dataset.

", + "smithy.api#examples": [ + { + "title": "Create an AWS Supply Chain inbound order dataset", + "input": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order", + "description": "This is an AWS Supply Chain inbound order dataset", + "tags": { + "tagKey1": "tagValue1", + "tagKey2": "tagValue2" + } }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ + "output": { + "dataset": { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/asc/datasets/inbound_order", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order", + "description": "This is an AWS Supply Chain inbound order dataset", + "createdTime": 1.727116807751E9, + "lastModifiedTime": 1.727116807751E9, + "schema": { + "name": "InboundOrder", + "fields": [ { - "ref": "Endpoint" + "name": "id", + "type": "STRING", + "isRequired": true + }, + { + "name": "tpartner_id", + "type": "STRING", + "isRequired": true + }, + { + "name": "connection_id", + "type": "STRING", + "isRequired": true + }, + { + "name": "order_type", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_status", + "type": "STRING", + "isRequired": false + }, + { + "name": "inbound_order_url", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_creation_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "company_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "to_site_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_currency_uom", + "type": "STRING", + "isRequired": false + }, + { + "name": "vendor_currency_uom", + "type": "STRING", + "isRequired": false + }, + { + "name": "exchange_rate", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "exchange_rate_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "incoterm", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm2", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm_location_1", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm_location_2", + "type": "STRING", + "isRequired": false + }, + { + "name": "submitted_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "agreement_start_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "agreement_end_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "shipping_instr_code", + "type": "STRING", + "isRequired": false + }, + { + "name": "payment_terms_code", + "type": "STRING", + "isRequired": false + }, + { + "name": "std_terms_agreement", + "type": "STRING", + "isRequired": false + }, + { + "name": "std_terms_agreement_ver", + "type": "STRING", + "isRequired": false + }, + { + "name": "agreement_number", + "type": "STRING", + "isRequired": false + }, + { + "name": "source", + "type": "STRING", + "isRequired": false + }, + { + "name": "source_update_dttm", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "source_event_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "db_creation_dttm", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "db_updation_dttm", + "type": "TIMESTAMP", + "isRequired": false } ] } - ], - "rules": [ - { - "conditions": [ + } + } + }, + { + "title": 
"Create a custom dataset", + "input": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset", + "description": "This is a custom dataset", + "schema": { + "name": "MyDataset", + "fields": [ + { + "name": "id", + "type": "INT", + "isRequired": true + }, + { + "name": "description", + "type": "STRING", + "isRequired": true + }, + { + "name": "price", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "creation_time", + "type": "TIMESTAMP", + "isRequired": false + } + ] + }, + "tags": { + "tagKey1": "tagValue1", + "tagKey2": "tagValue2" + } + }, + "output": { + "dataset": { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/default/datasets/my_dataset", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset", + "description": "This is a custom dataset", + "createdTime": 1.727116807751E9, + "lastModifiedTime": 1.727116807751E9, + "schema": { + "name": "MyDataset", + "fields": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "rules": [ + "name": "id", + "type": "INT", + "isRequired": true + }, { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "name": "description", + "type": "STRING", + "isRequired": true }, { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "name": "price", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "creation_time", + "type": "TIMESTAMP", + "isRequired": false } - ], - "type": "tree" + ] } - ], - "type": "tree" + } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "PUT", + "uri": "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.supplychain#CreateDataLakeDatasetRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "namespace": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", + "traits": { + "smithy.api#documentation": "

The name space of the dataset.

\n ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetName", + "traits": { + "smithy.api#documentation": "

The name of the dataset. For asc name space, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "schema": { + "target": "com.amazonaws.supplychain#DataLakeDatasetSchema", + "traits": { + "smithy.api#documentation": "

The custom schema of the data lake dataset. A schema is only required when the name space is default.

" + } + }, + "description": { + "target": "com.amazonaws.supplychain#DataLakeDatasetDescription", + "traits": { + "smithy.api#documentation": "

The description of the dataset.

" + } + }, + "tags": { + "target": "com.amazonaws.supplychain#TagMap", + "traits": { + "smithy.api#documentation": "

The tags of the dataset.

", + "smithy.api#notProperty": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for CreateDataLakeDataset.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#CreateDataLakeDatasetResponse": { + "type": "structure", + "members": { + "dataset": { + "target": "com.amazonaws.supplychain#DataLakeDataset", + "traits": { + "smithy.api#documentation": "

The detail of the created dataset.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters of CreateDataLakeDataset.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#CreateInstance": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#CreateInstanceRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#CreateInstanceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#ConflictException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Create a new instance for AWS Supply Chain. This is an asynchronous operation. Upon receiving a CreateInstance request, AWS Supply Chain immediately returns the instance resource, with an instance ID and an Initializing state, while simultaneously creating all required Amazon Web Services resources for instance creation. You can use GetInstance to check the status of the instance.

", + "smithy.api#examples": [ + { + "title": "Successful CreateInstance request with all input data", + "input": { + "instanceName": "example instance name", + "instanceDescription": "example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "tags": { + "tagKey1": "tagValue1" + } }, - { - "conditions": [], - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" + "output": { + "instance": { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "awsAccountId": "123456789012", + "state": "Initializing", + "createdTime": 172615383136, + "lastModifiedTime": 172615383136, + "instanceName": "example instance name", + "instanceDescription": "example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 + } + } + }, + { + "title": "Successful CreateInstance request with no input data", + "output": { + "instance": { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "awsAccountId": "123456789012", + "state": "Initializing", + "createdTime": 172615383136, + "lastModifiedTime": 172615383136, + "instanceDescription": "", + "kmsKeyArn": "arn:aws:kms:us-west-2:456789012345:key/7372eb6d-874c-4212-8d49-7804282d33a8", + "versionNumber": 2.0 + } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/api/instance" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.supplychain#CreateInstanceRequest": { + "type": "structure", + "members": { + "instanceName": { + "target": "com.amazonaws.supplychain#InstanceName", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance name.

" + } + }, + "instanceDescription": { + "target": "com.amazonaws.supplychain#InstanceDescription", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance description.

" + } + }, + "kmsKeyArn": { + "target": "com.amazonaws.supplychain#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for encryption. This is required if you do not want to use the Amazon Web Services owned KMS key. If you don't provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key.

" + } + }, + "tags": { + "target": "com.amazonaws.supplychain#TagMap", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services tags of an instance to be created.

", + "smithy.api#notProperty": {} + } + }, + "clientToken": { + "target": "com.amazonaws.supplychain#ClientToken", + "traits": { + "smithy.api#documentation": "

The client token for idempotency.

", + "smithy.api#idempotencyToken": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for CreateInstance.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#CreateInstanceResponse": { + "type": "structure", + "members": { + "instance": { + "target": "com.amazonaws.supplychain#Instance", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance resource data details.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for CreateInstance.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#DataIntegrationEventData": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1048576 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.supplychain#DataIntegrationEventGroupId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.supplychain#DataIntegrationEventResource": { + "type": "resource", + "identifiers": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID" + }, + "eventId": { + "target": "com.amazonaws.supplychain#UUID" + } + }, + "properties": { + "eventType": { + "target": "com.amazonaws.supplychain#DataIntegrationEventType" + }, + "data": { + "target": "com.amazonaws.supplychain#DataIntegrationEventData" + }, + "eventGroupId": { + "target": "com.amazonaws.supplychain#DataIntegrationEventGroupId" + }, + "eventTimestamp": { + "target": "smithy.api#Timestamp" + } + }, + "create": { + "target": "com.amazonaws.supplychain#SendDataIntegrationEvent" + }, + "traits": { + "aws.api#arn": { + "template": "instance/{instanceId}/data-integration-events/{eventId}" + } + } + }, + "com.amazonaws.supplychain#DataIntegrationEventType": { + "type": "enum", + "members": { + "FORECAST": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.forecast" + } + }, + "INVENTORY_LEVEL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.inventorylevel" + } + }, + "INBOUND_ORDER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.inboundorder" + } + }, + "INBOUND_ORDER_LINE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.inboundorderline" + } + }, + "INBOUND_ORDER_LINE_SCHEDULE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.inboundorderlineschedule" + } + }, + "OUTBOUND_ORDER_LINE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.outboundorderline" + } + }, + "OUTBOUND_SHIPMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.outboundshipment" + } + }, + "PROCESS_HEADER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.processheader" + } + }, + "PROCESS_OPERATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.processoperation" + } + }, + "PROCESS_PRODUCT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.processproduct" + } + }, + "RESERVATION": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.reservation" + } + }, + "SHIPMENT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.shipment" + } + }, + "SHIPMENT_STOP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.shipmentstop" + } + }, + "SHIPMENT_STOP_ORDER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.shipmentstoporder" + } + }, + "SUPPLY_PLAN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "scn.data.supplyplan" + } + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlow": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow instance ID.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "instanceId" + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowName", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow name.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "name" + } + }, + "sources": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSourceList", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow source configurations.

", + "smithy.api#required": {} + } + }, + "transformation": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTransformation", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow transformation configurations.

", + "smithy.api#required": {} + } + }, + "target": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTarget", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow target configuration.

", + "smithy.api#required": {} + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow creation timestamp.

", + "smithy.api#required": {} + } + }, + "lastModifiedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow last modified timestamp.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow details.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowDatasetOptions": { + "type": "structure", + "members": { + "loadType": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowLoadType", + "traits": { + "smithy.api#documentation": "

The dataset data load type in dataset options.

" + } + }, + "dedupeRecords": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

The dataset load option to remove duplicates.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The dataset options used in dataset source and target configurations.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowDatasetSourceConfiguration": { + "type": "structure", + "members": { + "datasetIdentifier": { + "target": "com.amazonaws.supplychain#DatasetIdentifier", + "traits": { + "smithy.api#documentation": "

The ARN of the dataset.

", + "smithy.api#required": {} + } + }, + "options": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowDatasetOptions", + "traits": { + "smithy.api#documentation": "

The dataset DataIntegrationFlow source options.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The dataset DataIntegrationFlow source configuration parameters.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowDatasetTargetConfiguration": { + "type": "structure", + "members": { + "datasetIdentifier": { + "target": "com.amazonaws.supplychain#DatasetIdentifier", + "traits": { + "smithy.api#documentation": "

The dataset ARN.

", + "smithy.api#required": {} + } + }, + "options": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowDatasetOptions", + "traits": { + "smithy.api#documentation": "

The dataset DataIntegrationFlow target options.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The dataset DataIntegrationFlow target configuration parameters.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowFileType": { + "type": "enum", + "members": { + "CSV": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CSV" + } + }, + "PARQUET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PARQUET" + } + }, + "JSON": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "JSON" + } + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowList": { + "type": "list", + "member": { + "target": "com.amazonaws.supplychain#DataIntegrationFlow" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowLoadType": { + "type": "enum", + "members": { + "INCREMENTAL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INCREMENTAL" + } + }, + "REPLACE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "REPLACE" + } + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowMaxResults": { + "type": "integer", + "traits": { + "smithy.api#default": 10, + "smithy.api#range": { + "min": 0, + "max": 20 + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9-]+$" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowNextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 65535 + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowResource": { + "type": "resource", + "identifiers": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID" + }, + "name": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowName" + } + }, + "properties": { + "sources": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSourceList" + }, + "transformation": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTransformation" + }, + "target": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTarget" + }, + "createdTime": { + "target": "smithy.api#Timestamp" + }, + "lastModifiedTime": { + "target": "smithy.api#Timestamp" + } + }, + "put": { + "target": "com.amazonaws.supplychain#CreateDataIntegrationFlow" + }, + "read": { + "target": "com.amazonaws.supplychain#GetDataIntegrationFlow" + }, + "update": { + "target": "com.amazonaws.supplychain#UpdateDataIntegrationFlow" + }, + "delete": { + "target": "com.amazonaws.supplychain#DeleteDataIntegrationFlow" + }, + "list": { + "target": "com.amazonaws.supplychain#ListDataIntegrationFlows" + }, + "traits": { + "aws.api#arn": { + "template": "instance/{instanceId}/data-integration-flows/{name}" + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowS3Options": { + "type": "structure", + "members": { + "fileType": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowFileType", + "traits": { + "smithy.api#documentation": "

The Amazon S3 file type in S3 options.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The Amazon S3 options used in S3 source and target configurations.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowS3Prefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 700 + }, + "smithy.api#pattern": "^[/A-Za-z0-9._-]+$" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowS3SourceConfiguration": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.supplychain#S3BucketName", + "traits": { + "smithy.api#documentation": "

The bucketName of the S3 source objects.

", + "smithy.api#required": {} + } + }, + "prefix": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowS3Prefix", + "traits": { + "smithy.api#documentation": "

The prefix of the S3 source objects.

", + "smithy.api#required": {} + } + }, + "options": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowS3Options", + "traits": { + "smithy.api#documentation": "

The other options of the S3 DataIntegrationFlow source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The S3 DataIntegrationFlow source configuration parameters.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowS3TargetConfiguration": { + "type": "structure", + "members": { + "bucketName": { + "target": "com.amazonaws.supplychain#S3BucketName", + "traits": { + "smithy.api#documentation": "

The bucketName of the S3 target objects.

", + "smithy.api#required": {} + } + }, + "prefix": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowS3Prefix", + "traits": { + "smithy.api#documentation": "

The prefix of the S3 target objects.

", + "smithy.api#required": {} + } + }, + "options": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowS3Options", + "traits": { + "smithy.api#documentation": "

The S3 DataIntegrationFlow target options.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The S3 DataIntegrationFlow target configuration parameters.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowSQLQuery": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 65535 + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowSQLTransformationConfiguration": { + "type": "structure", + "members": { + "query": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSQLQuery", + "traits": { + "smithy.api#documentation": "

The transformation SQL query body, written in SparkSQL.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The SQL DataIntegrationFlow transformation configuration parameters.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowSource": { + "type": "structure", + "members": { + "sourceType": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSourceType", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow source type.

", + "smithy.api#required": {} + } + }, + "sourceName": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSourceName", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow source name, which can be used as a table alias in the SQL transformation query.
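As a sketch only (not part of the model): the source name is what the SparkSQL query references as a table alias. The nested member and enum-case names below assume the types Soto generates from this Smithy model, and the bucket, prefix, and dataset ARN are taken from the examples elsewhere in this file.

```swift
import SotoSupplyChain

// Sketch: an S3 source whose sourceName is reused as the table alias in the
// SQL transformation, targeting the staging dataset from the model's examples.
let source = SupplyChain.DataIntegrationFlowSource(
    s3Source: .init(bucketName: "aws-supply-chain-data-b8c7bb28-a576-4334-b481-6d6e8e47371f",
                    prefix: "example-prefix"),
    sourceName: "testSourceName",                    // referenced below as the table alias
    sourceType: .s3
)
let transformation = SupplyChain.DataIntegrationFlowTransformation(
    sqlTransformation: .init(query: "SELECT * FROM testSourceName"),
    transformationType: .sql
)
let target = SupplyChain.DataIntegrationFlowTarget(
    datasetTarget: .init(datasetIdentifier:
        "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset"),
    targetType: .dataset
)
```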

", + "smithy.api#required": {} + } + }, + "s3Source": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowS3SourceConfiguration", + "traits": { + "smithy.api#documentation": "

The S3 DataIntegrationFlow source.

" + } + }, + "datasetSource": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowDatasetSourceConfiguration", + "traits": { + "smithy.api#documentation": "

The dataset DataIntegrationFlow source.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow source parameters.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowSourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSource" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 40 + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowSourceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[A-Za-z0-9_]+$" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowSourceType": { + "type": "enum", + "members": { + "S3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S3" + } + }, + "DATASET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATASET" + } + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowTarget": { + "type": "structure", + "members": { + "targetType": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTargetType", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow target type.

", + "smithy.api#required": {} + } + }, + "s3Target": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowS3TargetConfiguration", + "traits": { + "smithy.api#documentation": "

The S3 DataIntegrationFlow target.

" + } + }, + "datasetTarget": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowDatasetTargetConfiguration", + "traits": { + "smithy.api#documentation": "

The dataset DataIntegrationFlow target.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow target parameters.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowTargetType": { + "type": "enum", + "members": { + "S3": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "S3" + } + }, + "DATASET": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DATASET" + } + } + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowTransformation": { + "type": "structure", + "members": { + "transformationType": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTransformationType", + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow transformation type.

", + "smithy.api#required": {} + } + }, + "sqlTransformation": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSQLTransformationConfiguration", + "traits": { + "smithy.api#documentation": "

The SQL DataIntegrationFlow transformation configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The DataIntegrationFlow transformation parameters.

" + } + }, + "com.amazonaws.supplychain#DataIntegrationFlowTransformationType": { + "type": "enum", + "members": { + "SQL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SQL" + } + }, + "NONE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "NONE" + } + } + } + }, + "com.amazonaws.supplychain#DataLakeDataset": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "instanceId" + } + }, + "namespace": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", + "traits": { + "smithy.api#documentation": "

The namespace of the dataset. The available values are asc and default.

\n ", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "namespace" + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetName", + "traits": { + "smithy.api#documentation": "

The name of the dataset. For the asc namespace, the name must be one of the supported data entities listed at https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "name" + } + }, + "arn": { + "target": "com.amazonaws.supplychain#AscResourceArn", + "traits": { + "smithy.api#documentation": "

The ARN of the dataset.

", + "smithy.api#required": {} + } + }, + "schema": { + "target": "com.amazonaws.supplychain#DataLakeDatasetSchema", + "traits": { + "smithy.api#documentation": "

The schema of the dataset.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.supplychain#DataLakeDatasetDescription", + "traits": { + "smithy.api#documentation": "

The description of the dataset.

" + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the dataset.

", + "smithy.api#required": {} + } + }, + "lastModifiedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The last modified time of the dataset.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The data lake dataset details.

" + } + }, + "com.amazonaws.supplychain#DataLakeDatasetDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.supplychain#DataLakeDatasetList": { + "type": "list", + "member": { + "target": "com.amazonaws.supplychain#DataLakeDataset" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + } + } + }, + "com.amazonaws.supplychain#DataLakeDatasetMaxResults": { + "type": "integer", + "traits": { + "smithy.api#default": 10, + "smithy.api#range": { + "min": 0, + "max": 20 + } + } + }, + "com.amazonaws.supplychain#DataLakeDatasetName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 75 + }, + "smithy.api#pattern": "^[a-z0-9_]+$" + } + }, + "com.amazonaws.supplychain#DataLakeDatasetNamespace": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + }, + "smithy.api#pattern": "^[a-z]+$" + } + }, + "com.amazonaws.supplychain#DataLakeDatasetNextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 65535 + } + } + }, + "com.amazonaws.supplychain#DataLakeDatasetResource": { + "type": "resource", + "identifiers": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID" + }, + "namespace": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace" + }, + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetName" + } + }, + "properties": { + "arn": { + "target": "com.amazonaws.supplychain#AscResourceArn" + }, + "schema": { + "target": "com.amazonaws.supplychain#DataLakeDatasetSchema" + }, + "description": { + "target": "com.amazonaws.supplychain#DataLakeDatasetDescription" + }, + "createdTime": { + "target": "smithy.api#Timestamp" + }, + "lastModifiedTime": { + "target": "smithy.api#Timestamp" + } + }, + "put": { + "target": "com.amazonaws.supplychain#CreateDataLakeDataset" + }, + "read": { + "target": "com.amazonaws.supplychain#GetDataLakeDataset" + }, + "update": { + "target": "com.amazonaws.supplychain#UpdateDataLakeDataset" + }, + "delete": { + "target": "com.amazonaws.supplychain#DeleteDataLakeDataset" + }, + "list": { + "target": "com.amazonaws.supplychain#ListDataLakeDatasets" + }, + "traits": { + "aws.api#arn": { + "template": "instance/{instanceId}/namespaces/{namespace}/datasets/{name}" + }, + "smithy.api#noReplace": {} + } + }, + "com.amazonaws.supplychain#DataLakeDatasetSchema": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetSchemaName", + "traits": { + "smithy.api#documentation": "

The name of the dataset schema.

", + "smithy.api#required": {} + } + }, + "fields": { + "target": "com.amazonaws.supplychain#DataLakeDatasetSchemaFieldList", + "traits": { + "smithy.api#documentation": "

The list of field details of the dataset schema.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The schema details of the dataset.

" + } + }, + "com.amazonaws.supplychain#DataLakeDatasetSchemaField": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetSchemaFieldName", + "traits": { + "smithy.api#documentation": "

The dataset field name.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.supplychain#DataLakeDatasetSchemaFieldType", + "traits": { + "smithy.api#documentation": "

The dataset field type.

", + "smithy.api#required": {} + } + }, + "isRequired": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the field is required.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The dataset field details.
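To show how these field definitions compose into a schema, here is a sketch mirroring the custom my_dataset example used elsewhere in this model. The type and member names assume Soto's generated code for this model and are illustrative, not guaranteed API.

```swift
import SotoSupplyChain

// Sketch: a custom dataset schema using the INT/DOUBLE/STRING/TIMESTAMP field
// types and the isRequired flag described above.
let schema = SupplyChain.DataLakeDatasetSchema(
    fields: [
        .init(isRequired: true, name: "id", type: .int),
        .init(isRequired: true, name: "description", type: .string),
        .init(isRequired: false, name: "price", type: .double),
        .init(isRequired: false, name: "creation_time", type: .timestamp),
    ],
    name: "MyDataset"
)
```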

" + } + }, + "com.amazonaws.supplychain#DataLakeDatasetSchemaFieldList": { + "type": "list", + "member": { + "target": "com.amazonaws.supplychain#DataLakeDatasetSchemaField" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.supplychain#DataLakeDatasetSchemaFieldName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[a-z0-9_]+$" + } + }, + "com.amazonaws.supplychain#DataLakeDatasetSchemaFieldType": { + "type": "enum", + "members": { + "INT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "INT" + } + }, + "DOUBLE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DOUBLE" + } + }, + "STRING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "STRING" + } + }, + "TIMESTAMP": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "TIMESTAMP" + } + } + } + }, + "com.amazonaws.supplychain#DataLakeDatasetSchemaName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[A-Za-z0-9]+$" + } + }, + "com.amazonaws.supplychain#DatasetIdentifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1011 + }, + "smithy.api#pattern": "^[-_/A-Za-z0-9:]+$" + } + }, + "com.amazonaws.supplychain#DeleteDataIntegrationFlow": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#DeleteDataIntegrationFlowRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#DeleteDataIntegrationFlowResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete the DataIntegrationFlow.
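A hedged call sketch for this operation, using the instance ID and flow name from the example below; the generated method and request-member names are assumptions based on Soto's code-generation conventions.

```swift
import SotoSupplyChain

// Sketch: delete a flow and echo the identifiers returned in the response.
func deleteFlow(using supplyChain: SupplyChain) async throws {
    let response = try await supplyChain.deleteDataIntegrationFlow(
        .init(instanceId: "8850c54e-e187-4fa7-89d4-6370f165174d", name: "testStagingFlow")
    )
    print("Deleted \(response.name) on instance \(response.instanceId)")
}
```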

", + "smithy.api#examples": [ + { + "title": "Successful DeleteDataIntegrationFlow", + "input": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow" + }, + "output": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow" + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/api/data-integration/instance/{instanceId}/data-integration-flows/{name}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.supplychain#DeleteDataIntegrationFlowRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowName", + "traits": { + "smithy.api#documentation": "

The name of the DataIntegrationFlow to be deleted.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for DeleteDataIntegrationFlow.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#DeleteDataIntegrationFlowResponse": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowName", + "traits": { + "smithy.api#documentation": "

The name of the deleted DataIntegrationFlow.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for DeleteDataIntegrationFlow.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#DeleteDataLakeDataset": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#DeleteDataLakeDatasetRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#DeleteDataLakeDatasetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete a data lake dataset.

", + "smithy.api#examples": [ + { + "title": "Delete an AWS Supply Chain inbound_order dataset", + "input": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order" + }, + "output": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order" + } + }, + { + "title": "Delete a custom dataset", + "input": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset" + }, + "output": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset" + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.supplychain#DeleteDataLakeDatasetRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "namespace": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", + "traits": { + "smithy.api#documentation": "

The namespace of the dataset. The available values are asc and default.

\n ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetName", + "traits": { + "smithy.api#documentation": "

The name of the dataset. If the namespace is asc, the name must be one of the supported data entities listed at https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters of DeleteDataLakeDataset.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#DeleteDataLakeDatasetResponse": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#required": {} + } + }, + "namespace": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", + "traits": { + "smithy.api#documentation": "

The namespace of the deleted dataset.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetName", + "traits": { + "smithy.api#documentation": "

The name of the deleted dataset.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters of DeleteDataLakeDataset.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#DeleteInstance": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#DeleteInstanceRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#DeleteInstanceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete the instance. This is an asynchronous operation. Upon receiving a DeleteInstance request, AWS Supply Chain immediately returns a response with the instance resource in the delete state while it cleans up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status.
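Because deletion is asynchronous, a caller typically issues the delete and then polls GetInstance until the state leaves Deleting. A sketch only; the method, member, and enum-case names are assumptions derived from the InstanceState values in this model and Soto's generation conventions.

```swift
import SotoSupplyChain

// Sketch: request deletion, then poll until the instance is Deleted or DeleteFailed.
// GetInstance may eventually throw ResourceNotFoundException once cleanup finishes.
func deleteInstanceAndWait(_ supplyChain: SupplyChain, instanceId: String) async throws {
    _ = try await supplyChain.deleteInstance(.init(instanceId: instanceId))
    while true {
        let instance = try await supplyChain.getInstance(.init(instanceId: instanceId)).instance
        if instance.state == .deleted || instance.state == .deleteFailed { return }
        try await Task.sleep(nanoseconds: 30_000_000_000)   // back off between polls
    }
}
```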

", + "smithy.api#examples": [ + { + "title": "Successful DeleteInstance request", + "input": { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793" + }, + "output": { + "instance": { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "awsAccountId": "123456789012", + "state": "Deleting", + "createdTime": 172615383136, + "lastModifiedTime": 172615383136, + "instanceName": "updated example instance name", + "instanceDescription": "updated example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 + } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/api/instance/{instanceId}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.supplychain#DeleteInstanceRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for DeleteInstance.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#DeleteInstanceResponse": { + "type": "structure", + "members": { + "instance": { + "target": "com.amazonaws.supplychain#Instance", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance resource data details.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for DeleteInstance.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#GalaxyPublicAPIGateway": { + "type": "service", + "version": "2024-01-01", + "operations": [ + { + "target": "com.amazonaws.supplychain#ListTagsForResource" + }, + { + "target": "com.amazonaws.supplychain#TagResource" + }, + { + "target": "com.amazonaws.supplychain#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.supplychain#BillOfMaterialsImportJobResource" + }, + { + "target": "com.amazonaws.supplychain#DataIntegrationEventResource" + }, + { + "target": "com.amazonaws.supplychain#DataIntegrationFlowResource" + }, + { + "target": "com.amazonaws.supplychain#DataLakeDatasetResource" + }, + { + "target": "com.amazonaws.supplychain#InstanceResource" + } + ], + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#ConflictException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "SupplyChain", + "arnNamespace": "scn", + "endpointPrefix": "scn" + }, + "aws.auth#sigv4": { + "name": "scn" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "*", + "content-type", + "x-amz-content-sha256", + "x-amz-user-agent", + "x-amzn-platform-id", + "x-amzn-trace-id" + ], + "additionalExposedHeaders": [ + "x-amzn-errortype", + "x-amzn-requestid", + "x-amzn-trace-id" + ], + "maxAge": 86400 + }, + "smithy.api#documentation": "

AWS Supply Chain is a cloud-based application that works with your enterprise resource planning (ERP) and supply chain management systems. Using AWS Supply Chain, you can connect and extract your inventory, supply, and demand related data from existing ERP or supply chain systems into a single data model.

The AWS Supply Chain API supports configuration data import for Supply Planning.

All AWS Supply Chain API operations are Amazon-authenticated and certificate-signed. They require the use of the AWS SDK and support the exclusive use of AWS Identity and Access Management (IAM) users and roles to help facilitate access, trust, and permission policies.
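As a usage sketch only (not part of the Smithy model): once the SotoSupplyChain module generated from this model is built, a client is created through SotoCore and every request is SigV4-signed for the scn signing name declared above. The type and initializer names below follow Soto's usual generation conventions and are assumptions rather than guaranteed API.

```swift
import SotoCore
import SotoSupplyChain

// Sketch: assumes the Soto-generated SupplyChain service type and a recent
// SotoCore where AWSClient() picks up the default credential chain.
func makeSupplyChain() -> (AWSClient, SupplyChain) {
    let client = AWSClient()                                  // SigV4 signing for "scn"
    let supplyChain = SupplyChain(client: client, region: .useast1)
    return (client, supplyChain)                              // caller shuts the client down
}
```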

", + "smithy.api#title": "AWS Supply Chain", + "smithy.rules#endpointRuleSet": { + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" } ], "assign": "PartitionResult" @@ -805,342 +2646,2405 @@ ], "type": "tree" } - ], - "type": "tree" + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] + }, + "smithy.rules#endpointTests": { + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": 
"https://scn-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + 
"documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" + } + } + }, + "com.amazonaws.supplychain#GetBillOfMaterialsImportJob": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#GetBillOfMaterialsImportJobRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#GetBillOfMaterialsImportJobResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Get status and details of a BillOfMaterialsImportJob.
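A sketch of checking the returned job status, mirroring the SUCCESS and IN_PROGRESS examples below; the method, member, and case names are assumptions based on Soto's generation conventions for this model.

```swift
import SotoSupplyChain

// Sketch: fetch an import job and branch on its status.
func checkImportJob(_ supplyChain: SupplyChain, instanceId: String, jobId: String) async throws {
    let job = try await supplyChain.getBillOfMaterialsImportJob(
        .init(instanceId: instanceId, jobId: jobId)
    ).job
    switch job.status {
    case .success:    print("Import complete: \(job.message ?? "")")
    case .inProgress: print("Still importing \(job.s3uri)")
    default:          print("Job status: \(job.status)")
    }
}
```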

", + "smithy.api#examples": [ + { + "title": "Invoke GetBillOfMaterialsImportJob for a successful job", + "input": { + "instanceId": "60f82bbd-71f7-4fcd-a941-472f574c5243", + "jobId": "f79b359b-1515-4436-a3bf-bae7b33e47b4" + }, + "output": { + "job": { + "instanceId": "60f82bbd-71f7-4fcd-a941-472f574c5243", + "jobId": "f79b359b-1515-4436-a3bf-bae7b33e47b4", + "status": "SUCCESS", + "s3uri": "s3://mybucketname/pathelemene/file.csv", + "message": "Import job completed successfully." + } + } + }, + { + "title": "Invoke GetBillOfMaterialsImportJob for an in-progress job", + "input": { + "instanceId": "60f82bbd-71f7-4fcd-a941-472f574c5243", + "jobId": "f79b359b-1515-4436-a3bf-bae7b33e47b4" + }, + "output": { + "job": { + "instanceId": "60f82bbd-71f7-4fcd-a941-472f574c5243", + "jobId": "f79b359b-1515-4436-a3bf-bae7b33e47b4", + "status": "IN_PROGRESS", + "s3uri": "s3://mybucketname/pathelemene/file.csv" + } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/api/configuration/instances/{instanceId}/bill-of-materials-import-jobs/{jobId}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.supplychain#GetBillOfMaterialsImportJobRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "jobId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The BillOfMaterialsImportJob identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for GetBillOfMaterialsImportJob.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#GetBillOfMaterialsImportJobResponse": { + "type": "structure", + "members": { + "job": { + "target": "com.amazonaws.supplychain#BillOfMaterialsImportJob", + "traits": { + "smithy.api#documentation": "

The BillOfMaterialsImportJob.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for GetBillOfMaterialsImportJob.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#GetDataIntegrationFlow": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#GetDataIntegrationFlowRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#GetDataIntegrationFlowResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

View the DataIntegrationFlow details.

", + "smithy.api#examples": [ + { + "title": "Successful GetDataIntegrationFlow", + "input": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow" + }, + "output": { + "flow": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow", + "sources": [ + { + "sourceType": "S3", + "sourceName": "testSourceName", + "s3Source": { + "bucketName": "aws-supply-chain-data-b8c7bb28-a576-4334-b481-6d6e8e47371f", + "prefix": "example-prefix" + } + } + ], + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset" + } }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ], - "type": "tree" + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT * FROM testSourceName" + } + }, + "createdTime": 1.72495640044E9, + "lastModifiedTime": 1.72495640044E9 + } } - ] + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/api/data-integration/instance/{instanceId}/data-integration-flows/{name}" }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://scn-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false - } + "smithy.api#readonly": {} + } + }, + "com.amazonaws.supplychain#GetDataIntegrationFlowRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowName", + "traits": { + "smithy.api#documentation": "

The name of the DataIntegrationFlow to retrieve.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for GetDataIntegrationFlow.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#GetDataIntegrationFlowResponse": { + "type": "structure", + "members": { + "flow": { + "target": "com.amazonaws.supplychain#DataIntegrationFlow", + "traits": { + "smithy.api#documentation": "

The details of the DataIntegrationFlow returned.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for GetDataIntegrationFlow.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#GetDataLakeDataset": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#GetDataLakeDatasetRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#GetDataLakeDatasetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Get a data lake dataset.

", + "smithy.api#examples": [ + { + "title": "Get properties of an existing AWS Supply Chain inbound order dataset", + "input": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order" }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://scn.us-east-1.api.aws" + "output": { + "dataset": { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/asc/datasets/inbound_order", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order", + "description": "This is an AWS Supply Chain inbound order dataset", + "createdTime": 1.727116807751E9, + "lastModifiedTime": 1.727116807751E9, + "schema": { + "name": "InboundOrder", + "fields": [ + { + "name": "id", + "type": "STRING", + "isRequired": true + }, + { + "name": "tpartner_id", + "type": "STRING", + "isRequired": true + }, + { + "name": "connection_id", + "type": "STRING", + "isRequired": true + }, + { + "name": "order_type", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_status", + "type": "STRING", + "isRequired": false + }, + { + "name": "inbound_order_url", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_creation_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "company_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "to_site_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_currency_uom", + "type": "STRING", + "isRequired": false + }, + { + "name": "vendor_currency_uom", + "type": "STRING", + "isRequired": false + }, + { + "name": "exchange_rate", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "exchange_rate_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "incoterm", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm2", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm_location_1", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm_location_2", + "type": "STRING", + "isRequired": false + }, + { + "name": "submitted_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "agreement_start_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "agreement_end_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "shipping_instr_code", + "type": "STRING", + "isRequired": false + }, + { + "name": "payment_terms_code", + "type": "STRING", + "isRequired": false + }, + { + "name": "std_terms_agreement", + "type": "STRING", + "isRequired": false + }, + { + "name": "std_terms_agreement_ver", + "type": "STRING", + "isRequired": false + }, + { + "name": "agreement_number", + "type": "STRING", + "isRequired": false + }, + { + "name": "source", + "type": "STRING", + "isRequired": false + }, + { + "name": "source_update_dttm", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "source_event_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "db_creation_dttm", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "db_updation_dttm", + "type": "TIMESTAMP", + "isRequired": false + } + ] } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true } + } + }, + { + "title": "Get proporties of an existing custom dataset", + "input": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": 
"my_dataset" }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn.us-east-1.amazonaws.com" + "output": { + "dataset": { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/default/datasets/my_dataset", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset", + "description": "This is a custom dataset", + "createdTime": 1.727116807751E9, + "lastModifiedTime": 1.727116807751E9, + "schema": { + "name": "MyDataset", + "fields": [ + { + "name": "id", + "type": "INT", + "isRequired": true + }, + { + "name": "description", + "type": "STRING", + "isRequired": true + }, + { + "name": "price", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "creation_time", + "type": "TIMESTAMP", + "isRequired": false + } + ] } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.supplychain#GetDataLakeDatasetRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "namespace": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", + "traits": { + "smithy.api#documentation": "

The namespace of the dataset. The available values are asc and default.

\n ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetName", + "traits": { + "smithy.api#documentation": "

The name of the dataset. For the asc namespace, the name must be one of the supported data entities listed at https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for GetDataLakeDataset.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#GetDataLakeDatasetResponse": { + "type": "structure", + "members": { + "dataset": { + "target": "com.amazonaws.supplychain#DataLakeDataset", + "traits": { + "smithy.api#documentation": "

The fetched dataset details.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for GetDataLakeDataset.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#GetInstance": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#GetInstanceRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#GetInstanceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Get the AWS Supply Chain instance details.

", + "smithy.api#examples": [ + { + "title": "Successful GetInstance request", + "input": { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793" }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://scn-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true + "output": { + "instance": { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "awsAccountId": "123456789012", + "state": "Active", + "createdTime": 172615383136, + "lastModifiedTime": 172615383136, + "instanceName": "example instance name", + "instanceDescription": "example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/api/instance/{instanceId}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.supplychain#GetInstanceRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for GetInstance.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#GetInstanceResponse": { + "type": "structure", + "members": { + "instance": { + "target": "com.amazonaws.supplychain#Instance", + "traits": { + "smithy.api#documentation": "

The instance resource data details.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for GetInstance.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#Instance": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#required": {}, + "smithy.api#resourceIdentifier": "instanceId" + } + }, + "awsAccountId": { + "target": "com.amazonaws.supplychain#AwsAccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID that owns the instance.

", + "smithy.api#required": {} + } + }, + "state": { + "target": "com.amazonaws.supplychain#InstanceState", + "traits": { + "smithy.api#documentation": "

The state of the instance.

", + "smithy.api#required": {} + } + }, + "webAppDnsDomain": { + "target": "com.amazonaws.supplychain#InstanceWebAppDnsDomain", + "traits": { + "smithy.api#documentation": "

The WebApp DNS domain name of the instance.

" + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The instance creation timestamp.

" + } + }, + "lastModifiedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The instance last modified timestamp.

" + } + }, + "instanceName": { + "target": "com.amazonaws.supplychain#InstanceName", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance name.

" + } + }, + "instanceDescription": { + "target": "com.amazonaws.supplychain#InstanceDescription", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance description.

" + } + }, + "kmsKeyArn": { + "target": "com.amazonaws.supplychain#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you optionally provided for encryption. If you did not provide anything here, AWS Supply Chain uses the Amazon Web Services owned KMS key and nothing is returned.

" + } + }, + "versionNumber": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The version number of the instance.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the instance.

" + } + }, + "com.amazonaws.supplychain#InstanceDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 501 + }, + "smithy.api#pattern": "^([a-zA-Z0-9., _ʼ'%-]){0,500}$" + } + }, + "com.amazonaws.supplychain#InstanceList": { + "type": "list", + "member": { + "target": "com.amazonaws.supplychain#Instance" + } + }, + "com.amazonaws.supplychain#InstanceMaxResults": { + "type": "integer", + "traits": { + "smithy.api#default": 10, + "smithy.api#range": { + "min": 0, + "max": 20 + } + } + }, + "com.amazonaws.supplychain#InstanceName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 63 + }, + "smithy.api#pattern": "^(?![ _ʼ'%-])[a-zA-Z0-9 _ʼ'%-]{0,62}[a-zA-Z0-9]$" + } + }, + "com.amazonaws.supplychain#InstanceNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.supplychain#InstanceName" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10 + } + } + }, + "com.amazonaws.supplychain#InstanceNextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.supplychain#InstanceResource": { + "type": "resource", + "identifiers": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID" + } + }, + "properties": { + "awsAccountId": { + "target": "com.amazonaws.supplychain#AwsAccountId" + }, + "webAppDnsDomain": { + "target": "com.amazonaws.supplychain#InstanceWebAppDnsDomain" + }, + "state": { + "target": "com.amazonaws.supplychain#InstanceState" + }, + "createdTime": { + "target": "smithy.api#Timestamp" + }, + "lastModifiedTime": { + "target": "smithy.api#Timestamp" + }, + "instanceName": { + "target": "com.amazonaws.supplychain#InstanceName" + }, + "instanceDescription": { + "target": "com.amazonaws.supplychain#InstanceDescription" + }, + "kmsKeyArn": { + "target": "com.amazonaws.supplychain#KmsKeyArn" + }, + "versionNumber": { + "target": "smithy.api#Double" + } + }, + "create": { + "target": "com.amazonaws.supplychain#CreateInstance" + }, + "read": { + "target": "com.amazonaws.supplychain#GetInstance" + }, + "update": { + "target": "com.amazonaws.supplychain#UpdateInstance" + }, + "delete": { + "target": "com.amazonaws.supplychain#DeleteInstance" + }, + "list": { + "target": "com.amazonaws.supplychain#ListInstances" + }, + "traits": { + "aws.api#arn": { + "template": "instance/{instanceId}" + } + } + }, + "com.amazonaws.supplychain#InstanceState": { + "type": "enum", + "members": { + "INITIALIZING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Initializing" + } + }, + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Active" + } + }, + "CREATE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "CreateFailed" + } + }, + "DELETE_FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "DeleteFailed" + } + }, + "DELETING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleting" + } + }, + "DELETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Deleted" + } + } + } + }, + "com.amazonaws.supplychain#InstanceStateList": { + "type": "list", + "member": { + "target": "com.amazonaws.supplychain#InstanceState" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 6 + } + } + }, + "com.amazonaws.supplychain#InstanceWebAppDnsDomain": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z0-9]+(.[A-Za-z0-9]+)+$" + } + }, + 
"com.amazonaws.supplychain#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

Unexpected error during processing of request.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.supplychain#KmsKeyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 2048 + }, + "smithy.api#pattern": "^arn:[a-z0-9][-.a-z0-9]{0,62}:kms:([a-z0-9][-.a-z0-9]{0,62})?:([a-z0-9][-.a-z0-9]{0,62})?:key/.{0,1019}$" + } + }, + "com.amazonaws.supplychain#ListDataIntegrationFlows": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#ListDataIntegrationFlowsRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#ListDataIntegrationFlowsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all the DataIntegrationFlows in a paginated way.

", + "smithy.api#examples": [ + { + "title": "Successful ListDataIntegrationFlow", + "input": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d" }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn-fips.cn-north-1.amazonaws.com.cn" + "output": { + "flows": [ + { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow", + "sources": [ + { + "sourceType": "S3", + "sourceName": "testSourceName", + "s3Source": { + "bucketName": "aws-supply-chain-data-b8c7bb28-a576-4334-b481-6d6e8e47371f", + "prefix": "example-prefix" + } + } + ], + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset" + } + }, + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT * FROM testSourceName" + } + }, + "createdTime": 1.72495640044E9, + "lastModifiedTime": 1.72495640044E9 + }, + { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "trading-partner", + "sources": [ + { + "sourceType": "DATASET", + "sourceName": "testSourceName1", + "datasetSource": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset1" + } + }, + { + "sourceType": "DATASET", + "sourceName": "testSourceName2", + "datasetSource": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset2" + } + } + ], + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT S1.id AS id, S1.poc_org_unit_description AS description, S1.company_id AS company_id, S1.tpartner_type AS tpartner_type, S1.geo_id AS geo_id, S1.eff_start_date AS eff_start_date, S1.eff_end_date AS eff_end_date FROM testSourceName1 AS S1 LEFT JOIN testSourceName2 as S2 ON S1.id=S2.id" + } + }, + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/asc/datasets/trading_partner" + } + }, + "createdTime": 1.723576350688E10, + "lastModifiedTime": 1.723576350688E10 } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } + ] + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/api/data-integration/instance/{instanceId}/data-integration-flows" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "flows" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.supplychain#ListDataIntegrationFlowsRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowNextToken", + "traits": { + "smithy.api#documentation": "

The pagination token to fetch the next page of the DataIntegrationFlows.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowMaxResults", + "traits": { + "smithy.api#default": 10, + "smithy.api#documentation": "

Specify the maximum number of DataIntegrationFlows to fetch in one paginated request.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for ListDataIntegrationFlows.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#ListDataIntegrationFlowsResponse": { + "type": "structure", + "members": { + "flows": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowList", + "traits": { + "smithy.api#documentation": "

The list of DataIntegrationFlows.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowNextToken", + "traits": { + "smithy.api#documentation": "

The pagination token to fetch the next page of the DataIntegrationFlows.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for ListDataIntegrationFlows.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#ListDataLakeDatasets": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#ListDataLakeDatasetsRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#ListDataLakeDatasetsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List the data lake datasets for a specific instance and namespace.

", + "smithy.api#examples": [ + { + "title": "List AWS Supply Chain datasets", + "input": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc" }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://scn.cn-north-1.api.amazonwebservices.com.cn" + "output": { + "datasets": [ + { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/asc/datasets/inbound_order", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order", + "description": "This is an AWS Supply Chain inbound order dataset", + "createdTime": 1.727116807751E9, + "lastModifiedTime": 1.727116807751E9, + "schema": { + "name": "InboundOrder", + "fields": [ + { + "name": "id", + "type": "STRING", + "isRequired": true + }, + { + "name": "tpartner_id", + "type": "STRING", + "isRequired": true + }, + { + "name": "connection_id", + "type": "STRING", + "isRequired": true + }, + { + "name": "order_type", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_status", + "type": "STRING", + "isRequired": false + }, + { + "name": "inbound_order_url", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_creation_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "company_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "to_site_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_currency_uom", + "type": "STRING", + "isRequired": false + }, + { + "name": "vendor_currency_uom", + "type": "STRING", + "isRequired": false + }, + { + "name": "exchange_rate", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "exchange_rate_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "incoterm", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm2", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm_location_1", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm_location_2", + "type": "STRING", + "isRequired": false + }, + { + "name": "submitted_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "agreement_start_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "agreement_end_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "shipping_instr_code", + "type": "STRING", + "isRequired": false + }, + { + "name": "payment_terms_code", + "type": "STRING", + "isRequired": false + }, + { + "name": "std_terms_agreement", + "type": "STRING", + "isRequired": false + }, + { + "name": "std_terms_agreement_ver", + "type": "STRING", + "isRequired": false + }, + { + "name": "agreement_number", + "type": "STRING", + "isRequired": false + }, + { + "name": "source", + "type": "STRING", + "isRequired": false + }, + { + "name": "source_update_dttm", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "source_event_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "db_creation_dttm", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "db_updation_dttm", + "type": "TIMESTAMP", + "isRequired": false + } + ] + } } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } + ] + } + }, + { + "title": "List custom datasets using pagination", + "input": { + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "maxResults": 2, + "nextToken": 
"next_token_returned_from_previous_list_request" }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn.cn-north-1.amazonaws.com.cn" + "output": { + "datasets": [ + { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/default/datasets/my_dataset", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset", + "description": "This is a custom dataset", + "createdTime": 1.727116807751E9, + "lastModifiedTime": 1.727116807751E9, + "schema": { + "name": "MyDataset", + "fields": [ + { + "name": "id", + "type": "INT", + "isRequired": true + }, + { + "name": "description", + "type": "STRING", + "isRequired": true + }, + { + "name": "price", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "creation_time", + "type": "TIMESTAMP", + "isRequired": false + } + ] + } + }, + { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/default/datasets/my_dataset_2", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset_2", + "description": "This is a custom dataset 2", + "createdTime": 1.727116907751E9, + "lastModifiedTime": 1.727116907751E9, + "schema": { + "name": "MyDataset2", + "fields": [ + { + "name": "id", + "type": "INT", + "isRequired": true + }, + { + "name": "description", + "type": "STRING", + "isRequired": true + } + ] + } } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://scn-fips.us-gov-east-1.api.aws" + ], + "nextToken": "next_token_for_next_list_request" + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "datasets" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.supplychain#ListDataLakeDatasetsRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "namespace": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", + "traits": { + "smithy.api#documentation": "

The namespace of the dataset. The available values are asc (Amazon Web Services Supply Chain supported datasets) and default (custom datasets).

\n ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNextToken", + "traits": { + "smithy.api#documentation": "

The pagination token to fetch next page of datasets.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.supplychain#DataLakeDatasetMaxResults", + "traits": { + "smithy.api#default": 10, + "smithy.api#documentation": "

The max number of datasets to fetch in this paginated request.

", + "smithy.api#httpQuery": "maxResults" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters of ListDataLakeDatasets.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#ListDataLakeDatasetsResponse": { + "type": "structure", + "members": { + "datasets": { + "target": "com.amazonaws.supplychain#DataLakeDatasetList", + "traits": { + "smithy.api#documentation": "

The list of fetched dataset details.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNextToken", + "traits": { + "smithy.api#documentation": "

The pagination token to fetch next page of datasets.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters of ListDataLakeDatasets.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#ListInstances": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#ListInstancesRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#ListInstancesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List all the AWS Supply Chain instances in a paginated way.

", + "smithy.api#examples": [ + { + "title": "Successful ListInstance request with no input data", + "output": { + "instances": [ + { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "awsAccountId": "123456789012", + "state": "Active", + "createdTime": 172615383136, + "lastModifiedTime": 172615383136, + "instanceName": "example instance name", + "instanceDescription": "example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 + }, + { + "instanceId": "3ad8116a-644d-4172-8dcb-20e51d314c14", + "awsAccountId": "123456789012", + "state": "Initializing", + "createdTime": 17261674383136, + "lastModifiedTime": 17261674383136, + "instanceDescription": "example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } + ] + } + }, + { + "title": "Successful ListInstance request with filters", + "input": { + "instanceNameFilter": [ + "example instance name" + ], + "instanceStateFilter": [ + "Active" + ] }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn-fips.us-gov-east-1.amazonaws.com" + "output": { + "instances": [ + { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "awsAccountId": "123456789012", + "state": "Active", + "createdTime": 172615383136, + "lastModifiedTime": 172615383136, + "instanceName": "example instance name", + "instanceDescription": "example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } + ] + } + }, + { + "title": "Successful ListInstance request with maxResult override", + "input": { + "maxResults": 1 }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://scn.us-gov-east-1.api.aws" + "output": { + "instances": [ + { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "awsAccountId": "123456789012", + "state": "Active", + "createdTime": 172615383136, + "lastModifiedTime": 172615383136, + "instanceName": "example instance name", + "instanceDescription": "example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } + ], + "nextToken": "AAQA-EFRSURBSGhtcng0c0dxbENwUHdnckVIbkFYNU1QVjRTZWN2ak5iMFVicC8zemlHOVF3SEpjSC9WTWJVVXBMV2Z1N3ZvZlQ0WEFBQUFmakI4QmdrcWhraUc5dzBCQndhZ2J6QnRBZ0VBTUdnR0NTcUdTSWIzRFFFSEFUQWVCZ2xnaGtnQlpRTUVBUzR3RVFRTTJibW9LemgrSWZTY0RaZEdBZ0VRZ0R2dDhsQnVGbGJ0dnFTZityWmNSWEVPbG93emJoSjhxOGNMbGQ1UGMvY0VRbWlTR3pQUFd4N2RraXY5Y0ovcS9vSmFYZVBGdWVHaU0zWmd0dz09n-rC1ejA5--7ltJxpDT2xP_i8xGqDPMOZfjpp8q6l5NuP9_bnBURvwwYhdqDriMK5_f96LuPEnPbuML-ItfgEiCcUy0p2tApvpZkZqOG5fbqP-4C5aDYPTffHLyq-MMqvfrGVJzL1nvkpZcnTkVR9VJsu5b8I0qqDW0H8EMKGgTo78U9lr4sj3Usi9VMwZxgKCBmr03HhFLYXOW--XMbIx0CTZF0fYIcRxmA_sVS6J7gpaB9yMcnzs5VUKokoA5JTcAPY5d1Y1VyE8KKxv51cfPgXw8OYCDbFQncw8mZPmE-VqxjFbksmk_FmghpPn9j2Ppoe-zr0LQ%3D" + } + }, + { + "title": "Successful ListInstance request with nextToken", + "input": { + "nextToken": 
"AAQA-EFRSURBSGhtcng0c0dxbENwUHdnckVIbkFYNU1QVjRTZWN2ak5iMFVicC8zemlHOVF3SEpjSC9WTWJVVXBMV2Z1N3ZvZlQ0WEFBQUFmakI4QmdrcWhraUc5dzBCQndhZ2J6QnRBZ0VBTUdnR0NTcUdTSWIzRFFFSEFUQWVCZ2xnaGtnQlpRTUVBUzR3RVFRTTJibW9LemgrSWZTY0RaZEdBZ0VRZ0R2dDhsQnVGbGJ0dnFTZityWmNSWEVPbG93emJoSjhxOGNMbGQ1UGMvY0VRbWlTR3pQUFd4N2RraXY5Y0ovcS9vSmFYZVBGdWVHaU0zWmd0dz09n-rC1ejA5--7ltJxpDT2xP_i8xGqDPMOZfjpp8q6l5NuP9_bnBURvwwYhdqDriMK5_f96LuPEnPbuML-ItfgEiCcUy0p2tApvpZkZqOG5fbqP-4C5aDYPTffHLyq-MMqvfrGVJzL1nvkpZcnTkVR9VJsu5b8I0qqDW0H8EMKGgTo78U9lr4sj3Usi9VMwZxgKCBmr03HhFLYXOW--XMbIx0CTZF0fYIcRxmA_sVS6J7gpaB9yMcnzs5VUKokoA5JTcAPY5d1Y1VyE8KKxv51cfPgXw8OYCDbFQncw8mZPmE-VqxjFbksmk_FmghpPn9j2Ppoe-zr0LQ%3D", + "maxResults": 1 }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn.us-gov-east-1.amazonaws.com" + "output": { + "instances": [ + { + "instanceId": "3ad8116a-644d-4172-8dcb-20e51d314c14", + "awsAccountId": "123456789012", + "state": "Initializing", + "createdTime": 17261674383136, + "lastModifiedTime": 17261674383136, + "instanceDescription": "example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false + ] + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/api/instance" + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "instances" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.supplychain#ListInstancesRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.supplychain#InstanceNextToken", + "traits": { + "smithy.api#documentation": "

The pagination token to fetch the next page of instances.

", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.supplychain#InstanceMaxResults", + "traits": { + "smithy.api#default": 10, + "smithy.api#documentation": "

Specify the maximum number of instances to fetch in this paginated request.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "instanceNameFilter": { + "target": "com.amazonaws.supplychain#InstanceNameList", + "traits": { + "smithy.api#documentation": "

The filter to ListInstances based on their names.

", + "smithy.api#httpQuery": "instanceNameFilter" + } + }, + "instanceStateFilter": { + "target": "com.amazonaws.supplychain#InstanceStateList", + "traits": { + "smithy.api#documentation": "

The filter to ListInstances based on their state.

", + "smithy.api#httpQuery": "instanceStateFilter" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for ListInstances.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#ListInstancesResponse": { + "type": "structure", + "members": { + "instances": { + "target": "com.amazonaws.supplychain#InstanceList", + "traits": { + "smithy.api#documentation": "

The list of instance resource details.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.supplychain#InstanceNextToken", + "traits": { + "smithy.api#documentation": "

The pagination token to fetch the next page of instances.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for ListInstances.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List all the tags for an Amazon Web Services Supply Chain resource.

", + "smithy.api#examples": [ + { + "title": "Successful ListTagsForResource", + "input": { + "resourceArn": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/data-integration-flows/my_flow1" + }, + "output": { + "tags": { + "tagKey1": "tagValue1" } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "GET", + "uri": "/api/tags/{resourceArn}" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.supplychain#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.supplychain#AscResourceArn", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain resource ARN that needs tags to be listed.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters of ListTagsForResource.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.supplychain#TagMap", + "traits": { + "smithy.api#documentation": "

The tags added to an Amazon Web Services Supply Chain resource.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters of ListTagsForResource.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

Request references a resource which does not exist.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.supplychain#S3BucketName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^[a-z0-9][a-z0-9.-]*[a-z0-9]$" + } + }, + "com.amazonaws.supplychain#SendDataIntegrationEvent": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#SendDataIntegrationEventRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#SendDataIntegrationEventResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#ConflictException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in the data lake. \n New data events are synced with the data lake at 5 PM GMT every day. The updated transactional data is available in the data lake after ingestion.

", + "smithy.api#examples": [ + { + "title": "Successful SendDataIntegrationEvent for inboundorder event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.inboundorder", + "data": "{\"id\": \"inbound-order-id-test-123\", \"tpartner_id\": \"partner-id-test-123\" }", + "eventGroupId": "inboundOrderId", + "eventTimestamp": 1.515531081123E9 + }, + "output": { + "eventId": "c4132c1d-8f60-44a2-9932-f723c4f7b8a7" + } + }, + { + "title": "Successful SendDataIntegrationEvent for inboundorderline event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.inboundorderline", + "data": "{\"id\": \"inbound-order-line-id-test-123\", \"order_id\": \"order-id-test-123\", \"tpartner_id\": \"partner-id-test-123\", \"product_id\": \"product-id-test-123\", \"quantity_submitted\": \"100.0\" }", + "eventGroupId": "inboundOrderLineId", + "eventTimestamp": 1.515531081123E9 + }, + "output": { + "eventId": "45d95db2-d106-40e0-aa98-f1204230a691" + } + }, + { + "title": "Successful SendDataIntegrationEvent for inboundorderlineschedule event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.inboundorderlineschedule", + "data": "{\"id\": \"inbound-order-line-schedule-id-test-123\", \"order_id\": \"order-id-test-123\", \"order_line_id\": \"order-line-id-test-123\", \"product_id\": \"product-id-test-123\"}", + "eventGroupId": "inboundOrderLineScheduleId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true - } + "output": { + "eventId": "5abba995-7735-4d1e-95c4-7cc93e48cf9f" + } + }, + { + "title": "Successful SendDataIntegrationEvent for forecast event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.forecast", + "data": "{\"snapshot_date\": \"1672470400000\", \"product_id\": \"product-id-test-123\", \"site_id\": \"site-id-test-123\", \"region_id\": \"region-id-test-123\", \"product_group_id\": \"product-group-id-test-123\", \"forecast_start_dttm\": \"1672470400000\", \"forecast_end_dttm\": \"1672470400000\" }", + "eventGroupId": "forecastId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false - } + "output": { + "eventId": "29312d5b-f499-4dcd-b017-3dab3cd34d61" + } + }, + { + "title": "Successful SendDataIntegrationEvent for inventorylevel event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.inventorylevel", + "data": "{\"snapshot_date\": \"1672470400000\", \"site_id\": \"site-id-test-123\", \"product_id\": \"product-id-test-123\", \"on_hand_inventory\": \"100.0\", \"inv_condition\": \"good\", \"lot_number\": \"lot-number-test-123\"}", + "eventGroupId": "inventoryLevelId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-east-1", - 
"UseFIPS": false, - "UseDualStack": true - } + "output": { + "eventId": "3aa78324-acd8-4fdd-a19e-231ea003c2b3" + } + }, + { + "title": "Successful SendDataIntegrationEvent for outboundorderline event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.outboundorderline", + "data": "{\"id\": \"outbound-orderline-id-test-123\", \"cust_order_id\": \"cust-order-id-test-123\", \"product_id\": \"product-id-test-123\" }", + "eventGroupId": "outboundOrderLineId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false - } + "output": { + "eventId": "959b7ef9-5e2d-4795-b1ca-5b16a3eb6b89" + } + }, + { + "title": "Successful SendDataIntegrationEvent for outboundshipment event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.outboundshipment", + "data": "{\"id\": \"outbound-shipment-id-test-123\", \"cust_order_id\": \"cust-order-id-test-123\", \"cust_order_line_id\": \"cust-order-line-id-test-123\", \"product_id\": \"product-id-test-123\" }", + "eventGroupId": "outboundShipmentId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true - } + "output": { + "eventId": "59feded3-5e46-4126-81bf-0137ca176ee0" + } + }, + { + "title": "Successful SendDataIntegrationEvent for processheader event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.processheader", + "data": "{\"process_id\": \"process-id-test-123\" }", + "eventGroupId": "processHeaderId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false - } + "output": { + "eventId": "564130eb-2d8a-4550-a768-ddf0daf7b4a9" + } + }, + { + "title": "Successful SendDataIntegrationEvent for processoperation event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.processoperation", + "data": "{\"process_operation_id\": \"process-operation-id-test-123\", \"process_id\": \"process-id-test-123\" }", + "eventGroupId": "processOperationId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } + "output": { + "eventId": "db5df408-89c7-4b9f-a326-016f6c2b3396" + } + }, + { + "title": "Successful SendDataIntegrationEvent for processproduct event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.processproduct", + "data": "{\"process_product_id\": \"process-product-id-test-123\", \"process_id\": \"process-id-test-123\" }", + "eventGroupId": "processProductId", + "eventTimestamp": 
1.515531081123E9 }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://scn.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false - } + "output": { + "eventId": "6929b275-485e-4035-a798-99077ca6d669" + } + }, + { + "title": "Successful SendDataIntegrationEvent for reservation event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.reservation", + "data": "{\"reservation_id\": \"reservation-id-test-123\", \"reservation_detail_id\": \"reservation-detail-id-test-123\" }", + "eventGroupId": "reservationId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } + "output": { + "eventId": "f6c55a8b-fde2-44f6-848a-9b4336c77209" + } + }, + { + "title": "Successful SendDataIntegrationEvent for shipment event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.shipment", + "data": "{\"id\": \"shipment-id-test-123\", \"supplier_tpartner_id\": \"supplier-tpartner-id-test-123\", \"product_id\": \"product-id-test-123\", \"order_id\": \"order-id-test-123\", \"order_line_id\": \"order-line-id-test-123\", \"package_id\": \"package-id-test-123\" }", + "eventGroupId": "shipmentId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } + "output": { + "eventId": "61d079d8-3f56-49bb-b35a-c0271a4e4f0a" + } + }, + { + "title": "Successful SendDataIntegrationEvent for shipmentstop event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.shipmentstop", + "data": "{\"shipment_stop_id\": \"shipment-stop-id-test-123\", \"shipment_id\": \"shipment-id-test-123\" }", + "eventGroupId": "shipmentStopId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } + "output": { + "eventId": "3610992a-fc2f-4da4-9beb-724994622ba1" + } + }, + { + "title": "Successful SendDataIntegrationEvent for shipmentstoporder event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.shipmentstoporder", + "data": "{\"shipment_stop_order_id\": \"shipment-stop-order-id-test-123\", \"shipment_stop_id\": \"shipment-stop-id-test-123\", \"shipment_id\": \"shipment-id-test-123\" }", + "eventGroupId": "shipmentStopOrderId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } + 
"output": { + "eventId": "1d550a60-9321-4d25-a132-9dd4b2d9e934" + } + }, + { + "title": "Successful SendDataIntegrationEvent for supplyplan event type", + "input": { + "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", + "eventType": "scn.data.supplyplan", + "data": "{\"supply_plan_id\": \"supply-plan-id-test-123\" }", + "eventGroupId": "supplyPlanId", + "eventTimestamp": 1.515531081123E9 }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } + "output": { + "eventId": "9abaee56-5dc4-4c31-8250-3206a651d8a1" } - ], - "version": "1.0" + } + ], + "smithy.api#http": { + "code": 200, + "method": "POST", + "uri": "/api-data/data-integration/instance/{instanceId}/data-integration-events" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.supplychain#SendDataIntegrationEventRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "eventType": { + "target": "com.amazonaws.supplychain#DataIntegrationEventType", + "traits": { + "smithy.api#documentation": "

The data event type.

", + "smithy.api#required": {} + } + }, + "data": { + "target": "com.amazonaws.supplychain#DataIntegrationEventData", + "traits": { + "smithy.api#documentation": "

The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain\n .

", + "smithy.api#required": {} + } + }, + "eventGroupId": { + "target": "com.amazonaws.supplychain#DataIntegrationEventGroupId", + "traits": { + "smithy.api#documentation": "

Event identifier (for example, orderId for InboundOrder) used for data sharing or partitioning.

", + "smithy.api#required": {} + } + }, + "eventTimestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The event timestamp (in epoch seconds).

", + "smithy.api#timestampFormat": "epoch-seconds" + } + }, + "clientToken": { + "target": "com.amazonaws.supplychain#ClientToken", + "traits": { + "smithy.api#documentation": "

The idempotent client token.

", + "smithy.api#idempotencyToken": {} + } } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for SendDataIntegrationEvent.

", + "smithy.api#input": {} } }, - "com.amazonaws.supplychain#GetBillOfMaterialsImportJob": { + "com.amazonaws.supplychain#SendDataIntegrationEventResponse": { + "type": "structure", + "members": { + "eventId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The unique event identifier.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for SendDataIntegrationEvent.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

Request would cause a service quota to be exceeded.

", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.supplychain#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.supplychain#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.supplychain#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.supplychain#TagMap": { + "type": "map", + "key": { + "target": "com.amazonaws.supplychain#TagKey" + }, + "value": { + "target": "com.amazonaws.supplychain#TagValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.supplychain#TagResource": { "type": "operation", "input": { - "target": "com.amazonaws.supplychain#GetBillOfMaterialsImportJobRequest" + "target": "com.amazonaws.supplychain#TagResourceRequest" }, "output": { - "target": "com.amazonaws.supplychain#GetBillOfMaterialsImportJobResponse" + "target": "com.amazonaws.supplychain#TagResourceResponse" }, "errors": [ { @@ -1160,141 +5064,191 @@ } ], "traits": { - "smithy.api#documentation": "

Get status and details of a BillOfMaterialsImportJob.

", + "smithy.api#documentation": "

Create tags for an Amazon Web Services Supply Chain resource.

", "smithy.api#examples": [ { - "title": "Invoke GetBillOfMaterialsImportJob for a successful job", + "title": "Successful TagResource", "input": { - "instanceId": "60f82bbd-71f7-4fcd-a941-472f574c5243", - "jobId": "f79b359b-1515-4436-a3bf-bae7b33e47b4" - }, - "output": { - "job": { - "instanceId": "60f82bbd-71f7-4fcd-a941-472f574c5243", - "jobId": "f79b359b-1515-4436-a3bf-bae7b33e47b4", - "status": "SUCCESS", - "s3uri": "s3://mybucketname/pathelemene/file.csv", - "message": "Import job completed successfully." + "resourceArn": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/data-integration-flows/my_flow1", + "tags": { + "tagKey1": "tagValue1" } - } - }, - { - "title": "Invoke GetBillOfMaterialsImportJob for an in-progress job", - "input": { - "instanceId": "60f82bbd-71f7-4fcd-a941-472f574c5243", - "jobId": "f79b359b-1515-4436-a3bf-bae7b33e47b4" }, - "output": { - "job": { - "instanceId": "60f82bbd-71f7-4fcd-a941-472f574c5243", - "jobId": "f79b359b-1515-4436-a3bf-bae7b33e47b4", - "status": "IN_PROGRESS", - "s3uri": "s3://mybucketname/pathelemene/file.csv" - } - } + "output": {} } ], "smithy.api#http": { "code": 200, - "method": "GET", - "uri": "/api/configuration/instances/{instanceId}/bill-of-materials-import-jobs/{jobId}" - }, - "smithy.api#readonly": {} + "method": "POST", + "uri": "/api/tags/{resourceArn}" + } } }, - "com.amazonaws.supplychain#GetBillOfMaterialsImportJobRequest": { + "com.amazonaws.supplychain#TagResourceRequest": { "type": "structure", "members": { - "instanceId": { - "target": "com.amazonaws.supplychain#UUID", + "resourceArn": { + "target": "com.amazonaws.supplychain#AscResourceArn", "traits": { - "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#documentation": "

The Amazon Web Services Supply Chain resource ARN that needs to be tagged.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "jobId": { - "target": "com.amazonaws.supplychain#UUID", + "tags": { + "target": "com.amazonaws.supplychain#TagMap", "traits": { - "smithy.api#documentation": "

The BillOfMaterialsImportJob identifier.

", - "smithy.api#httpLabel": {}, + "smithy.api#documentation": "

The tags of the Amazon Web Services Supply Chain resource to be created.

", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The request parameters for GetBillOfMaterialsImportJob.

", + "smithy.api#documentation": "

The request parameters of TagResource.

", "smithy.api#input": {} } }, - "com.amazonaws.supplychain#GetBillOfMaterialsImportJobResponse": { + "com.amazonaws.supplychain#TagResourceResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

The response parameters for TagResource.

", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.supplychain#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

Request was denied due to request throttling.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": { + "throttling": true + } + } + }, + "com.amazonaws.supplychain#UUID": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + } + }, + "com.amazonaws.supplychain#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Delete tags for an Amazon Web Services Supply Chain resource.

", + "smithy.api#examples": [ + { + "title": "Successful UntagResource", + "input": { + "resourceArn": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/data-integration-flows/my_flow1", + "tagKeys": [ + "tagKey1" + ] + }, + "output": {} + } + ], + "smithy.api#http": { + "code": 200, + "method": "DELETE", + "uri": "/api/tags/{resourceArn}" + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.supplychain#UntagResourceRequest": { "type": "structure", "members": { - "job": { - "target": "com.amazonaws.supplychain#BillOfMaterialsImportJob", + "resourceArn": { + "target": "com.amazonaws.supplychain#AscResourceArn", "traits": { - "smithy.api#documentation": "

The BillOfMaterialsImportJob.

", - "smithy.api#nestedProperties": {}, + "smithy.api#documentation": "

The Amazon Web Services Supply Chain resource ARN that needs to be untagged.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.supplychain#TagKeyList", + "traits": { + "smithy.api#documentation": "

The list of tag keys to be deleted for an Amazon Web Services Supply Chain resource.

", + "smithy.api#httpQuery": "tagKeys", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The response parameters for GetBillOfMaterialsImportJob.

", - "smithy.api#output": {} - } - }, - "com.amazonaws.supplychain#InternalServerException": { - "type": "structure", - "members": { - "message": { - "target": "smithy.api#String" - } - }, - "traits": { - "smithy.api#documentation": "

Unexpected error during processing of request.

", - "smithy.api#error": "server", - "smithy.api#httpError": 500, - "smithy.api#retryable": {} + "smithy.api#documentation": "

The request parameters of UntagResource.

", + "smithy.api#input": {} } }, - "com.amazonaws.supplychain#ResourceNotFoundException": { + "com.amazonaws.supplychain#UntagResourceResponse": { "type": "structure", - "members": { - "message": { - "target": "smithy.api#String" - } - }, + "members": {}, "traits": { - "smithy.api#documentation": "

Request references a resource which does not exist.

", - "smithy.api#error": "client", - "smithy.api#httpError": 404 + "smithy.api#documentation": "

The response parameters of UntagResource.

", + "smithy.api#output": {} } }, - "com.amazonaws.supplychain#SendDataIntegrationEvent": { + "com.amazonaws.supplychain#UpdateDataIntegrationFlow": { "type": "operation", "input": { - "target": "com.amazonaws.supplychain#SendDataIntegrationEventRequest" + "target": "com.amazonaws.supplychain#UpdateDataIntegrationFlowRequest" }, "output": { - "target": "com.amazonaws.supplychain#SendDataIntegrationEventResponse" + "target": "com.amazonaws.supplychain#UpdateDataIntegrationFlowResponse" }, "errors": [ { "target": "com.amazonaws.supplychain#AccessDeniedException" }, - { - "target": "com.amazonaws.supplychain#ConflictException" - }, { "target": "com.amazonaws.supplychain#InternalServerException" }, { "target": "com.amazonaws.supplychain#ResourceNotFoundException" }, - { - "target": "com.amazonaws.supplychain#ServiceQuotaExceededException" - }, { "target": "com.amazonaws.supplychain#ThrottlingException" }, @@ -1303,317 +5257,619 @@ } ], "traits": { - "smithy.api#documentation": "

Send the transactional data payload for the event with real-time data for analysis or monitoring. The real-time data events are stored in an Amazon Web Services service before being processed and stored in data lake. \n New data events are synced with data lake at 5 PM GMT everyday. The updated transactional data is available in data lake after ingestion.

", + "smithy.api#documentation": "

Update the DataIntegrationFlow.

", "smithy.api#examples": [ { - "title": "Successful SendDataIntegrationEvent for inboundorder event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.inboundorder", - "data": "{\"id\": \"inbound-order-id-test-123\", \"tpartner_id\": \"partner-id-test-123\" }", - "eventGroupId": "inboundOrderId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "c4132c1d-8f60-44a2-9932-f723c4f7b8a7" - } - }, - { - "title": "Successful SendDataIntegrationEvent for inboundorderline event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.inboundorderline", - "data": "{\"id\": \"inbound-order-line-id-test-123\", \"order_id\": \"order-id-test-123\", \"tpartner_id\": \"partner-id-test-123\", \"product_id\": \"product-id-test-123\", \"quantity_submitted\": \"100.0\" }", - "eventGroupId": "inboundOrderLineId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "45d95db2-d106-40e0-aa98-f1204230a691" - } - }, - { - "title": "Successful SendDataIntegrationEvent for inboundorderlineschedule event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.inboundorderlineschedule", - "data": "{\"id\": \"inbound-order-line-schedule-id-test-123\", \"order_id\": \"order-id-test-123\", \"order_line_id\": \"order-line-id-test-123\", \"product_id\": \"product-id-test-123\"}", - "eventGroupId": "inboundOrderLineScheduleId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "5abba995-7735-4d1e-95c4-7cc93e48cf9f" - } - }, - { - "title": "Successful SendDataIntegrationEvent for forecast event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.forecast", - "data": "{\"snapshot_date\": \"1672470400000\", \"product_id\": \"product-id-test-123\", \"site_id\": \"site-id-test-123\", \"region_id\": \"region-id-test-123\", \"product_group_id\": \"product-group-id-test-123\", \"forecast_start_dttm\": \"1672470400000\", \"forecast_end_dttm\": \"1672470400000\" }", - "eventGroupId": "forecastId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "29312d5b-f499-4dcd-b017-3dab3cd34d61" - } - }, - { - "title": "Successful SendDataIntegrationEvent for inventorylevel event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.inventorylevel", - "data": "{\"snapshot_date\": \"1672470400000\", \"site_id\": \"site-id-test-123\", \"product_id\": \"product-id-test-123\", \"on_hand_inventory\": \"100.0\", \"inv_condition\": \"good\", \"lot_number\": \"lot-number-test-123\"}", - "eventGroupId": "inventoryLevelId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "3aa78324-acd8-4fdd-a19e-231ea003c2b3" - } - }, - { - "title": "Successful SendDataIntegrationEvent for outboundorderline event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.outboundorderline", - "data": "{\"id\": \"outbound-orderline-id-test-123\", \"cust_order_id\": \"cust-order-id-test-123\", \"product_id\": \"product-id-test-123\" }", - "eventGroupId": "outboundOrderLineId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "959b7ef9-5e2d-4795-b1ca-5b16a3eb6b89" - } - }, - { - "title": "Successful SendDataIntegrationEvent for outboundshipment event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.outboundshipment", - "data": "{\"id\": 
\"outbound-shipment-id-test-123\", \"cust_order_id\": \"cust-order-id-test-123\", \"cust_order_line_id\": \"cust-order-line-id-test-123\", \"product_id\": \"product-id-test-123\" }", - "eventGroupId": "outboundShipmentId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "59feded3-5e46-4126-81bf-0137ca176ee0" - } - }, - { - "title": "Successful SendDataIntegrationEvent for processheader event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.processheader", - "data": "{\"process_id\": \"process-id-test-123\" }", - "eventGroupId": "processHeaderId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "564130eb-2d8a-4550-a768-ddf0daf7b4a9" - } - }, - { - "title": "Successful SendDataIntegrationEvent for processoperation event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.processoperation", - "data": "{\"process_operation_id\": \"process-operation-id-test-123\", \"process_id\": \"process-id-test-123\" }", - "eventGroupId": "processOperationId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "db5df408-89c7-4b9f-a326-016f6c2b3396" - } - }, - { - "title": "Successful SendDataIntegrationEvent for processproduct event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.processproduct", - "data": "{\"process_product_id\": \"process-product-id-test-123\", \"process_id\": \"process-id-test-123\" }", - "eventGroupId": "processProductId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "6929b275-485e-4035-a798-99077ca6d669" - } - }, - { - "title": "Successful SendDataIntegrationEvent for reservation event type", - "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.reservation", - "data": "{\"reservation_id\": \"reservation-id-test-123\", \"reservation_detail_id\": \"reservation-detail-id-test-123\" }", - "eventGroupId": "reservationId", - "eventTimestamp": 1.515531081123E9 - }, - "output": { - "eventId": "f6c55a8b-fde2-44f6-848a-9b4336c77209" - } - }, - { - "title": "Successful SendDataIntegrationEvent for shipment event type", + "title": "Successful UpdateDataIntegrationFlow for s3 to dataset flow to update SQL transformation", "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.shipment", - "data": "{\"id\": \"shipment-id-test-123\", \"supplier_tpartner_id\": \"supplier-tpartner-id-test-123\", \"product_id\": \"product-id-test-123\", \"order_id\": \"order-id-test-123\", \"order_line_id\": \"order-line-id-test-123\", \"package_id\": \"package-id-test-123\" }", - "eventGroupId": "shipmentId", - "eventTimestamp": 1.515531081123E9 + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow", + "sources": [ + { + "sourceType": "S3", + "sourceName": "testSourceName", + "s3Source": { + "bucketName": "aws-supply-chain-data-b8c7bb28-a576-4334-b481-6d6e8e47371f", + "prefix": "example-prefix" + } + } + ], + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT connection_id, bukrs AS id, txtmd AS description FROM testSourceName WHERE langu = 'E'" + } + }, + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset" + } + } }, "output": { - "eventId": 
"61d079d8-3f56-49bb-b35a-c0271a4e4f0a" + "flow": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "testStagingFlow", + "sources": [ + { + "sourceType": "S3", + "sourceName": "testSourceName", + "s3Source": { + "bucketName": "aws-supply-chain-data-b8c7bb28-a576-4334-b481-6d6e8e47371f", + "prefix": "example-prefix" + } + } + ], + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset" + } + }, + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT connection_id, bukrs AS id, txtmd AS description FROM testSourceName WHERE langu = 'E'" + } + }, + "createdTime": 1.72495640044E9, + "lastModifiedTime": 1.73245640577E9 + } } }, { - "title": "Successful SendDataIntegrationEvent for shipmentstop event type", + "title": "Successful UpdateDataIntegrationFlow for dataset to dataset flow to update sources", "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.shipmentstop", - "data": "{\"shipment_stop_id\": \"shipment-stop-id-test-123\", \"shipment_id\": \"shipment-id-test-123\" }", - "eventGroupId": "shipmentStopId", - "eventTimestamp": 1.515531081123E9 + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "trading-partner", + "sources": [ + { + "sourceType": "DATASET", + "sourceName": "testSourceName1", + "datasetSource": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset1" + } + }, + { + "sourceType": "DATASET", + "sourceName": "testSourceName2", + "datasetSource": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset2_updated" + } + } + ], + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT S1.id AS id, S1.poc_org_unit_description AS description, S1.company_id AS company_id, S1.tpartner_type AS tpartner_type, S1.geo_id AS geo_id, S1.eff_start_date AS eff_start_date, S1.eff_end_date AS eff_end_date FROM testSourceName1 AS S1 LEFT JOIN testSourceName2 as S2 ON S1.id=S2.id" + } + }, + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/asc/datasets/trading_partner" + } + } }, "output": { - "eventId": "3610992a-fc2f-4da4-9beb-724994622ba1" + "flow": { + "instanceId": "8850c54e-e187-4fa7-89d4-6370f165174d", + "name": "trading-partner", + "sources": [ + { + "sourceType": "DATASET", + "sourceName": "testSourceName1", + "datasetSource": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset1" + } + }, + { + "sourceType": "DATASET", + "sourceName": "testSourceName2", + "datasetSource": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/default/datasets/my_staging_dataset2_updated" + } + } + ], + "transformation": { + "transformationType": "SQL", + "sqlTransformation": { + "query": "SELECT S1.id AS id, S1.poc_org_unit_description AS description, S1.company_id AS company_id, S1.tpartner_type AS tpartner_type, S1.geo_id AS geo_id, S1.eff_start_date AS eff_start_date, S1.eff_end_date AS eff_end_date FROM testSourceName1 
AS S1 LEFT JOIN testSourceName2 as S2 ON S1.id=S2.id" + } + }, + "target": { + "targetType": "DATASET", + "datasetTarget": { + "datasetIdentifier": "arn:aws:scn:us-east-1:123456789012:instance/8850c54e-e187-4fa7-89d4-6370f165174d/namespaces/asc/datasets/trading_partner" + } + }, + "createdTime": 1.72495640044E9, + "lastModifiedTime": 1.73245640577E9 + } } - }, + } + ], + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/api/data-integration/instance/{instanceId}/data-integration-flows/{name}" + } + } + }, + "com.amazonaws.supplychain#UpdateDataIntegrationFlowRequest": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowName", + "traits": { + "smithy.api#documentation": "

The name of the DataIntegrationFlow to be updated.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sources": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowSourceList", + "traits": { + "smithy.api#documentation": "

The new source configurations for the DataIntegrationFlow.

" + } + }, + "transformation": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTransformation", + "traits": { + "smithy.api#documentation": "

The new transformation configurations for the DataIntegrationFlow.

" + } + }, + "target": { + "target": "com.amazonaws.supplychain#DataIntegrationFlowTarget", + "traits": { + "smithy.api#documentation": "

The new target configurations for the DataIntegrationFlow.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The request parameters for UpdateDataIntegrationFlow.

", + "smithy.api#input": {} + } + }, + "com.amazonaws.supplychain#UpdateDataIntegrationFlowResponse": { + "type": "structure", + "members": { + "flow": { + "target": "com.amazonaws.supplychain#DataIntegrationFlow", + "traits": { + "smithy.api#documentation": "

The details of the updated DataIntegrationFlow.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The response parameters for UpdateDataIntegrationFlow.

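As the smithy.api#http trait shown above indicates, UpdateDataIntegrationFlow is bound to PATCH /api/data-integration/instance/{instanceId}/data-integration-flows/{name}: the two httpLabel members fill the URI template and everything else travels in the JSON body. A Foundation-only sketch of that binding follows; the endpoint host is a placeholder, and in practice the generated Soto client assembles and signs the request for you.

```swift
import Foundation

// Illustrative sketch of the http binding shown above:
// PATCH /api/data-integration/instance/{instanceId}/data-integration-flows/{name}
// The endpoint host below is a placeholder, not a verified service endpoint.
func updateFlowURL(instanceId: String, flowName: String) -> URL? {
    let encodedName = flowName.addingPercentEncoding(withAllowedCharacters: .urlPathAllowed) ?? flowName
    return URL(string: "https://example-endpoint.amazonaws.com/api/data-integration/instance/\(instanceId)/data-integration-flows/\(encodedName)")
}

if let url = updateFlowURL(instanceId: "8850c54e-e187-4fa7-89d4-6370f165174d",
                           flowName: "trading-partner") {
    // A real call PATCHes this URL with a SigV4-signed JSON body carrying
    // sources, transformation, and target.
    print(url.path)
}
```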
", + "smithy.api#output": {} + } + }, + "com.amazonaws.supplychain#UpdateDataLakeDataset": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#UpdateDataLakeDatasetRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#UpdateDataLakeDatasetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Update a data lake dataset.

", + "smithy.api#examples": [ { - "title": "Successful SendDataIntegrationEvent for shipmentstoporder event type", + "title": "Update description of an existing AWS Supply Chain inbound order dataset", "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.shipmentstoporder", - "data": "{\"shipment_stop_order_id\": \"shipment-stop-order-id-test-123\", \"shipment_stop_id\": \"shipment-stop-id-test-123\", \"shipment_id\": \"shipment-id-test-123\" }", - "eventGroupId": "shipmentStopOrderId", - "eventTimestamp": 1.515531081123E9 + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order", + "description": "This is an updated AWS Supply Chain inbound order dataset" }, "output": { - "eventId": "1d550a60-9321-4d25-a132-9dd4b2d9e934" + "dataset": { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/asc/datasets/inbound_order", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "asc", + "name": "inbound_order", + "description": "This is an updated AWS Supply Chain inbound order dataset", + "createdTime": 1.727116807751E9, + "lastModifiedTime": 1.727117453568E9, + "schema": { + "name": "InboundOrder", + "fields": [ + { + "name": "id", + "type": "STRING", + "isRequired": true + }, + { + "name": "tpartner_id", + "type": "STRING", + "isRequired": true + }, + { + "name": "connection_id", + "type": "STRING", + "isRequired": true + }, + { + "name": "order_type", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_status", + "type": "STRING", + "isRequired": false + }, + { + "name": "inbound_order_url", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_creation_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "company_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "to_site_id", + "type": "STRING", + "isRequired": false + }, + { + "name": "order_currency_uom", + "type": "STRING", + "isRequired": false + }, + { + "name": "vendor_currency_uom", + "type": "STRING", + "isRequired": false + }, + { + "name": "exchange_rate", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "exchange_rate_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "incoterm", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm2", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm_location_1", + "type": "STRING", + "isRequired": false + }, + { + "name": "incoterm_location_2", + "type": "STRING", + "isRequired": false + }, + { + "name": "submitted_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "agreement_start_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "agreement_end_date", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "shipping_instr_code", + "type": "STRING", + "isRequired": false + }, + { + "name": "payment_terms_code", + "type": "STRING", + "isRequired": false + }, + { + "name": "std_terms_agreement", + "type": "STRING", + "isRequired": false + }, + { + "name": "std_terms_agreement_ver", + "type": "STRING", + "isRequired": false + }, + { + "name": "agreement_number", + "type": "STRING", + "isRequired": false + }, + { + "name": "source", + "type": "STRING", + "isRequired": false + }, + { + "name": "source_update_dttm", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "source_event_id", + "type": "STRING", + "isRequired": false + }, + { + "name": 
"db_creation_dttm", + "type": "TIMESTAMP", + "isRequired": false + }, + { + "name": "db_updation_dttm", + "type": "TIMESTAMP", + "isRequired": false + } + ] + } + } } }, { - "title": "Successful SendDataIntegrationEvent for supplyplan event type", + "title": "Update description of an existing custom dataset", "input": { - "instanceId": "8928ae12-15e5-4441-825d-ec2184f0a43a", - "eventType": "scn.data.supplyplan", - "data": "{\"supply_plan_id\": \"supply-plan-id-test-123\" }", - "eventGroupId": "supplyPlanId", - "eventTimestamp": 1.515531081123E9 + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset", + "description": "This is an updated custom dataset" }, "output": { - "eventId": "9abaee56-5dc4-4c31-8250-3206a651d8a1" + "dataset": { + "arn": "arn:aws:scn:us-east-1:012345678910:instance/1877dd20-dee9-4639-8e99-cb67acf21fe5/namespaces/default/datasets/my_dataset", + "instanceId": "1877dd20-dee9-4639-8e99-cb67acf21fe5", + "namespace": "default", + "name": "my_dataset", + "description": "This is an updated custom dataset", + "createdTime": 1.727116807751E9, + "lastModifiedTime": 1.727117453568E9, + "schema": { + "name": "MyDataset", + "fields": [ + { + "name": "id", + "type": "INT", + "isRequired": true + }, + { + "name": "description", + "type": "STRING", + "isRequired": true + }, + { + "name": "price", + "type": "DOUBLE", + "isRequired": false + }, + { + "name": "creation_time", + "type": "TIMESTAMP", + "isRequired": false + } + ] + } + } } } ], "smithy.api#http": { "code": 200, - "method": "POST", - "uri": "/api-data/data-integration/instance/{instanceId}/data-integration-events" - }, - "smithy.api#idempotent": {} + "method": "PATCH", + "uri": "/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}" + } } }, - "com.amazonaws.supplychain#SendDataIntegrationEventRequest": { + "com.amazonaws.supplychain#UpdateDataLakeDatasetRequest": { "type": "structure", "members": { "instanceId": { "target": "com.amazonaws.supplychain#UUID", "traits": { - "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#documentation": "

The Amazon Web Services Supply Chain instance identifier.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "eventType": { - "target": "com.amazonaws.supplychain#DataIntegrationEventType", - "traits": { - "smithy.api#documentation": "

The data event type.

", - "smithy.api#required": {} - } - }, - "data": { - "target": "com.amazonaws.supplychain#DataIntegrationEventData", + "namespace": { + "target": "com.amazonaws.supplychain#DataLakeDatasetNamespace", "traits": { - "smithy.api#documentation": "

The data payload of the event. For more information on the data schema to use, see Data entities supported in AWS Supply Chain\n .

", + "smithy.api#documentation": "

The namespace of the dataset. The available values are asc (for AWS Supply Chain supported datasets) and default (for custom datasets).

\n ", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "eventGroupId": { - "target": "com.amazonaws.supplychain#DataIntegrationEventGroupId", + "name": { + "target": "com.amazonaws.supplychain#DataLakeDatasetName", "traits": { - "smithy.api#documentation": "

Event identifier (for example, orderId for InboundOrder) used for data sharing or partitioning.

", + "smithy.api#documentation": "

The name of the dataset. For the asc namespace, the name must be one of the supported data entities under https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html.

", + "smithy.api#httpLabel": {}, "smithy.api#required": {} } }, - "eventTimestamp": { - "target": "smithy.api#Timestamp", - "traits": { - "smithy.api#documentation": "

The event timestamp (in epoch seconds).

", - "smithy.api#timestampFormat": "epoch-seconds" - } - }, - "clientToken": { - "target": "com.amazonaws.supplychain#ClientToken", + "description": { + "target": "com.amazonaws.supplychain#DataLakeDatasetDescription", "traits": { - "smithy.api#documentation": "

The idempotent client token.

", - "smithy.api#idempotencyToken": {} + "smithy.api#documentation": "

The updated description of the data lake dataset.

" } } }, "traits": { - "smithy.api#documentation": "

The request parameters for SendDataIntegrationEvent.

", + "smithy.api#documentation": "

The request parameters of UpdateDataLakeDataset.

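For callers using the generated Swift client, a minimal sketch of the new operation is below. It assumes Soto's usual code generation (a SotoSupplyChain module, a SupplyChain service type, an UpdateDataLakeDatasetRequest struct with an alphabetical memberwise initializer, and an async updateDataLakeDataset method); exact names, argument order, and client construction vary between Soto releases.

```swift
import SotoSupplyChain

// Sketch only: type and method names are assumptions based on Soto's usual
// codegen conventions. Mirrors the "custom dataset" example above.
let client = AWSClient()                       // construction varies by Soto version
let supplyChain = SupplyChain(client: client, region: .useast1)

let request = SupplyChain.UpdateDataLakeDatasetRequest(
    description: "This is an updated custom dataset",
    instanceId: "1877dd20-dee9-4639-8e99-cb67acf21fe5",
    name: "my_dataset",
    namespace: "default"
)
let response = try await supplyChain.updateDataLakeDataset(request)
print(response.dataset)                        // updated dataset details

try await client.shutdown()
```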
", "smithy.api#input": {} } }, - "com.amazonaws.supplychain#SendDataIntegrationEventResponse": { + "com.amazonaws.supplychain#UpdateDataLakeDatasetResponse": { "type": "structure", "members": { - "eventId": { - "target": "com.amazonaws.supplychain#UUID", + "dataset": { + "target": "com.amazonaws.supplychain#DataLakeDataset", "traits": { - "smithy.api#documentation": "

The unique event identifier.

", + "smithy.api#documentation": "

The updated dataset details.

", + "smithy.api#nestedProperties": {}, "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

The response parameters for SendDataIntegrationEvent.

", + "smithy.api#documentation": "

The response parameters of UpdateDataLakeDataset.

", "smithy.api#output": {} } }, - "com.amazonaws.supplychain#ServiceQuotaExceededException": { - "type": "structure", - "members": { - "message": { - "target": "smithy.api#String" - } + "com.amazonaws.supplychain#UpdateInstance": { + "type": "operation", + "input": { + "target": "com.amazonaws.supplychain#UpdateInstanceRequest" + }, + "output": { + "target": "com.amazonaws.supplychain#UpdateInstanceResponse" }, + "errors": [ + { + "target": "com.amazonaws.supplychain#AccessDeniedException" + }, + { + "target": "com.amazonaws.supplychain#InternalServerException" + }, + { + "target": "com.amazonaws.supplychain#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.supplychain#ThrottlingException" + }, + { + "target": "com.amazonaws.supplychain#ValidationException" + } + ], "traits": { - "smithy.api#documentation": "

Request would cause a service quota to be exceeded.

", - "smithy.api#error": "client", - "smithy.api#httpError": 402 + "smithy.api#documentation": "

Update an AWS Supply Chain instance.

", + "smithy.api#examples": [ + { + "title": "Successful UpdateInstance request", + "input": { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "instanceName": "updated example instance name", + "instanceDescription": "updated example instance description" + }, + "output": { + "instance": { + "instanceId": "9e193580-7cc5-45f7-9609-c43ba0ada793", + "awsAccountId": "123456789012", + "state": "Active", + "createdTime": 172615383136, + "lastModifiedTime": 172615383136, + "instanceName": "updated example instance name", + "instanceDescription": "updated example instance description", + "kmsKeyArn": "arn:aws:kms:us-west-2:123456789012:key/b14ffc39-b7d4-45ab-991a-6257a7f0d24d", + "versionNumber": 2.0 + } + } + } + ], + "smithy.api#http": { + "code": 200, + "method": "PATCH", + "uri": "/api/instance/{instanceId}" + } } }, - "com.amazonaws.supplychain#ThrottlingException": { + "com.amazonaws.supplychain#UpdateInstanceRequest": { "type": "structure", "members": { - "message": { - "target": "smithy.api#String" + "instanceId": { + "target": "com.amazonaws.supplychain#UUID", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance identifier.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "instanceName": { + "target": "com.amazonaws.supplychain#InstanceName", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance name.

" + } + }, + "instanceDescription": { + "target": "com.amazonaws.supplychain#InstanceDescription", + "traits": { + "smithy.api#documentation": "

The AWS Supply Chain instance description.

" + } } }, "traits": { - "smithy.api#documentation": "

Request was denied due to request throttling.

", - "smithy.api#error": "client", - "smithy.api#httpError": 429, - "smithy.api#retryable": { - "throttling": true - } + "smithy.api#documentation": "

The request parameters for UpdateInstance.

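The same pattern applies to UpdateInstance, which PATCHes /api/instance/{instanceId}. A hedged Soto-style sketch, again assuming the usual generated names (UpdateInstanceRequest, updateInstance), is shown below.

```swift
import SotoSupplyChain

// Sketch only: generated type and method names are assumptions based on Soto's
// usual conventions. Renames an instance and updates its description, as in the
// example above.
let client = AWSClient()
let supplyChain = SupplyChain(client: client, region: .useast1)

let response = try await supplyChain.updateInstance(
    .init(
        instanceDescription: "updated example instance description",
        instanceId: "9e193580-7cc5-45f7-9609-c43ba0ada793",
        instanceName: "updated example instance name"
    )
)
print(response.instance)   // includes state, version number, and timestamps

try await client.shutdown()
```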
", + "smithy.api#input": {} } }, - "com.amazonaws.supplychain#UUID": { - "type": "string", + "com.amazonaws.supplychain#UpdateInstanceResponse": { + "type": "structure", + "members": { + "instance": { + "target": "com.amazonaws.supplychain#Instance", + "traits": { + "smithy.api#documentation": "

The instance resource data details.

", + "smithy.api#nestedProperties": {}, + "smithy.api#required": {} + } + } + }, "traits": { - "smithy.api#length": { - "min": 36, - "max": 36 - }, - "smithy.api#pattern": "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + "smithy.api#documentation": "

The response parameters for UpdateInstance.

", + "smithy.api#output": {} } }, "com.amazonaws.supplychain#ValidationException": { diff --git a/models/timestream-influxdb.json b/models/timestream-influxdb.json index e75ee83649..b7ca551f5c 100644 --- a/models/timestream-influxdb.json +++ b/models/timestream-influxdb.json @@ -939,6 +939,13 @@ "traits": { "smithy.api#documentation": "

A list of key-value pairs to associate with the DB instance.

" } + }, + "port": { + "target": "com.amazonaws.timestreaminfluxdb#Port", + "traits": { + "smithy.api#default": 8086, + "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

\n

Valid Values: 1024-65535

\n

Default: 8086

\n

Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680

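The new Port member is constrained both by the Port shape's range (1024-65535) and by the excluded ranges listed above. A hypothetical client-side check, not part of the SDK, that mirrors those documented constraints:

```swift
// Hypothetical helper mirroring the documented port constraints for
// Timestream for InfluxDB: 1024-65535, excluding 2375-2376, 7788-7799,
// 8090, and 51678-51680. The default is 8086.
func isValidInfluxDBPort(_ port: Int) -> Bool {
    let reserved: [ClosedRange<Int>] = [2375...2376, 7788...7799, 8090...8090, 51678...51680]
    guard (1024...65535).contains(port) else { return false }
    return !reserved.contains { $0.contains(port) }
}

assert(isValidInfluxDBPort(8086))   // the default
assert(!isValidInfluxDBPort(8090))  // explicitly excluded
assert(!isValidInfluxDBPort(443))   // below the allowed range
```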
" + } } }, "traits": { @@ -986,6 +993,12 @@ "smithy.api#documentation": "

The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.

" } }, + "port": { + "target": "com.amazonaws.timestreaminfluxdb#Port", + "traits": { + "smithy.api#documentation": "

The port number on which InfluxDB accepts connections. The default value is 8086.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -1212,7 +1225,7 @@ "min": 3, "max": 40 }, - "smithy.api#pattern": "^[a-zA-z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$" + "smithy.api#pattern": "^[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$" } }, "com.amazonaws.timestreaminfluxdb#DbInstanceResource": { @@ -1282,6 +1295,12 @@ "smithy.api#documentation": "

The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.

" } }, + "port": { + "target": "com.amazonaws.timestreaminfluxdb#Port", + "traits": { + "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -1397,7 +1416,7 @@ "min": 3, "max": 64 }, - "smithy.api#pattern": "^[a-zA-z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$" + "smithy.api#pattern": "^[a-zA-Z][a-zA-Z0-9]*(-[a-zA-Z0-9]+)*$" } }, "com.amazonaws.timestreaminfluxdb#DbParameterGroupResource": { @@ -1577,6 +1596,12 @@ "smithy.api#documentation": "

The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.

" } }, + "port": { + "target": "com.amazonaws.timestreaminfluxdb#Port", + "traits": { + "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -1672,6 +1697,60 @@ } } }, + "com.amazonaws.timestreaminfluxdb#Duration": { + "type": "structure", + "members": { + "durationType": { + "target": "com.amazonaws.timestreaminfluxdb#DurationType", + "traits": { + "smithy.api#documentation": "

The type of duration for InfluxDB parameters.

", + "smithy.api#required": {} + } + }, + "value": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

The value of duration for InfluxDB parameters.

", + "smithy.api#range": { + "min": 0 + }, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Duration for InfluxDB parameters in Timestream for InfluxDB.

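Several of the new InfluxDB engine parameters below take this Duration shape (a durationType plus a non-negative value). The following is a small illustrative mirror of the shape for reasoning about values such as the documented 3 minute httpIdleTimeout default; the real generated types live in the SotoTimestreamInfluxDB module and may be named differently.

```swift
// Illustrative mirror of the Duration shape introduced above, not the generated type.
enum DurationType: String { case hours, minutes, seconds, milliseconds }

struct ParameterDuration {
    let durationType: DurationType
    let value: Int64   // the model constrains this to >= 0

    var seconds: Double {
        switch durationType {
        case .hours: return Double(value) * 3600
        case .minutes: return Double(value) * 60
        case .seconds: return Double(value)
        case .milliseconds: return Double(value) / 1000
        }
    }
}

let httpIdleTimeout = ParameterDuration(durationType: .minutes, value: 3) // documented default
print(httpIdleTimeout.seconds) // 180.0
```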
" + } + }, + "com.amazonaws.timestreaminfluxdb#DurationType": { + "type": "enum", + "members": { + "HOURS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "hours" + } + }, + "MINUTES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "minutes" + } + }, + "SECONDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "seconds" + } + }, + "MILLISECONDS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "milliseconds" + } + } + } + }, "com.amazonaws.timestreaminfluxdb#GetDbInstance": { "type": "operation", "input": { @@ -1757,6 +1836,12 @@ "smithy.api#documentation": "

The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.

" } }, + "port": { + "target": "com.amazonaws.timestreaminfluxdb#Port", + "traits": { + "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -1977,6 +2062,228 @@ "traits": { "smithy.api#documentation": "

Disable the HTTP /metrics endpoint which exposes internal InfluxDB metrics.

\n

Default: false

" } + }, + "httpIdleTimeout": { + "target": "com.amazonaws.timestreaminfluxdb#Duration", + "traits": { + "smithy.api#documentation": "

Maximum duration the server should keep established connections alive while waiting for new requests. Set to 0 for no timeout.

\n

Default: 3 minutes

" + } + }, + "httpReadHeaderTimeout": { + "target": "com.amazonaws.timestreaminfluxdb#Duration", + "traits": { + "smithy.api#documentation": "

Maximum duration the server should try to read HTTP headers for new requests. Set to 0 for no timeout.

\n

Default: 10 seconds

" + } + }, + "httpReadTimeout": { + "target": "com.amazonaws.timestreaminfluxdb#Duration", + "traits": { + "smithy.api#documentation": "

Maximum duration the server should try to read the entirety of new requests. Set to 0 for no timeout.

\n

Default: 0

" + } + }, + "httpWriteTimeout": { + "target": "com.amazonaws.timestreaminfluxdb#Duration", + "traits": { + "smithy.api#documentation": "

Maximum duration the server should spend processing and responding to write requests. Set to 0 for no timeout.

\n

Default: 0

" + } + }, + "influxqlMaxSelectBuckets": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Maximum number of group by time buckets a SELECT statement can create. 0 allows an unlimited number of buckets.

\n

Default: 0

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "influxqlMaxSelectPoint": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Maximum number of points a SELECT statement can process. 0 allows an unlimited number of points. InfluxDB checks the point count every second (so queries exceeding the maximum aren’t immediately aborted).

\n

Default: 0

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "influxqlMaxSelectSeries": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Maximum number of series a SELECT statement can return. 0 allows an unlimited number of series.

\n

Default: 0

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "pprofDisabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Disable the /debug/pprof HTTP endpoint. This endpoint provides runtime profiling data and can be helpful when debugging.

\n

Default: false

" + } + }, + "queryInitialMemoryBytes": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Initial bytes of memory allocated for a query.

\n

Default: 0

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "queryMaxMemoryBytes": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Maximum total bytes of memory allowed for queries. Setting to 0 allows an unlimited amount of memory.

\n

Default: 0

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "queryMemoryBytes": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Maximum bytes of memory allowed for a single query. Must be greater or equal to queryInitialMemoryBytes.

\n

Default: 0

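The query memory parameters above are interdependent: queryMemoryBytes must be greater than or equal to queryInitialMemoryBytes, and each accepts values from 0 to 1,000,000,000,000. A hypothetical pre-flight check, not an SDK API, expressing that documented rule:

```swift
// Hypothetical helper: validates the documented relationship between
// queryInitialMemoryBytes and queryMemoryBytes.
func queryMemorySettingsAreConsistent(initialBytes: Int64, perQueryBytes: Int64) -> Bool {
    let allowed: ClosedRange<Int64> = 0...1_000_000_000_000     // smithy.api#range above
    return allowed.contains(initialBytes)
        && allowed.contains(perQueryBytes)
        && perQueryBytes >= initialBytes
}

assert(queryMemorySettingsAreConsistent(initialBytes: 0, perQueryBytes: 0))                    // defaults
assert(queryMemorySettingsAreConsistent(initialBytes: 1_048_576, perQueryBytes: 10_485_760))
assert(!queryMemorySettingsAreConsistent(initialBytes: 10_485_760, perQueryBytes: 1_048_576))  // violates the rule
```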
", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "sessionLength": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Specifies the Time to Live (TTL) in minutes for newly created user sessions.

\n

Default: 60

", + "smithy.api#range": { + "min": 1, + "max": 2880 + } + } + }, + "sessionRenewDisabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Disables automatically extending a user’s session TTL on each request. By default, every request sets the session’s expiration time to five minutes from now. When disabled, sessions expire after the specified session length and the user is redirected to the login page, even if recently active.

\n

Default: false

" + } + }, + "storageCacheMaxMemorySize": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Maximum size (in bytes) a shard’s cache can reach before it starts rejecting writes. Must be greater than storageCacheSnapshotMemorySize and lower than the instance’s total memory capacity. We recommend setting it to below 15% of the total memory capacity.

\n

Default: 1073741824

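A worked example of the sizing guidance above, using the documented defaults (25 MiB snapshot size, 1 GiB cache limit) against a hypothetical 16 GiB instance; the 15% figure is the recommendation from the parameter description, not an enforced limit.

```swift
// Hypothetical sizing check: the shard cache limit should exceed
// storageCacheSnapshotMemorySize and stay below ~15% of instance memory.
let totalInstanceMemoryBytes: Int64 = 16 * 1024 * 1024 * 1024   // example: a 16 GiB instance
let snapshotMemorySize: Int64 = 26_214_400                      // documented default (25 MiB)
let storageCacheMaxMemorySize: Int64 = 1_073_741_824            // documented default (1 GiB)

let recommendedCeiling = Int64(Double(totalInstanceMemoryBytes) * 0.15)

assert(storageCacheMaxMemorySize > snapshotMemorySize)
assert(storageCacheMaxMemorySize <= recommendedCeiling)         // 1 GiB is under 15% of 16 GiB
print("recommended cache ceiling:", recommendedCeiling)
```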
", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "storageCacheSnapshotMemorySize": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Size (in bytes) at which the storage engine will snapshot the cache and write it to a TSM file to make more memory available. Must not be greater than storageCacheMaxMemorySize.

\n

Default: 26214400

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "storageCacheSnapshotWriteColdDuration": { + "target": "com.amazonaws.timestreaminfluxdb#Duration", + "traits": { + "smithy.api#documentation": "

Duration at which the storage engine will snapshot the cache and write it to a new TSM file if the shard hasn’t received writes or deletes.

\n

Default: 10 minutes

" + } + }, + "storageCompactFullWriteColdDuration": { + "target": "com.amazonaws.timestreaminfluxdb#Duration", + "traits": { + "smithy.api#documentation": "

Duration at which the storage engine will compact all TSM files in a shard if it hasn't received writes or deletes.

\n

Default: 4 hours

" + } + }, + "storageCompactThroughputBurst": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Rate limit (in bytes per second) that TSM compactions can write to disk.

\n

Default: 50331648

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "storageMaxConcurrentCompactions": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Maximum number of full and level compactions that can run concurrently. A value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater than zero limits compactions to that value. This setting does not apply to cache snapshotting.

\n

Default: 0

", + "smithy.api#range": { + "min": 0, + "max": 64 + } + } + }, + "storageMaxIndexLogFileSize": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Size (in bytes) at which an index write-ahead log (WAL) file will compact into an index file. Lower sizes will cause log files to be compacted more quickly and result in lower heap usage at the expense of write throughput.

\n

Default: 1048576

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "storageNoValidateFieldSize": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Skip field size validation on incoming write requests.

\n

Default: false

" + } + }, + "storageRetentionCheckInterval": { + "target": "com.amazonaws.timestreaminfluxdb#Duration", + "traits": { + "smithy.api#documentation": "

Interval of retention policy enforcement checks. Must be greater than 0.

\n

Default: 30 minutes

" + } + }, + "storageSeriesFileMaxConcurrentSnapshotCompactions": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Maximum number of snapshot compactions that can run concurrently across all series partitions in a database.

\n

Default: 0

", + "smithy.api#range": { + "min": 0, + "max": 64 + } + } + }, + "storageSeriesIdSetCacheSize": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

Size of the internal cache used in the TSI index to store previously calculated series results. Cached results are returned quickly rather than needing to be recalculated when a subsequent query with the same tag key/value predicate is executed. Setting this value to 0 will disable the cache and may decrease query performance.

\n

Default: 100

", + "smithy.api#range": { + "min": 0, + "max": 1000000000000 + } + } + }, + "storageWalMaxConcurrentWrites": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

Maximum number of writes to the WAL directory to attempt at the same time. Setting this value to 0 results in two times the number of processing units available.

\n

Default: 0

", + "smithy.api#range": { + "min": 0, + "max": 256 + } + } + }, + "storageWalMaxWriteDelay": { + "target": "com.amazonaws.timestreaminfluxdb#Duration", + "traits": { + "smithy.api#documentation": "

Maximum amount of time a write request to the WAL directory will wait when the maximum number of concurrent active writes to the WAL directory has been met. Set to 0 to disable the timeout.

\n

Default: 10 minutes

" + } + }, + "uiDisabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Disable the InfluxDB user interface (UI). The UI is enabled by default.

\n

Default: false

" + } } }, "traits": { @@ -2193,7 +2500,21 @@ } }, "traits": { - "smithy.api#input": {} + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.timestreaminfluxdb#DbInstanceResource", + "ids": { + "dbInstanceIdentifier": "resourceArn" + } + }, + { + "resource": "com.amazonaws.timestreaminfluxdb#DbParameterGroupResource", + "ids": { + "dbParameterGroupIdentifier": "resourceArn" + } + } + ] } }, "com.amazonaws.timestreaminfluxdb#ListTagsForResourceResponse": { @@ -2299,6 +2620,15 @@ "smithy.api#sensitive": {} } }, + "com.amazonaws.timestreaminfluxdb#Port": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1024, + "max": 65535 + } + } + }, "com.amazonaws.timestreaminfluxdb#RequestTagMap": { "type": "map", "key": { @@ -2523,7 +2853,21 @@ } }, "traits": { - "smithy.api#input": {} + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.timestreaminfluxdb#DbInstanceResource", + "ids": { + "dbInstanceIdentifier": "resourceArn" + } + }, + { + "resource": "com.amazonaws.timestreaminfluxdb#DbParameterGroupResource", + "ids": { + "dbParameterGroupIdentifier": "resourceArn" + } + } + ] } }, "com.amazonaws.timestreaminfluxdb#TagValue": { @@ -2619,7 +2963,21 @@ } }, "traits": { - "smithy.api#input": {} + "smithy.api#input": {}, + "smithy.api#references": [ + { + "resource": "com.amazonaws.timestreaminfluxdb#DbInstanceResource", + "ids": { + "dbInstanceIdentifier": "resourceArn" + } + }, + { + "resource": "com.amazonaws.timestreaminfluxdb#DbParameterGroupResource", + "ids": { + "dbParameterGroupIdentifier": "resourceArn" + } + } + ] } }, "com.amazonaws.timestreaminfluxdb#UpdateDbInstance": { @@ -2681,6 +3039,12 @@ "smithy.api#documentation": "

The id of the DB parameter group to assign to your DB instance. DB parameter groups specify how the database is configured. For example, DB parameter groups can specify the limit for query concurrency.

" } }, + "port": { + "target": "com.amazonaws.timestreaminfluxdb#Port", + "traits": { + "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

\n

If you change the Port value, your database restarts immediately.

\n

Valid Values: 1024-65535

\n

Default: 8086

\n

Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680

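Changing the port through UpdateDbInstance restarts the database immediately, so it is worth doing deliberately. A hedged Soto-style sketch follows, assuming the SotoTimestreamInfluxDB module, a TimestreamInfluxDB service type, and an UpdateDbInstance input shape whose identifier and new port members follow Soto's naming; the exact generated names may differ.

```swift
import SotoTimestreamInfluxDB

// Sketch only: type, member, and method names are assumptions based on Soto's
// usual codegen. Moving the instance to port 8087 triggers an immediate restart.
let client = AWSClient()
let influx = TimestreamInfluxDB(client: client, region: .uswest2)

let response = try await influx.updateDbInstance(
    .init(identifier: "db-EXAMPLE1234567",   // placeholder instance identifier
          port: 8087)
)
print(response)   // the output now also reports the active port

try await client.shutdown()
```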
" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { @@ -2739,6 +3103,12 @@ "smithy.api#documentation": "

The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.

" } }, + "port": { + "target": "com.amazonaws.timestreaminfluxdb#Port", + "traits": { + "smithy.api#documentation": "

The port number on which InfluxDB accepts connections.

" + } + }, "dbInstanceType": { "target": "com.amazonaws.timestreaminfluxdb#DbInstanceType", "traits": { diff --git a/models/transcribe-streaming.json b/models/transcribe-streaming.json index aba0f389cc..aaba2505c6 100644 --- a/models/transcribe-streaming.json +++ b/models/transcribe-streaming.json @@ -108,7 +108,7 @@ } }, "traits": { - "smithy.api#documentation": "

One or more arguments to the StartStreamTranscription, \n StartMedicalStreamTranscription, or StartCallAnalyticsStreamTranscription \n operation was not valid. For example, MediaEncoding or LanguageCode \n used not valid values. Check the specified parameters and try your request again.

", + "smithy.api#documentation": "

One or more arguments to the StartStreamTranscription, \n StartMedicalStreamTranscription, or StartCallAnalyticsStreamTranscription \n operation was not valid. For example, MediaEncoding or LanguageCode \n used unsupported values. Check the specified parameters and try your request again.

", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -317,7 +317,7 @@ } }, "traits": { - "smithy.api#documentation": "

Contains detailed information about your Call Analytics streaming session. These details are \n provided in the UtteranceEvent and CategoryEvent objects.

", + "smithy.api#documentation": "

Contains detailed information about your real-time Call Analytics session. These details are \n provided in the UtteranceEvent and CategoryEvent objects.

", "smithy.api#streaming": {} } }, @@ -421,7 +421,7 @@ "PostCallAnalyticsSettings": { "target": "com.amazonaws.transcribestreaming#PostCallAnalyticsSettings", "traits": { - "smithy.api#documentation": "

Provides additional optional settings for your Call Analytics post-call request, including \n encryption and output locations for your redacted and unredacted transcript.

" + "smithy.api#documentation": "

Provides additional optional settings for your Call Analytics post-call request, including \n encryption and output locations for your redacted transcript.

\n

\n PostCallAnalyticsSettings provides you with the same insights as a \n Call Analytics post-call transcription. Refer to Post-call analytics for more information \n on this feature.

" } } }, @@ -732,16 +732,256 @@ "smithy.api#enumValue": "zh-CN" } }, + "TH_TH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "th-TH" + } + }, + "ES_ES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "es-ES" + } + }, + "AR_SA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ar-SA" + } + }, + "PT_PT": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "pt-PT" + } + }, + "CA_ES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ca-ES" + } + }, + "AR_AE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ar-AE" + } + }, "HI_IN": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "hi-IN" } }, - "TH_TH": { + "ZH_HK": { "target": "smithy.api#Unit", "traits": { - "smithy.api#enumValue": "th-TH" + "smithy.api#enumValue": "zh-HK" + } + }, + "NL_NL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "nl-NL" + } + }, + "NO_NO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "no-NO" + } + }, + "SV_SE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "sv-SE" + } + }, + "PL_PL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "pl-PL" + } + }, + "FI_FI": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "fi-FI" + } + }, + "ZH_TW": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "zh-TW" + } + }, + "EN_IN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "en-IN" + } + }, + "EN_IE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "en-IE" + } + }, + "EN_NZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "en-NZ" + } + }, + "EN_AB": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "en-AB" + } + }, + "EN_ZA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "en-ZA" + } + }, + "EN_WL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "en-WL" + } + }, + "DE_CH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "de-CH" + } + }, + "AF_ZA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "af-ZA" + } + }, + "EU_ES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "eu-ES" + } + }, + "HR_HR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "hr-HR" + } + }, + "CS_CZ": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "cs-CZ" + } + }, + "DA_DK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "da-DK" + } + }, + "FA_IR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "fa-IR" + } + }, + "GL_ES": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "gl-ES" + } + }, + "EL_GR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "el-GR" + } + }, + "HE_IL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "he-IL" + } + }, + "ID_ID": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "id-ID" + } + }, + "LV_LV": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "lv-LV" + } + }, + "MS_MY": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ms-MY" + } + }, + "RO_RO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ro-RO" + } + }, + "RU_RU": { + 
"target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ru-RU" + } + }, + "SR_RS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "sr-RS" + } + }, + "SK_SK": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "sk-SK" + } + }, + "SO_SO": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "so-SO" + } + }, + "TL_PH": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "tl-PH" + } + }, + "UK_UA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "uk-UA" + } + }, + "VI_VN": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "vi-VN" + } + }, + "ZU_ZA": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "zu-ZA" } } } @@ -1200,12 +1440,12 @@ "OutputEncryptionKMSKeyId": { "target": "com.amazonaws.transcribestreaming#String", "traits": { - "smithy.api#documentation": "

The KMS key you want to use to encrypt your Call Analytics post-call\n output.

\n

If using a key located in the current\n Amazon Web Services account, you can specify your KMS key in one of four\n ways:

1. Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
2. Use an alias for the KMS key ID. For example, alias/ExampleAlias.
3. Use the Amazon Resource Name (ARN) for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.
4. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias.

If using a key located in a different Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways:

1. Use the ARN for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.
2. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias.
\n

Note that the user making the request must\n have permission to use the specified KMS key.

" + "smithy.api#documentation": "

The KMS key you want to use to encrypt your Call Analytics post-call\n output.

\n

If using a key located in the current\n Amazon Web Services account, you can specify your KMS key in one of four\n ways:

1. Use the KMS key ID itself. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
2. Use an alias for the KMS key ID. For example, alias/ExampleAlias.
3. Use the Amazon Resource Name (ARN) for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.
4. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias.

If using a key located in a different Amazon Web Services account than the current Amazon Web Services account, you can specify your KMS key in one of two ways:

1. Use the ARN for the KMS key ID. For example, arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab.
2. Use the ARN for the KMS key alias. For example, arn:aws:kms:region:account-ID:alias/ExampleAlias.
\n

Note that the role making the \n request must have permission to use the specified KMS key.

" } } }, "traits": { - "smithy.api#documentation": "

Allows you to specify additional settings for your streaming Call Analytics \n post-call request, including output locations for your redacted and unredacted \n transcript, which IAM role to use, and, optionally, which encryption key to \n use.

\n

\n ContentRedactionOutput, DataAccessRoleArn, and\n OutputLocation are required fields.

" + "smithy.api#documentation": "

Allows you to specify additional settings for your Call Analytics post-call request, \n including output locations for your redacted transcript, which IAM role to use, \n and which encryption key to use.

\n

\n DataAccessRoleArn and OutputLocation are required \n fields.

\n

\n PostCallAnalyticsSettings provides you with the same insights as a \n Call Analytics post-call transcription. Refer to Post-call analytics for more information \n on this feature.

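A hedged sketch of filling in these settings with the generated Soto types, assuming the SotoTranscribeStreaming module and Soto's usual lowerCamelCase member names; the ARNs, S3 URI, and the enum case name are placeholders and assumptions.

```swift
import SotoTranscribeStreaming

// Sketch only: generated type and member names are assumptions. As documented
// above, dataAccessRoleArn and outputLocation are required; the redacted-output
// selection and KMS key are optional. All identifiers below are placeholders.
let postCallSettings = TranscribeStreaming.PostCallAnalyticsSettings(
    contentRedactionOutput: .redacted,                       // assumed case name
    dataAccessRoleArn: "arn:aws:iam::111122223333:role/ExampleTranscribeRole",
    outputEncryptionKMSKeyId: "alias/ExampleAlias",
    outputLocation: "s3://amzn-s3-demo-bucket/my-output-folder/"
)
// These settings ride along on a StartCallAnalyticsStreamTranscription request,
// next to the audio event stream.
_ = postCallSettings
```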
" } }, "com.amazonaws.transcribestreaming#RequestId": { @@ -1412,7 +1652,7 @@ "LanguageCode": { "target": "com.amazonaws.transcribestreaming#CallAnalyticsLanguageCode", "traits": { - "smithy.api#documentation": "

Specify the language code that represents the language spoken in your audio.

\n

If you're unsure of the language spoken in your audio, consider using \n IdentifyLanguage to enable automatic language identification.

\n

For a list of languages supported with streaming Call Analytics, refer to the \n Supported \n languages table.

", + "smithy.api#documentation": "

Specify the language code that represents the language spoken in your audio.

\n

For a list of languages supported with real-time Call Analytics, refer to the \n Supported \n languages table.

", "smithy.api#httpHeader": "x-amzn-transcribe-language-code", "smithy.api#required": {} } @@ -1443,13 +1683,14 @@ "SessionId": { "target": "com.amazonaws.transcribestreaming#SessionId", "traits": { - "smithy.api#documentation": "

Specify a name for your Call Analytics transcription session. If you don't include this parameter\n in your request, Amazon Transcribe generates an ID and returns it in the response.

\n

You can use a session ID to retry a streaming session.

", + "smithy.api#documentation": "

Specify a name for your Call Analytics transcription session. If you don't include this parameter\n in your request, Amazon Transcribe generates an ID and returns it in the response.

", "smithy.api#httpHeader": "x-amzn-transcribe-session-id" } }, "AudioStream": { "target": "com.amazonaws.transcribestreaming#AudioStream", "traits": { + "smithy.api#documentation": "

An encoded stream of audio blobs. Audio streams are encoded as either HTTP/2 or WebSocket \n data frames.

\n

For more information, see Transcribing streaming audio.

", "smithy.api#httpPayload": {}, "smithy.api#required": {} } @@ -1493,21 +1734,21 @@ "ContentIdentificationType": { "target": "com.amazonaws.transcribestreaming#ContentIdentificationType", "traits": { - "smithy.api#documentation": "

Labels all personally identifiable information (PII) identified in your transcript.

\n

Content identification is performed at the segment level; PII specified in \n PiiEntityTypes is flagged upon complete transcription of an audio segment.

\n

You can’t set ContentIdentificationType and ContentRedactionType\n in the same request. If you set both, your request returns a\n BadRequestException.

\n

For more information, see Redacting or identifying personally identifiable\n information.

", + "smithy.api#documentation": "

Labels all personally identifiable information (PII) identified in your transcript.

\n

Content identification is performed at the segment level; PII specified in \n PiiEntityTypes is flagged upon complete transcription of an audio segment. If you don't\n include PiiEntityTypes in your request, all PII is identified.

\n

You can’t set ContentIdentificationType and ContentRedactionType\n in the same request. If you set both, your request returns a\n BadRequestException.

\n

For more information, see Redacting or identifying personally identifiable\n information.

", "smithy.api#httpHeader": "x-amzn-transcribe-content-identification-type" } }, "ContentRedactionType": { "target": "com.amazonaws.transcribestreaming#ContentRedactionType", "traits": { - "smithy.api#documentation": "

Redacts all personally identifiable information (PII) identified in your transcript.

\n

Content redaction is performed at the segment level; PII specified in \n PiiEntityTypes is redacted upon complete transcription of an audio segment.

\n

You can’t set ContentRedactionType and ContentIdentificationType\n in the same request. If you set both, your request returns a\n BadRequestException.

\n

For more information, see Redacting or identifying personally identifiable\n information.

", + "smithy.api#documentation": "

Redacts all personally identifiable information (PII) identified in your transcript.

\n

Content redaction is performed at the segment level; PII specified in \n PiiEntityTypes is redacted upon complete transcription of an audio segment. If you don't\n include PiiEntityTypes in your request, all PII is redacted.

\n

You can’t set ContentRedactionType and ContentIdentificationType\n in the same request. If you set both, your request returns a BadRequestException.

\n

For more information, see Redacting or identifying personally identifiable\n information.

", "smithy.api#httpHeader": "x-amzn-transcribe-content-redaction-type" } }, "PiiEntityTypes": { "target": "com.amazonaws.transcribestreaming#PiiEntityTypes", "traits": { - "smithy.api#documentation": "

Specify which types of personally identifiable information (PII) you want to redact in your \n transcript. You can include as many types as you'd like, or you can select \n ALL.

\n

To include PiiEntityTypes in your Call Analytics request, you must also include \n either ContentIdentificationType or ContentRedactionType.

\n

Values must be comma-separated and can include:\n BANK_ACCOUNT_NUMBER, BANK_ROUTING,\n CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, \n CREDIT_DEBIT_EXPIRY, PIN, EMAIL, \n ADDRESS, NAME, PHONE, \n SSN, or ALL.

", + "smithy.api#documentation": "

Specify which types of personally identifiable information (PII) you want to redact in your \n transcript. You can include as many types as you'd like, or you can select \n ALL.

\n

Values must be comma-separated and can include: ADDRESS, \n BANK_ACCOUNT_NUMBER, BANK_ROUTING,\n CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY,\n CREDIT_DEBIT_NUMBER, EMAIL, \n NAME, PHONE, PIN, \n SSN, or ALL.

\n

Note that if you include PiiEntityTypes in your request, you must also include \n ContentIdentificationType or ContentRedactionType.

\n

If you include ContentRedactionType or \n ContentIdentificationType in your request, but do not include \n PiiEntityTypes, all PII is redacted or identified.

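The PiiEntityTypes value is sent as a comma-separated header and is only meaningful together with ContentIdentificationType or ContentRedactionType, as described above. A hypothetical helper, not an SDK API, that builds the header value and enforces that pairing:

```swift
// Hypothetical helper for the x-amzn-transcribe-pii-entity-types header.
enum PiiHandling { case identification, redaction }

func piiEntityTypesHeader(_ types: [String], handling: PiiHandling?) -> String? {
    guard !types.isEmpty else { return nil }
    guard handling != nil else { return nil }   // must pair with identification or redaction
    return types.joined(separator: ",")
}

print(piiEntityTypesHeader(["NAME", "EMAIL", "SSN"], handling: .redaction) ?? "invalid")
// -> "NAME,EMAIL,SSN"
```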
", "smithy.api#httpHeader": "x-amzn-transcribe-pii-entity-types" } } @@ -1522,7 +1763,7 @@ "RequestId": { "target": "com.amazonaws.transcribestreaming#RequestId", "traits": { - "smithy.api#documentation": "

Provides the identifier for your Call Analytics streaming request.

", + "smithy.api#documentation": "

Provides the identifier for your real-time Call Analytics request.

", "smithy.api#httpHeader": "x-amzn-request-id" } }, @@ -1564,7 +1805,7 @@ "CallAnalyticsTranscriptResultStream": { "target": "com.amazonaws.transcribestreaming#CallAnalyticsTranscriptResultStream", "traits": { - "smithy.api#documentation": "

Provides detailed information about your Call Analytics streaming session.

", + "smithy.api#documentation": "

Provides detailed information about your real-time Call Analytics session.

", "smithy.api#httpPayload": {} } }, @@ -1725,7 +1966,7 @@ "SessionId": { "target": "com.amazonaws.transcribestreaming#SessionId", "traits": { - "smithy.api#documentation": "

Specify a name for your transcription session. If you don't include this parameter in \n your request, Amazon Transcribe Medical generates an ID and returns it in the\n response.

\n

You can use a session ID to retry a streaming session.

", + "smithy.api#documentation": "

Specify a name for your transcription session. If you don't include this parameter in \n your request, Amazon Transcribe Medical generates an ID and returns it in the\n response.

", "smithy.api#httpHeader": "x-amzn-transcribe-session-id" } }, @@ -1740,14 +1981,14 @@ "target": "com.amazonaws.transcribestreaming#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Enables channel identification in multi-channel audio.

\n

Channel identification transcribes the audio on each channel independently, then appends\n the output for each channel into one transcript.

\n

If you have multi-channel audio and do not enable channel identification, your audio is \n transcribed in a continuous manner and your transcript is not separated by channel.

\n

For more information, see Transcribing multi-channel audio.

", + "smithy.api#documentation": "

Enables channel identification in multi-channel audio.

\n

Channel identification transcribes the audio on each channel independently, then appends\n the output for each channel into one transcript.

\n

If you have multi-channel audio and do not enable channel identification, your audio is \n transcribed in a continuous manner and your transcript is not separated by channel.

\n

If you include EnableChannelIdentification in your request, you must also \n include NumberOfChannels.

\n

For more information, see Transcribing multi-channel audio.

", "smithy.api#httpHeader": "x-amzn-transcribe-enable-channel-identification" } }, "NumberOfChannels": { "target": "com.amazonaws.transcribestreaming#NumberOfChannels", "traits": { - "smithy.api#documentation": "

Specify the number of channels in your audio stream. Up to two channels are\n supported.

", + "smithy.api#documentation": "

Specify the number of channels in your audio stream. This value must be \n 2, as only two channels are supported. If your audio doesn't contain \n multiple channels, do not include this parameter in your request.

\n

If you include NumberOfChannels in your request, you must also \n include EnableChannelIdentification.

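NumberOfChannels and EnableChannelIdentification now have to be supplied together, and the channel count must be exactly 2. A hypothetical request-level check, not an SDK API, capturing those documented rules:

```swift
// Hypothetical check for the channel settings documented above.
func channelSettingsAreValid(numberOfChannels: Int?, enableChannelIdentification: Bool) -> Bool {
    switch (numberOfChannels, enableChannelIdentification) {
    case (nil, false):          return true          // single-channel audio, neither parameter sent
    case (let channels?, true): return channels == 2 // only two channels are supported
    default:                    return false         // one was supplied without the other
    }
}

assert(channelSettingsAreValid(numberOfChannels: 2, enableChannelIdentification: true))
assert(!channelSettingsAreValid(numberOfChannels: 2, enableChannelIdentification: false))
assert(!channelSettingsAreValid(numberOfChannels: 3, enableChannelIdentification: true))
```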
", "smithy.api#httpHeader": "x-amzn-transcribe-number-of-channels" } }, @@ -1934,7 +2175,7 @@ "SessionId": { "target": "com.amazonaws.transcribestreaming#SessionId", "traits": { - "smithy.api#documentation": "

Specify a name for your transcription session. If you don't include this parameter in your request, \n Amazon Transcribe generates an ID and returns it in the response.

\n

You can use a session ID to retry a streaming session.

", + "smithy.api#documentation": "

Specify a name for your transcription session. If you don't include this parameter in your request, \n Amazon Transcribe generates an ID and returns it in the response.

", "smithy.api#httpHeader": "x-amzn-transcribe-session-id" } }, @@ -1972,14 +2213,14 @@ "target": "com.amazonaws.transcribestreaming#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Enables channel identification in multi-channel audio.

\n

Channel identification transcribes the audio on each channel independently, then appends the \n output for each channel into one transcript.

\n

If you have multi-channel audio and do not enable channel identification, your audio is \n transcribed in a continuous manner and your transcript is not separated by channel.

\n

For more information, see Transcribing multi-channel audio.

", + "smithy.api#documentation": "

Enables channel identification in multi-channel audio.

\n

Channel identification transcribes the audio on each channel independently, then appends the \n output for each channel into one transcript.

\n

If you have multi-channel audio and do not enable channel identification, your audio is \n transcribed in a continuous manner and your transcript is not separated by channel.

\n

If you include EnableChannelIdentification in your request, you must also \n include NumberOfChannels.

\n

For more information, see Transcribing multi-channel audio.

", "smithy.api#httpHeader": "x-amzn-transcribe-enable-channel-identification" } }, "NumberOfChannels": { "target": "com.amazonaws.transcribestreaming#NumberOfChannels", "traits": { - "smithy.api#documentation": "

Specify the number of channels in your audio stream. Up to two channels are\n supported.

", + "smithy.api#documentation": "

Specify the number of channels in your audio stream. This value must be \n 2, as only two channels are supported. If your audio doesn't contain \n multiple channels, do not include this parameter in your request.

\n

If you include NumberOfChannels in your request, you must also \n include EnableChannelIdentification.

", "smithy.api#httpHeader": "x-amzn-transcribe-number-of-channels" } }, @@ -2001,21 +2242,21 @@ "ContentIdentificationType": { "target": "com.amazonaws.transcribestreaming#ContentIdentificationType", "traits": { - "smithy.api#documentation": "

Labels all personally identifiable information (PII) identified in your transcript.

\n

Content identification is performed at the segment level; PII specified in \n PiiEntityTypes is flagged upon complete transcription of an audio segment.

\n

You can’t set ContentIdentificationType and ContentRedactionType\n in the same request. If you set both, your request returns a\n BadRequestException.

\n

For more information, see Redacting or identifying personally identifiable\n information.

", + "smithy.api#documentation": "

Labels all personally identifiable information (PII) identified in your transcript.

\n

Content identification is performed at the segment level; PII specified in \n PiiEntityTypes is flagged upon complete transcription of an audio segment. If you don't\n include PiiEntityTypes in your request, all PII is identified.

\n

You can’t set ContentIdentificationType and ContentRedactionType\n in the same request. If you set both, your request returns a\n BadRequestException.

\n

For more information, see Redacting or identifying personally identifiable\n information.

", "smithy.api#httpHeader": "x-amzn-transcribe-content-identification-type" } }, "ContentRedactionType": { "target": "com.amazonaws.transcribestreaming#ContentRedactionType", "traits": { - "smithy.api#documentation": "

Redacts all personally identifiable information (PII) identified in your transcript.

\n

Content redaction is performed at the segment level; PII specified in \n PiiEntityTypes is redacted upon complete transcription of an audio segment.

\n

You can’t set ContentRedactionType and ContentIdentificationType\n in the same request. If you set both, your request returns a\n BadRequestException.

\n

For more information, see Redacting or identifying personally identifiable\n information.

", + "smithy.api#documentation": "

Redacts all personally identifiable information (PII) identified in your transcript.

\n

Content redaction is performed at the segment level; PII specified in \n PiiEntityTypes is redacted upon complete transcription of an audio segment. If you don't\n include PiiEntityTypes in your request, all PII is redacted.

\n

You can’t set ContentRedactionType and ContentIdentificationType\n in the same request. If you set both, your request returns a BadRequestException.

\n

For more information, see Redacting or identifying personally identifiable\n information.

", "smithy.api#httpHeader": "x-amzn-transcribe-content-redaction-type" } }, "PiiEntityTypes": { "target": "com.amazonaws.transcribestreaming#PiiEntityTypes", "traits": { - "smithy.api#documentation": "

Specify which types of personally identifiable information (PII) you want to redact in your \n transcript. You can include as many types as you'd like, or you can select \n ALL.

\n

To include PiiEntityTypes in your request, you must also include either \n ContentIdentificationType or ContentRedactionType.

\n

Values must be comma-separated and can include:\n BANK_ACCOUNT_NUMBER, BANK_ROUTING,\n CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, \n CREDIT_DEBIT_EXPIRY, PIN, EMAIL, \n ADDRESS, NAME, PHONE, \n SSN, or ALL.

", + "smithy.api#documentation": "

Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL.

Values must be comma-separated and can include: ADDRESS, BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL, NAME, PHONE, PIN, SSN, or ALL.

Note that if you include PiiEntityTypes in your request, you must also include ContentIdentificationType or ContentRedactionType.

If you include ContentRedactionType or ContentIdentificationType in your request, but do not include PiiEntityTypes, all PII is redacted or identified.
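As a rough illustration of how these three settings interact, the following Swift sketch encodes the documented rules: identification and redaction are mutually exclusive, and an omitted PiiEntityTypes list means all PII is covered. The header names come from the model above; the Swift types, the helper, and the "PII" header value are assumptions for illustration only, not the generated Soto API.

// Illustrative sketch only; not the Soto-generated request shape.
enum PiiHandling {
    case identify   // maps to x-amzn-transcribe-content-identification-type
    case redact     // maps to x-amzn-transcribe-content-redaction-type
}

struct PiiSettings {
    // Using a single enum makes the documented mutual exclusion impossible to violate.
    var handling: PiiHandling
    // nil means "all PII", per the documentation above.
    var piiEntityTypes: [String]? = nil

    func headers() -> [String: String] {
        var headers: [String: String] = [:]
        switch handling {
        case .identify:
            headers["x-amzn-transcribe-content-identification-type"] = "PII" // value assumed for illustration
        case .redact:
            headers["x-amzn-transcribe-content-redaction-type"] = "PII"      // value assumed for illustration
        }
        // Comma-separated list, e.g. "NAME,SSN"; omitted entirely when nil (all PII).
        if let types = piiEntityTypes {
            headers["x-amzn-transcribe-pii-entity-types"] = types.joined(separator: ",")
        }
        return headers
    }
}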

", "smithy.api#httpHeader": "x-amzn-transcribe-pii-entity-types" } }, @@ -2030,14 +2271,14 @@ "target": "com.amazonaws.transcribestreaming#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Enables automatic language identification for your transcription.

If you include IdentifyLanguage, you can optionally include a list of language codes, using LanguageOptions, that you think may be present in your audio stream. Including language options can improve transcription accuracy.

You can also include a preferred language using PreferredLanguage. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter.

If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel.

Note that you must include either LanguageCode or IdentifyLanguage or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails.

Streaming language identification can't be combined with custom language models or redaction.

", + "smithy.api#documentation": "

Enables automatic language identification for your transcription.

If you include IdentifyLanguage, you must include a list of language codes, using LanguageOptions, that you think may be present in your audio stream.

You can also include a preferred language using PreferredLanguage. Adding a preferred language can help Amazon Transcribe identify the language faster than if you omit this parameter.

If you have multi-channel audio that contains different languages on each channel, and you've enabled channel identification, automatic language identification identifies the dominant language on each audio channel.

Note that you must include either LanguageCode or IdentifyLanguage or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails.

Streaming language identification can't be combined with custom language models or redaction.

", "smithy.api#httpHeader": "x-amzn-transcribe-identify-language" } }, "LanguageOptions": { "target": "com.amazonaws.transcribestreaming#LanguageOptions", "traits": { - "smithy.api#documentation": "

Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.

Including language options can improve the accuracy of language identification.

If you include LanguageOptions in your request, you must also include IdentifyLanguage.

For a list of languages supported with Amazon Transcribe streaming, refer to the Supported languages table.

You can only include one language dialect per language per stream. For example, you cannot include en-US and en-AU in the same request.

", + "smithy.api#documentation": "

Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended.

Including language options can improve the accuracy of language identification.

If you include LanguageOptions in your request, you must also include IdentifyLanguage or IdentifyMultipleLanguages.

For a list of languages supported with Amazon Transcribe streaming, refer to the Supported languages table.

You can only include one language dialect per language per stream. For example, you cannot include en-US and en-AU in the same request.

", "smithy.api#httpHeader": "x-amzn-transcribe-language-options" } }, @@ -2052,7 +2293,7 @@ "target": "com.amazonaws.transcribestreaming#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead.

If you include IdentifyMultipleLanguages, you can optionally include a list of language codes, using LanguageOptions, that you think may be present in your stream. Including LanguageOptions restricts IdentifyMultipleLanguages to only the language options that you specify, which can improve transcription accuracy.

If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include VocabularyNames or VocabularyFilterNames.

Note that you must include one of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails.

", + "smithy.api#documentation": "

Enables automatic multi-language identification in your transcription job request. Use this parameter if your stream contains more than one language. If your stream contains only one language, use IdentifyLanguage instead.

If you include IdentifyMultipleLanguages, you must include a list of language codes, using LanguageOptions, that you think may be present in your stream.

If you want to apply a custom vocabulary or a custom vocabulary filter to your automatic multiple language identification request, include VocabularyNames or VocabularyFilterNames.

Note that you must include one of LanguageCode, IdentifyLanguage, or IdentifyMultipleLanguages in your request. If you include more than one of these parameters, your transcription job fails.
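A minimal Swift sketch of these parameter rules, assuming the hypothetical field names below (this is not the generated request shape): exactly one of the three language settings, language options required by the identification flags, and language options only allowed alongside them.

// Illustrative sketch; names are hypothetical, not the Soto-generated API.
struct StreamingLanguageSettings {
    var languageCode: String? = nil          // e.g. "en-US"
    var identifyLanguage = false
    var identifyMultipleLanguages = false
    var languageOptions: [String]? = nil     // e.g. ["en-US", "de-DE"]

    enum RuleViolation: Error {
        case pickExactlyOneLanguageMode
        case identificationRequiresLanguageOptions
        case languageOptionsRequireIdentification
    }

    func validate() throws {
        // Must include exactly one of LanguageCode, IdentifyLanguage, IdentifyMultipleLanguages.
        let modes = [languageCode != nil, identifyLanguage, identifyMultipleLanguages]
        guard modes.filter({ $0 }).count == 1 else {
            throw RuleViolation.pickExactlyOneLanguageMode
        }
        // Per the updated documentation, the identify flags require LanguageOptions.
        if (identifyLanguage || identifyMultipleLanguages) && languageOptions == nil {
            throw RuleViolation.identificationRequiresLanguageOptions
        }
        // LanguageOptions is only meaningful together with one of the identify flags.
        if languageOptions != nil && !(identifyLanguage || identifyMultipleLanguages) {
            throw RuleViolation.languageOptionsRequireIdentification
        }
    }
}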

", "smithy.api#httpHeader": "x-amzn-transcribe-identify-multiple-languages" } }, @@ -2153,7 +2394,7 @@ "target": "com.amazonaws.transcribestreaming#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "

Shows whether channel identification was enabled for your transcription.

", + "smithy.api#documentation": "

Shows whether channel identification was enabled for your transcription.

", "smithy.api#httpHeader": "x-amzn-transcribe-enable-channel-identification" } }, diff --git a/models/transfer.json b/models/transfer.json index 9ae16fbdda..d14a580219 100644 --- a/models/transfer.json +++ b/models/transfer.json @@ -431,6 +431,52 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.transfer#ConnectorFileTransferResult": { + "type": "structure", + "members": { + "FilePath": { + "target": "com.amazonaws.transfer#FilePath", + "traits": { + "smithy.api#documentation": "

The filename and path to where the file was sent to or retrieved from.

", + "smithy.api#required": {} + } + }, + "StatusCode": { + "target": "com.amazonaws.transfer#TransferTableStatus", + "traits": { + "smithy.api#documentation": "

The current status for the transfer.

", + "smithy.api#required": {} + } + }, + "FailureCode": { + "target": "com.amazonaws.transfer#FailureCode", + "traits": { + "smithy.api#documentation": "

For transfers that fail, this parameter contains a code indicating the reason. For example, RETRIEVE_FILE_NOT_FOUND.

" + } + }, + "FailureMessage": { + "target": "com.amazonaws.transfer#Message", + "traits": { + "smithy.api#documentation": "

For transfers that fail, this parameter describes the reason for the failure.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains the details for files transferred using an SFTP connector, during a single transfer.

" + } + }, + "com.amazonaws.transfer#ConnectorFileTransferResults": { + "type": "list", + "member": { + "target": "com.amazonaws.transfer#ConnectorFileTransferResult" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1000 + } + } + }, "com.amazonaws.transfer#ConnectorId": { "type": "string", "traits": { @@ -698,7 +744,7 @@ "BaseDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { - "smithy.api#documentation": "

The landing directory (folder) for files transferred by using the AS2 protocol.

A BaseDirectory example is /DOC-EXAMPLE-BUCKET/home/mydirectory.

", + "smithy.api#documentation": "

The landing directory (folder) for files transferred by using the AS2 protocol.

A BaseDirectory example is /amzn-s3-demo-bucket/home/mydirectory.

", "smithy.api#required": {} } }, @@ -3669,7 +3715,7 @@ } }, "traits": { - "smithy.api#documentation": "

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP address to your server's endpoint.

After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.

For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.

" + "smithy.api#documentation": "

The virtual private cloud (VPC) endpoint settings that are configured for your file transfer protocol-enabled server. With a VPC endpoint, you can restrict access to your server and resources only within your VPC. To control incoming internet traffic, invoke the UpdateServer API and attach an Elastic IP address to your server's endpoint.

After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.

For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" } }, "com.amazonaws.transfer#EndpointType": { @@ -3880,6 +3926,9 @@ "smithy.api#pattern": "^S-1-[\\d-]+$" } }, + "com.amazonaws.transfer#FailureCode": { + "type": "string" + }, "com.amazonaws.transfer#FileLocation": { "type": "structure", "members": { @@ -4879,6 +4928,97 @@ "smithy.api#output": {} } }, + "com.amazonaws.transfer#ListFileTransferResults": { + "type": "operation", + "input": { + "target": "com.amazonaws.transfer#ListFileTransferResultsRequest" + }, + "output": { + "target": "com.amazonaws.transfer#ListFileTransferResultsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.transfer#InternalServiceError" + }, + { + "target": "com.amazonaws.transfer#InvalidRequestException" + }, + { + "target": "com.amazonaws.transfer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.transfer#ServiceUnavailableException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns real-time updates and detailed information on the status of each individual file being transferred in a specific file transfer operation. You specify the file transfer by providing its ConnectorId and its TransferId.

File transfer results are available up to 7 days after an operation has been requested.
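A rough sketch of draining this paginated operation: NextToken from one page feeds the next call, as the pagination trait below declares. The client protocol, method, and member names here are assumptions modeled on the Smithy shapes in this file, not a verified Soto API.

// Illustrative, self-contained sketch; all names are hypothetical.
struct FileTransferResultsPage {
    var fileTransferResults: [String]   // stand-in for ConnectorFileTransferResult values
    var nextToken: String?
}

protocol TransferResultsClient {
    func listFileTransferResults(connectorId: String,
                                 transferId: String,
                                 maxResults: Int?,
                                 nextToken: String?) async throws -> FileTransferResultsPage
}

func allFileTransferResults(client: some TransferResultsClient,
                            connectorId: String,
                            transferId: String) async throws -> [String] {
    var collected: [String] = []
    var token: String? = nil
    repeat {
        // A single StartFileTransfer covers at most 10 paths, so 10 is a natural page size.
        let page = try await client.listFileTransferResults(connectorId: connectorId,
                                                            transferId: transferId,
                                                            maxResults: 10,
                                                            nextToken: token)
        collected.append(contentsOf: page.fileTransferResults)
        token = page.nextToken
    } while token != nil
    return collected
}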

", + "smithy.api#http": { + "method": "POST", + "uri": "/listFileTransferResults" + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "FileTransferResults", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.transfer#ListFileTransferResultsRequest": { + "type": "structure", + "members": { + "ConnectorId": { + "target": "com.amazonaws.transfer#ConnectorId", + "traits": { + "smithy.api#documentation": "

A unique identifier for a connector. This value should match the value supplied to the corresponding StartFileTransfer call.

", + "smithy.api#required": {} + } + }, + "TransferId": { + "target": "com.amazonaws.transfer#TransferId", + "traits": { + "smithy.api#documentation": "

A unique identifier for a file transfer. This value should match the value supplied to the corresponding StartFileTransfer call.

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.transfer#NextToken", + "traits": { + "smithy.api#documentation": "

If there are more file details than returned in this call, use this value for a subsequent call to ListFileTransferResults to retrieve them.

" + } + }, + "MaxResults": { + "target": "com.amazonaws.transfer#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of files to return in a single page. Note that currently you can specify a maximum of 10 file paths in a single StartFileTransfer operation. Thus, the maximum number of file transfer results that can be returned in a single page is 10.

" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.transfer#ListFileTransferResultsResponse": { + "type": "structure", + "members": { + "FileTransferResults": { + "target": "com.amazonaws.transfer#ConnectorFileTransferResults", + "traits": { + "smithy.api#documentation": "

Returns the details for the files transferred in the transfer identified by the TransferId and ConnectorId specified.

  • FilePath: the filename and path to where the file was sent to or retrieved from.
  • StatusCode: current status for the transfer. The status returned is one of the following values: QUEUED, IN_PROGRESS, COMPLETED, or FAILED.
  • FailureCode: for transfers that fail, this parameter contains a code indicating the reason. For example, RETRIEVE_FILE_NOT_FOUND.
  • FailureMessage: for transfers that fail, this parameter describes the reason for the failure.
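For illustration, a hypothetical Swift mirror of the per-file result described above, with a small helper that sorts results by status. The field and enum-case names follow the model in this file; the Swift types themselves are not generated code.

// Illustrative types only, modeled on ConnectorFileTransferResult and TransferTableStatus.
enum TransferTableStatus: String {
    case queued = "QUEUED"
    case inProgress = "IN_PROGRESS"
    case completed = "COMPLETED"
    case failed = "FAILED"
}

struct ConnectorFileTransferResult {
    var filePath: String                 // where the file was sent to or retrieved from
    var statusCode: TransferTableStatus  // current status for the transfer
    var failureCode: String?             // e.g. RETRIEVE_FILE_NOT_FOUND, present only on failure
    var failureMessage: String?          // human-readable reason, present only on failure
}

// Splits a page of results into finished, failed, and still-pending transfers.
func triage(_ results: [ConnectorFileTransferResult])
    -> (done: [ConnectorFileTransferResult],
        failed: [ConnectorFileTransferResult],
        pending: [ConnectorFileTransferResult]) {
    var done: [ConnectorFileTransferResult] = []
    var failed: [ConnectorFileTransferResult] = []
    var pending: [ConnectorFileTransferResult] = []
    for result in results {
        switch result.statusCode {
        case .completed:           done.append(result)
        case .failed:              failed.append(result)
        case .queued, .inProgress: pending.append(result)
        }
    }
    return (done, failed, pending)
}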
", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.transfer#NextToken", + "traits": { + "smithy.api#documentation": "

Returns a token that you can use to call ListFileTransferResults again and receive additional results, if there are any (against the same TransferId).

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.transfer#ListHostKeys": { "type": "operation", "input": { @@ -7244,7 +7384,7 @@ "SendFilePaths": { "target": "com.amazonaws.transfer#FilePaths", "traits": { - "smithy.api#documentation": "

One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, DOC-EXAMPLE-BUCKET/myfile.txt.

Replace DOC-EXAMPLE-BUCKET with one of your actual buckets.

" + "smithy.api#documentation": "

One or more source paths for the Amazon S3 storage. Each string represents a source file path for one outbound file transfer. For example, amzn-s3-demo-bucket/myfile.txt.

Replace amzn-s3-demo-bucket with one of your actual buckets.

" } }, "RetrieveFilePaths": { @@ -7851,6 +7991,9 @@ { "target": "com.amazonaws.transfer#ListExecutions" }, + { + "target": "com.amazonaws.transfer#ListFileTransferResults" + }, { "target": "com.amazonaws.transfer#ListHostKeys" }, @@ -7930,7 +8073,7 @@ "name": "transfer" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

Transfer Family is a fully managed service that enables the transfer of files over the File\n Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File\n Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS.\n Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3.\n Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating\n with existing authentication systems, and providing DNS routing with Amazon Route 53 so\n nothing changes for your customers and partners, or their applications. With your data in\n Amazon S3, you can use it with Amazon Web Services for processing, analytics, machine learning, and\n archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and\n set up.

", + "smithy.api#documentation": "

Transfer Family is a fully managed service that enables the transfer of files over the File\n Transfer Protocol (FTP), File Transfer Protocol over SSL (FTPS), or Secure Shell (SSH) File\n Transfer Protocol (SFTP) directly into and out of Amazon Simple Storage Service (Amazon S3) or Amazon EFS.\n Additionally, you can use Applicability Statement 2 (AS2) to transfer files into and out of Amazon S3.\n Amazon Web Services helps you seamlessly migrate your file transfer workflows to Transfer Family by integrating\n with existing authentication systems, and providing DNS routing with Amazon Route 53 so\n nothing changes for your customers and partners, or their applications. With your data in\n Amazon S3, you can use it with Amazon Web Services services for processing, analytics, machine learning, and\n archiving. Getting started with Transfer Family is easy since there is no infrastructure to buy and\n set up.

", "smithy.api#title": "AWS Transfer Family", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -8913,6 +9056,35 @@ } } }, + "com.amazonaws.transfer#TransferTableStatus": { + "type": "enum", + "members": { + "QUEUED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "QUEUED" + } + }, + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "COMPLETED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETED" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, "com.amazonaws.transfer#UntagResource": { "type": "operation", "input": { @@ -9153,7 +9325,7 @@ "BaseDirectory": { "target": "com.amazonaws.transfer#HomeDirectory", "traits": { - "smithy.api#documentation": "

To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /DOC-EXAMPLE-BUCKET/home/mydirectory.

" + "smithy.api#documentation": "

To change the landing directory (folder) for files that are transferred, provide the bucket folder that you want to use; for example, /amzn-s3-demo-bucket/home/mydirectory.

" } }, "AccessRole": { @@ -9611,7 +9783,7 @@ "EndpointType": { "target": "com.amazonaws.transfer#EndpointType", "traits": { - "smithy.api#documentation": "

The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.

After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Servicesaccount on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.

For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" + "smithy.api#documentation": "

The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.

After May 19, 2021, you won't be able to create a server using EndpointType=VPC_ENDPOINT in your Amazon Web Services account if your account hasn't already done so before May 19, 2021. If you have already created servers with EndpointType=VPC_ENDPOINT in your Amazon Web Services account on or before May 19, 2021, you will not be affected. After this date, use EndpointType=VPC.

For more information, see https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint.

It is recommended that you use VPC as the EndpointType. With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with EndpointType set to VPC_ENDPOINT.

" } }, "HostKey": { @@ -9975,13 +10147,13 @@ "OnUpload": { "target": "com.amazonaws.transfer#OnUploadWorkflowDetails", "traits": { - "smithy.api#documentation": "

A trigger that starts a workflow: the workflow begins to execute after a file is uploaded.

To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.

aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'

" + "smithy.api#documentation": "

A trigger that starts a workflow: the workflow begins to execute after a file is uploaded.

To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example.

aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'

OnUpload can contain a maximum of one WorkflowDetail object.

" } }, "OnPartialUpload": { "target": "com.amazonaws.transfer#OnPartialUploadWorkflowDetails", "traits": { - "smithy.api#documentation": "

A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload.

A partial upload occurs when a file is open when the session disconnects.

" + "smithy.api#documentation": "

A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload.

A partial upload occurs when a file is open when the session disconnects.

OnPartialUpload can contain a maximum of one WorkflowDetail object.

" } } }, diff --git a/models/verifiedpermissions.json b/models/verifiedpermissions.json index dfcd3abd40..01f3eb95e5 100644 --- a/models/verifiedpermissions.json +++ b/models/verifiedpermissions.json @@ -151,6 +151,163 @@ "name": "IsAuthorized" }, "smithy.api#documentation": "

Makes a series of decisions about multiple authorization requests for one principal or resource. Each request contains the equivalent content of an IsAuthorized request: principal, action, resource, and context. Either the principal or the resource parameter must be identical across all requests. For example, Verified Permissions won't evaluate a pair of requests where bob views photo1 and alice views photo2. Authorization of bob to view photo1 and photo2, or bob and alice to view photo1, are valid batches.

The request is evaluated against all policies in the specified policy store that match the entities that you declare. The result of the decisions is a series of Allow or Deny responses, along with the IDs of the policies that produced each decision.

The entities of a BatchIsAuthorized API request can contain up to 100 principals and up to 100 resources. The requests of a BatchIsAuthorized API request can contain up to 30 requests.

The BatchIsAuthorized operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission verifiedpermissions:IsAuthorized in their IAM policies.
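A hedged Swift sketch of the batching rules just described, using illustrative types only (not the Verified Permissions client API): at most 30 requests per batch, and every request must share the same principal or the same resource.

// Illustrative sketch; types and names are hypothetical.
struct EntityIdentifier: Hashable {
    var entityType: String
    var entityId: String
}

struct AuthorizationRequest {
    var principal: EntityIdentifier
    var action: EntityIdentifier
    var resource: EntityIdentifier
}

let maxRequestsPerBatch = 30   // documented BatchIsAuthorized limit

// A batch is only valid when every request shares the same principal or the
// same resource, and it contains at most 30 requests.
func isValidBatch(_ requests: [AuthorizationRequest]) -> Bool {
    guard !requests.isEmpty, requests.count <= maxRequestsPerBatch else { return false }
    let samePrincipal = Set(requests.map(\.principal)).count == 1
    let sameResource = Set(requests.map(\.resource)).count == 1
    return samePrincipal || sameResource
}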

", + "smithy.api#examples": [ + { + "title": "Batch - Example 1", + "documentation": "The following example requests two authorization decisions for two principals of type Usernamed Alice and Annalisa.", + "input": { + "requests": [ + { + "principal": { + "entityType": "PhotoFlash::User", + "entityId": "Alice" + }, + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "ViewPhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + } + }, + { + "principal": { + "entityType": "PhotoFlash::User", + "entityId": "Annalisa" + }, + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "DeletePhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + } + } + ], + "entities": { + "entityList": [ + { + "identifier": { + "entityType": "PhotoFlash::User", + "entityId": "Alice" + }, + "attributes": { + "Account": { + "entityIdentifier": { + "entityType": "PhotoFlash::Account", + "entityId": "1234" + } + }, + "Email": { + "string": "" + } + }, + "parents": [] + }, + { + "identifier": { + "entityType": "PhotoFlash::User", + "entityId": "Annalisa" + }, + "attributes": { + "Account": { + "entityIdentifier": { + "entityType": "PhotoFlash::Account", + "entityId": "5678" + } + }, + "Email": { + "string": "" + } + }, + "parents": [] + }, + { + "identifier": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + }, + "attributes": { + "IsPrivate": { + "boolean": false + }, + "Name": { + "string": "" + } + }, + "parents": [ + { + "entityType": "PhotoFlash::Account", + "entityId": "1234" + } + ] + }, + { + "identifier": { + "entityType": "PhotoFlash::Account", + "entityId": "1234" + }, + "attributes": { + "Name": { + "string": "" + } + }, + "parents": [] + } + ] + }, + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "results": [ + { + "request": { + "principal": { + "entityType": "PhotoFlash::User", + "entityId": "alice" + }, + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "ViewPhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + } + }, + "decision": "ALLOW", + "determiningPolicies": [ + { + "policyId": "9wYxMpljbbZQb5fcZHyJhY" + } + ], + "errors": [] + }, + { + "request": { + "principal": { + "entityType": "PhotoFlash::User", + "entityId": "annalisa" + }, + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "DeletePhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + } + }, + "decision": "DENY", + "determiningPolicies": [], + "errors": [] + } + ] + } + } + ], "smithy.api#readonly": {} } }, @@ -301,6 +458,137 @@ "name": "IsAuthorizedWithToken" }, "smithy.api#documentation": "

Makes a series of decisions about multiple authorization requests for one token. The principal in this request comes from an external identity source in the form of an identity or access token, formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluations.

The request is evaluated against all policies in the specified policy store that match the entities that you provide in the entities declaration and in the token. The result of the decisions is a series of Allow or Deny responses, along with the IDs of the policies that produced each decision.

The entities of a BatchIsAuthorizedWithToken API request can contain up to 100 resources and up to 99 user groups. The requests of a BatchIsAuthorizedWithToken API request can contain up to 30 requests.

The BatchIsAuthorizedWithToken operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission verifiedpermissions:IsAuthorizedWithToken in their IAM policies.

", + "smithy.api#examples": [ + { + "title": "Batch - Example 1", + "documentation": "The following example requests three authorization decisions for two resources and two actions in different photo albums.", + "input": { + "identityToken": "eyJra12345EXAMPLE", + "requests": [ + { + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "ViewPhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + } + }, + { + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "SharePhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + } + }, + { + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "ViewPhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "OfficePhoto94.jpg" + } + } + ], + "entities": { + "entityList": [ + { + "identifier": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + }, + "parents": [ + { + "entityType": "PhotoFlash::Album", + "entityId": "MyExampleAlbum1" + } + ] + }, + { + "identifier": { + "entityType": "PhotoFlash::Photo", + "entityId": "OfficePhoto94.jpg" + }, + "parents": [ + { + "entityType": "PhotoFlash::Album", + "entityId": "MyExampleAlbum2" + } + ] + } + ] + }, + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "principal": { + "entityType": "PhotoFlash::User", + "entityId": "us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + }, + "results": [ + { + "request": { + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "ViewPhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + } + }, + "decision": "ALLOW", + "determiningPolicies": [ + { + "policyId": "9wYixMplbbZQb5fcZHyJhY" + } + ], + "errors": [] + }, + { + "request": { + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "SharePhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "VacationPhoto94.jpg" + } + }, + "decision": "ALLOW", + "determiningPolicies": [ + { + "policyId": "9wYixMplbbZQb5fcZHyJhY" + } + ], + "errors": [] + }, + { + "request": { + "action": { + "actionType": "PhotoFlash::Action", + "actionId": "ViewPhoto" + }, + "resource": { + "entityType": "PhotoFlash::Photo", + "entityId": "OfficePhoto94.jpg" + } + }, + "decision": "DENY", + "determiningPolicies": [], + "errors": [] + } + ] + } + } + ], "smithy.api#readonly": {} } }, @@ -754,7 +1042,32 @@ "aws.iam#iamAction": { "documentation": "Grants permission to create a reference to an external identity provider (IdP) that is compatible with OpenID Connect (OIDC) authentication protocol, such as Amazon Cognito" }, - "smithy.api#documentation": "

Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect\n (OIDC) identity provider (IdP).\n

\n

After you create an identity source, you can use the identities provided by the IdP as proxies\n for the principal in authorization queries that use the IsAuthorizedWithToken or\n BatchIsAuthorizedWithToken API operations. These identities take the form\n of tokens that contain claims about the user, such as IDs, attributes and group\n memberships. Identity sources provide identity (ID) tokens and access tokens. Verified Permissions\n derives information about your user and session from token claims. Access tokens provide\n action context to your policies, and ID tokens provide principal\n Attributes.

\n \n

Tokens from an identity source user continue to be usable until they expire. \n Token revocation and resource deletion have no effect on the validity of a token in your policy store

\n
\n \n

To reference a user from this identity source in your Cedar policies, refer to the following syntax examples.

  • Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user principal attribute], for example MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111.
  • OpenID Connect (OIDC) provider: Namespace::[Entity type]::[principalIdClaim]|[user principal attribute], for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "smithy.api#documentation": "

Adds an identity source to a policy store–an Amazon Cognito user pool or OpenID Connect\n (OIDC) identity provider (IdP).\n

\n

After you create an identity source, you can use the identities provided by the IdP as proxies\n for the principal in authorization queries that use the IsAuthorizedWithToken or\n BatchIsAuthorizedWithToken API operations. These identities take the form\n of tokens that contain claims about the user, such as IDs, attributes and group\n memberships. Identity sources provide identity (ID) tokens and access tokens. Verified Permissions\n derives information about your user and session from token claims. Access tokens provide\n action context to your policies, and ID tokens provide principal\n Attributes.

\n \n

Tokens from an identity source user continue to be usable until they expire. \n Token revocation and resource deletion have no effect on the validity of a token in your policy store

\n
\n \n

To reference a user from this identity source in your Cedar policies, refer to the following syntax examples.

  • Amazon Cognito user pool: Namespace::[Entity type]::[User pool ID]|[user principal attribute], for example MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111.
  • OpenID Connect (OIDC) provider: Namespace::[Entity type]::[entityIdPrefix]|[user principal attribute], for example MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.
\n
", + "smithy.api#examples": [ + { + "title": "To create an identity source", + "documentation": "The following ``create-identity-source`` example creates an identity source that lets you reference identities stored in the specified Amazon Cognito user pool. Those identities are available in Verified Permissions as entities of type ``User``. ", + "input": { + "configuration": { + "cognitoUserPoolConfiguration": { + "userPoolArn": "arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5", + "clientIds": [ + "a1b2c3d4e5f6g7h8i9j0kalbmc" + ] + } + }, + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "principalEntityType": "User", + "clientToken": "a1b2c3d4-e5f6-a1b2-c3d4-TOKEN1111111" + }, + "output": { + "createdDate": "2024-08-12T18:20:50.99Z", + "identitySourceId": "ISEXAMPLEabcdefg111111", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + } + } + ], "smithy.api#idempotent": {} } }, @@ -853,6 +1166,98 @@ "documentation": "Grants permission to create a Cedar policy and save it in the specified policy store" }, "smithy.api#documentation": "

Creates a Cedar policy and saves it in the specified policy store. You can create either a\n static policy or a policy linked to a policy template.

\n
    \n
  • \n

    To create a static policy, provide the Cedar policy text in the\n StaticPolicy section of the\n PolicyDefinition.

    \n
  • \n
  • \n

    To create a policy that is dynamically linked to a policy template, specify the policy template ID\n and the principal and resource to associate with this policy in the\n templateLinked section of the PolicyDefinition. If the\n policy template is ever updated, any policies linked to the policy template automatically use the\n updated template.

    \n
  • \n
\n \n

Creating a policy causes it to be validated against the schema in the policy store. If the\n policy doesn't pass validation, the operation fails and the policy isn't\n stored.

\n
\n \n

Verified Permissions is \n eventually consistent\n . It can take a few seconds for a new or changed element to propagate through\n the service and be visible in the results of other Verified Permissions operations.

\n
", + "smithy.api#examples": [ + { + "title": "To create a static policy", + "documentation": "The following example request creates a static policy with a policy scope that specifies both a principal and a resource. The response includes both the Principal and Resource elements because both were specified in the request policy scope.", + "input": { + "definition": { + "static": { + "description": "Grant members of janeFriends UserGroup access to the vacationFolder Album", + "statement": "permit( principal in UserGroup::\"janeFriends\", action, resource in Album::\"vacationFolder\" );" + } + }, + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "clientToken": "a1b2c3d4-e5f6-a1b2-c3d4-TOKEN1111111" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyType": "STATIC", + "principal": { + "entityId": "janeFriends", + "entityType": "UserGroup" + }, + "resource": { + "entityId": "vacationFolder", + "entityType": "Album" + } + } + }, + { + "title": "To create a static policy", + "documentation": "The following example request creates a static policy with a policy scope that specifies both a principal and a resource. The response includes both the Principal and Resource elements because both were specified in the request policy scope.", + "input": { + "definition": { + "static": { + "description": "Grant members of janeFriends UserGroup access to the vacationFolder Album", + "statement": "permit( principal in UserGroup::\"janeFriends\", action, resource in Album::\"vacationFolder\" );" + } + }, + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "clientToken": "a1b2c3d4-e5f6-a1b2-c3d4-TOKEN1111111" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyType": "STATIC", + "principal": { + "entityId": "janeFriends", + "entityType": "UserGroup" + }, + "resource": { + "entityId": "vacationFolder", + "entityType": "Album" + } + } + }, + { + "title": "To create a template-linked policy", + "documentation": "The following example creates a template-linked policy using the specified policy template and associates the specified principal to use with the new template-linked policy.", + "input": { + "definition": { + "templateLinked": { + "policyTemplateId": "PTEXAMPLEabcdefg111111", + "principal": { + "entityType": "User", + "entityId": "alice" + } + } + }, + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "clientToken": "a1b2c3d4-e5f6-a1b2-c3d4-TOKEN1111111" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyId": "Et9KxMplyaDdyurDw8TeFa", + "policyType": "TEMPLATE_LINKED", + "principal": { + "entityType": "User", + "entityId": "alice" + }, + "resource": { + "entityType": "Photo", + "entityId": "VacationPhoto94.jpg" + }, + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z" + } + } + ], "smithy.api#idempotent": {} } }, @@ -973,6 +1378,24 @@ "documentation": "Grants permission to create a Cedar policy and save it in the specified policy store" }, "smithy.api#documentation": "

Creates a policy store. A policy store is a container for policy resources.

Although Cedar supports multiple namespaces, Verified Permissions currently supports only one namespace per policy store.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "smithy.api#examples": [ + { + "title": "To create policy store", + "documentation": "The following example creates a new policy store with strict validation turned on.", + "input": { + "validationSettings": { + "mode": "STRICT" + }, + "clientToken": "a1b2c3d4-e5f6-a1b2-c3d4-TOKEN1111111" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "arn": "arn:aws:verifiedpermissions::123456789012:policy-store/C7v5xMplfFH3i3e4Jrzb1a", + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z" + } + } + ], "smithy.api#idempotent": {} } }, @@ -1064,6 +1487,24 @@ "documentation": "Grants permission to create a policy template" }, "smithy.api#documentation": "

Creates a policy template. A template can use placeholders for the principal and resource. A template must be instantiated into a policy by associating it with specific principals and resources to use for the placeholders. That instantiated policy can then be considered in authorization decisions. The instantiated policy works identically to any other policy, except that it is dynamically linked to the template. If the template changes, then any policies that are linked to that template are immediately updated as well.

Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.

", + "smithy.api#examples": [ + { + "title": "To create a policy template", + "documentation": "The following example creates a policy template that has a placeholder for the principal.", + "input": { + "description": "Template for research dept", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "statement": "\"AccessVacation\"\npermit(\n principal in ?principal,\n action == Action::\"view\",\n resource == Photo::\"VacationPhoto94.jpg\"\n)\nwhen {\n principal has department && principal.department == \"research\"\n};", + "clientToken": "a1b2c3d4-e5f6-a1b2-c3d4-TOKEN1111111" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyTemplateId": "PTEXAMPLEabcdefg111111", + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z" + } + } + ], "smithy.api#idempotent": {} } }, @@ -1176,6 +1617,17 @@ "documentation": "Grants permission to delete an identity source that references an identity provider (IdP) such as Amazon Cognito" }, "smithy.api#documentation": "

Deletes an identity source that references an identity provider (IdP) such as Amazon Cognito. After you delete the identity source, you can no longer use tokens for identities from that identity source to represent principals in authorization queries made using IsAuthorizedWithToken operations.

", + "smithy.api#examples": [ + { + "title": "To delete an identity source", + "documentation": "The following example request deletes the specified identity source.", + "input": { + "identitySourceId": "ISEXAMPLEabcdefg111111", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": {} + } + ], "smithy.api#idempotent": {} } }, @@ -1229,6 +1681,17 @@ "documentation": "Grants permission to delete the specified policy from the policy store" }, "smithy.api#documentation": "

Deletes the specified policy from the policy store.

This operation is idempotent; if you specify a policy that doesn't exist, the request response returns a successful HTTP 200 status code.

", + "smithy.api#examples": [ + { + "title": "To delete a policy", + "documentation": "The following example deletes the specified policy from its policy store.", + "input": { + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": {} + } + ], "smithy.api#idempotent": {} } }, @@ -1274,6 +1737,16 @@ "documentation": "Grants permission to delete the specified policy store" }, "smithy.api#documentation": "

Deletes the specified policy store.

This operation is idempotent. If you specify a policy store that does not exist, the request response will still return a successful HTTP 200 status code.

", + "smithy.api#examples": [ + { + "title": "To delete a policy store", + "documentation": "The following example deletes the specified policy store.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": {} + } + ], "smithy.api#idempotent": {} } }, @@ -1320,6 +1793,17 @@ "documentation": "Grants permission to delete the specified policy template from the policy store" }, "smithy.api#documentation": "

Deletes the specified policy template from the policy store.

This operation also deletes any policies that were created from the specified policy template. Those policies are immediately removed from all future API responses, and are asynchronously deleted from the policy store.

", + "smithy.api#examples": [ + { + "title": "To delete a policy template", + "documentation": "The following example deletes a policy template. Before you can perform this operation, you must first delete any template-linked policies that were instantiated from this policy template. To delete them, use DeletePolicy.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyTemplateId": "PTEXAMPLEabcdefg111111" + }, + "output": {} + } + ], "smithy.api#idempotent": {} } }, @@ -1559,6 +2043,31 @@ "documentation": "Grants permission to retrieve the details about the specified identity source" }, "smithy.api#documentation": "

Retrieves the details about the specified identity source.

", + "smithy.api#examples": [ + { + "title": "To retrieve details about an identity source", + "documentation": "The following example retrieves the details for the specified identity source.", + "input": { + "identitySourceId": "ISEXAMPLEabcdefg111111", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "createdDate": "2024-08-12T18:20:50.99Z", + "details": { + "clientIds": [ + "a1b2c3d4e5f6g7h8i9j0kalbmc" + ], + "userPoolArn": "arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5", + "discoveryUrl": "https://cognito-idp.us-east-1.amazonaws.com/us-east-1_1a2b3c4d5", + "openIdIssuer": "COGNITO" + }, + "identitySourceId": "ISEXAMPLEabcdefg111111", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "principalEntityType": "AWS::Cognito" + } + } + ], "smithy.api#readonly": {} } }, @@ -1663,6 +2172,33 @@ "documentation": "Grants permission to retrieve information about the specified policy" }, "smithy.api#documentation": "

Retrieves information about the specified policy.

", + "smithy.api#examples": [ + { + "title": "To retrieve details about a policy", + "documentation": "The following example retrieves information about the specified policy contained in the specified policy store. In this example, the requested policy is a template-linked policy, so it returns the ID of the policy template, and the specific principal and resource used by this policy.", + "input": { + "policyId": "9wYixMplbbZQb5fcZHyJhY", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "policyType": "STATIC", + "resource": { + "entityId": "publicFolder", + "entityType": "Album" + }, + "definition": { + "static": { + "description": "Grant everyone of janeFriends UserGroup access to the vacationFolder Album", + "statement": "permit(principal, action, resource in Album::\"publicFolder\");" + } + }, + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z" + } + } + ], "smithy.api#readonly": {} } }, @@ -1787,6 +2323,24 @@ "documentation": "Grants permission to retrieve details about a policy store" }, "smithy.api#documentation": "

Retrieves details about a policy store.

", + "smithy.api#examples": [ + { + "title": "GetPolicyStore", + "documentation": "The following example retrieves details about the specified policy store.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "arn": "arn:aws:verifiedpermissions::123456789012:policy-store/C7v5xMplfFH3i3e4Jrzb1a", + "validationSettings": { + "mode": "STRICT" + }, + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z" + } + } + ], "smithy.api#readonly": {} } }, @@ -1874,6 +2428,24 @@ "documentation": "Grants permission to retrieve the details for the specified policy template in the specified policy store" }, "smithy.api#documentation": "

Retrieve the details for the specified policy template in the specified policy store.

", + "smithy.api#examples": [ + { + "title": "GetPolicyTemplate", + "documentation": "The following example displays the details of the specified policy template.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyTemplateId": "PTEXAMPLEabcdefg111111" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyTemplateId": "PTEXAMPLEabcdefg111111", + "description": "Template for research dept", + "statement": "permit(\n principal ?principal,\n action == Action::\"view\",\n resource in ?resource\n) when {\n principal has department && principal.department == \"research\" \n};", + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z" + } + } + ], "smithy.api#readonly": {} } }, @@ -1968,6 +2540,21 @@ "documentation": "Grants permission to retrieve the details for the specified schema in the specified policy store" }, "smithy.api#documentation": "

Retrieve the details for the specified schema in the specified policy store.

", + "smithy.api#examples": [ + { + "title": "GetSchema", + "documentation": "The following example retrieves the current schema stored in the specified policy store.\n\nNote\nThe JSON in the parameters of this operation are strings that can contain embedded quotation marks (\") within the outermost quotation mark pair. This requires that you stringify the JSON object by preceding all embedded quotation marks with a backslash character ( \\\" ) and combining all lines into a single text line with no line breaks.\n\nExample strings might be displayed wrapped across multiple lines here for readability, but the operation requires the parameters be submitted as single line strings.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "schema": "{\n\"My::Application\": {\n\"actions\": {\n\"remoteAccess\": {\n\"appliesTo\": {\n\"principalTypes\": [\"Employee\"]\n}\n}\n},\n\"entityTypes\": {\n\"Employee\": {\n\"shape\": {\n\"attributes\": {\n\"jobLevel\": { \"type\": \"Long\" },\n\"name\": { \"type\":\"String\" }\n},\n\"type\": \"Record\"\n}\n}\n}\n}\n }", + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z" + } + } + ], "smithy.api#readonly": {} } }, @@ -2309,6 +2896,60 @@ "documentation": "Grants permission to make an authorization decision about a service request described in the parameters" }, "smithy.api#documentation": "

Makes an authorization decision about a service request described in the parameters.\n The information in the parameters can also define additional context that Verified Permissions can\n include in the evaluation. The request is evaluated against all matching policies in the\n specified policy store. The result of the decision is either Allow or\n Deny, along with a list of the policies that resulted in the\n decision.

", + "smithy.api#examples": [ + { + "title": "IsAuthorized - Example 1", + "documentation": "The following example requests an authorization decision for a principal of type User named Alice, who wants to perform the updatePhoto operation, on a resource of type Photo named VacationPhoto94.jpg.\n\nThe response shows that the request was allowed by one policy.", + "input": { + "principal": { + "entityType": "User", + "entityId": "alice" + }, + "action": { + "actionType": "Action", + "actionId": "view" + }, + "resource": { + "entityType": "Photo", + "entityId": "VacationPhoto94.jpg" + }, + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "decision": "ALLOW", + "determiningPolicies": [ + { + "policyId": "9wYxMpljbbZQb5fcZHyJhY" + } + ], + "errors": [] + } + }, + { + "title": "IsAuthorized - Example 2", + "documentation": "The following example is the same as the previous example, except that the principal is User::\"bob\", and the policy store doesn't contain any policy that allows that user access to Album::\"alice_folder\". The output infers that the Deny was implicit because the list of DeterminingPolicies is empty.", + "input": { + "principal": { + "entityType": "User", + "entityId": "bob" + }, + "action": { + "actionType": "Action", + "actionId": "view" + }, + "resource": { + "entityType": "Photo", + "entityId": "VacationPhoto94.jpg" + }, + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "decision": "DENY", + "determiningPolicies": [], + "errors": [] + } + } + ], "smithy.api#readonly": {} } }, @@ -2403,7 +3044,34 @@ "aws.iam#iamAction": { "documentation": "Grants permission to make an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source" }, - "smithy.api#documentation": "

Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

At this time, Verified Permissions accepts tokens from only Amazon Cognito.

Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature.

Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store.

", + "smithy.api#documentation": "

Makes an authorization decision about a service request described in the parameters. The principal in this request comes from an external identity source in the form of an identity token formatted as a JSON web token (JWT). The information in the parameters can also define additional context that Verified Permissions can include in the evaluation. The request is evaluated against all matching policies in the specified policy store. The result of the decision is either Allow or Deny, along with a list of the policies that resulted in the decision.

Verified Permissions validates each token that is specified in a request by checking its expiration date and its signature.

Tokens from an identity source user continue to be usable until they expire. Token revocation and resource deletion have no effect on the validity of a token in your policy store.

", + "smithy.api#examples": [ + { + "title": "IsAuthorizedWithToken - Example 1", + "documentation": "The following example requests an authorization decision for a user who was authenticated by Amazon Cognito. The request uses the identity token provided by Amazon Cognito instead of the access token. In this example, the specified information store is configured to return principals as entities of type CognitoUser. The policy store contains a policy with the following statement.\n\npermit(\n principal == CognitoUser::\"us-east-1_1a2b3c4d5|a1b2c3d4e5f6g7h8i9j0kalbmc\",\n action,\n resource == Photo::\"VacationPhoto94.jpg\"\n);", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "action": { + "actionId": "View", + "actionType": "Action" + }, + "resource": { + "entityId": "vacationPhoto94.jpg", + "entityType": "Photo" + }, + "identityToken": "EgZjxMPlbWUyBggAEEUYOdIBCDM3NDlqMGo3qAIAsAIA" + }, + "output": { + "decision": "ALLOW", + "determiningPolicies": [ + { + "policyId": "9wYxMpljbbZQb5fcZHyJhY" + } + ], + "errors": [] + } + } + ], "smithy.api#readonly": {} } }, @@ -2521,6 +3189,34 @@ "documentation": "Grants permission to return a paginated list of all of the identity sources defined in the specified policy store" }, "smithy.api#documentation": "

Returns a paginated list of all of the identity sources defined in the specified policy store.
", + "smithy.api#examples": [ + { + "title": "ListIdentitySources", + "documentation": "The following example request creates lists the identity sources currently defined in the specified policy store.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "identitySources": [ + { + "createdDate": "2023-05-19T20:29:23.66812Z", + "details": { + "clientIds": [ + "a1b2c3d4e5f6g7h8i9j0kalbmc" + ], + "userPoolArn": "arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5", + "discoveryUrl": "https://cognito-idp.us-east-1.amazonaws.com/us-east-1_1a2b3c4d5", + "openIdIssuer": "COGNITO" + }, + "identitySourceId": "ISEXAMPLEabcdefg111111", + "lastUpdatedDate": "2023-05-19T20:29:23.66812Z", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "principalEntityType": "User" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -2610,6 +3306,154 @@ "documentation": "Grants permission to return a paginated list of all policies stored in the specified policy store" }, "smithy.api#documentation": "

Returns a paginated list of all policies stored in the specified policy store.
", + "smithy.api#examples": [ + { + "title": "ListPolicies - Example 1", + "documentation": "The following example lists all policies in the policy store.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "policies": [ + { + "createdDate": "2024-08-12T18:20:50.99Z", + "definition": { + "static": { + "description": "Grant members of janeFriends UserGroup access to the vacationFolder Album" + } + }, + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyType": "STATIC", + "principal": { + "entityId": "janeFriends", + "entityType": "UserGroup" + }, + "resource": { + "entityId": "vacationFolder", + "entityType": "Album" + } + }, + { + "createdDate": "2024-08-12T18:20:50.99Z", + "definition": { + "static": { + "description": "Grant everyone access to the publicFolder Album" + } + }, + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyId": "Et9KxMplyaDdyurDw8TeFa", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyType": "STATIC", + "resource": { + "entityId": "publicFolder", + "entityType": "Album" + } + } + ] + } + }, + { + "title": "ListPolicies - Example 2", + "documentation": "The following example lists all policies for a specified principal.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "filter": { + "principal": { + "identifier": { + "entityType": "User", + "entityId": "alice" + } + } + } + }, + "output": { + "policies": [ + { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyId": "Et9KxMplyaDdyurDw8TeFa", + "policyType": "STATIC", + "principal": { + "entityType": "User", + "entityId": "alice" + }, + "resource": { + "entityType": "Album", + "entityId": "bob_folder" + }, + "definition": { + "static": { + "description": "An example policy" + } + }, + "createdDate": "2022-12-09T22:55:16.067533Z", + "lastUpdatedDate": "2022-12-09T22:55:16.067533Z" + }, + { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "policyType": "STATIC", + "principal": { + "entityType": "User", + "entityId": "alice" + }, + "resource": { + "entityType": "Album", + "entityId": "alice_folder" + }, + "definition": { + "static": {} + }, + "createdDate": "2022-12-09T23:00:24.66266Z", + "lastUpdatedDate": "2022-12-09T23:00:24.66266Z" + } + ] + } + }, + { + "title": "ListPolicies - Example 3", + "documentation": "The following example uses the Filter parameter to list only the template-linked policies in the specified policy store.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "filter": { + "policyType": "TEMPLATE_LINKED" + } + }, + "output": { + "policies": [ + { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "policyType": "TEMPLATE_LINKED", + "principal": { + "entityType": "User", + "entityId": "alice" + }, + "resource": { + "entityType": "Photo", + "entityId": "pic.jpg" + }, + "definition": { + "templateLinked": { + "policyTemplateId": "PTEXAMPLEabcdefg111111", + "principal": { + "entityType": "User", + "entityId": "alice" + }, + "resource": { + "entityType": "Photo", + "entityId": "pic.jpg" + } + } + }, + "createdDate": "2023-06-13T16:03:07.620867Z", + "lastUpdatedDate": "2023-06-13T16:03:07.620867Z" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -2686,6 +3530,26 @@ "documentation": "Grants permission to return a paginated list of all policy stores in the calling Amazon Web Services account" }, "smithy.api#documentation": "

Returns a paginated list of all policy stores in the calling Amazon Web Services account.

", + "smithy.api#examples": [ + { + "title": "ListPolicyStores", + "documentation": "The following example lists all policy stores in the AWS account in the AWS Region in which you call the operation.", + "output": { + "policyStores": [ + { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "arn": "arn:aws:verifiedpermissions::123456789012:policy-store/C7v5xMplfFH3i3e4Jrzb1a", + "createdDate": "2023-05-16T17:41:29.103459Z" + }, + { + "policyStoreId": "PSEXAMPLEabcdefg222222", + "arn": "arn:aws:verifiedpermissions::123456789012:policy-store/PSEXAMPLEabcdefg222222", + "createdDate": "2023-05-16T18:23:04.985521Z" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -2754,6 +3618,33 @@ "documentation": "Grants permission to return a paginated list of all policy templates in the specified policy store" }, "smithy.api#documentation": "

Returns a paginated list of all policy templates in the specified policy store.

", + "smithy.api#examples": [ + { + "title": "ListPolicyTemplates", + "documentation": "The following example retrieves a list of all of the policy templates in the specified policy store.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + }, + "output": { + "policyTemplates": [ + { + "createdDate": "2024-08-12T18:20:50.99Z", + "description": "Generic template", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyTemplateId": "PTEXAMPLEabcdefg111111" + }, + { + "createdDate": "2024-08-12T18:20:50.99Z", + "description": "Template for research dept", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyTemplateId": "PTEXAMPLEabcdefg222222" + } + ] + } + } + ], "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -3395,7 +4286,7 @@ "policyType": { "target": "com.amazonaws.verifiedpermissions#PolicyType", "traits": { - "smithy.api#documentation": "

The type of the policy. This is one of the following values: static, templateLinked
", + "smithy.api#documentation": "

The type of the policy. This is one of the following values: STATIC, TEMPLATE_LINKED
", "smithy.api#required": {} } }, @@ -3748,6 +4639,26 @@ "documentation": "Grants permission to create or update the policy schema in the specified policy store" }, "smithy.api#documentation": "

Creates or updates the policy schema in the specified policy store. The schema is used to validate any Cedar policies and policy templates submitted to the policy store. Any changes to the schema validate only policies and templates submitted after the schema change. Existing policies and templates are not re-evaluated against the changed schema. If you later update a policy, then it is evaluated against the new schema at that time. Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.
", + "smithy.api#examples": [ + { + "title": "PutSchema", + "documentation": "The following example creates a new schema, or updates an existing schema, in the specified policy store. Note that the schema text is shown line wrapped for readability. You should submit the entire schema text as a single line of text.\n\nNote\nThe JSON in the parameters of this operation are strings that can contain embedded quotation marks (\") within the outermost quotation mark pair. This requires that you stringify the JSON object by preceding all embedded quotation marks with a backslash character ( \\\" ) and combining all lines into a single text line with no line breaks.\n\nExample strings might be displayed wrapped across multiple lines here for readability, but the operation requires the parameters be submitted as single line strings.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "definition": { + "cedarJson": "{\"MySampleNamespace\": {\"actions\": {\"remoteAccess\": {\"appliesTo\": {\"principalTypes\": [\"Employee\"]}}},\"entityTypes\": {\"Employee\": {\"shape\": {\"attributes\": {\"jobLevel\": {\"type\": \"Long\"},\"name\": {\"type\": \"String\"}},\"type\": \"Record\"}}}}}" + } + }, + "output": { + "createdDate": "2023-06-13T19:28:06.003726Z", + "lastUpdatedDate": "2023-06-13T19:28:06.003726Z", + "namespaces": [ + "My::Sample::Namespace" + ], + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + } + } + ], "smithy.api#idempotent": {} } }, @@ -3927,7 +4838,7 @@ "cedarJson": { "target": "com.amazonaws.verifiedpermissions#SchemaJson", "traits": { - "smithy.api#documentation": "

A JSON string representation of the schema supported by applications that use this\n policy store. For more information, see Policy store schema in the\n Amazon Verified Permissions User Guide.

" + "smithy.api#documentation": "

A JSON string representation of the schema supported by applications that use this\n policy store. To delete the schema, run PutSchema with {} for this parameter. \n For more information, see Policy store schema in the\n Amazon Verified Permissions User Guide.

" } } }, @@ -3970,7 +4881,7 @@ "serviceCode": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The code for the Amazon Web Service that owns the quota.

" + "smithy.api#documentation": "

The code for the Amazon Web Services service that owns the quota.

" } }, "quotaCode": { @@ -4157,7 +5068,7 @@ "serviceCode": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

The code for the Amazon Web Service that owns the quota.

" + "smithy.api#documentation": "

The code for the Amazon Web Services service that owns the quota.

" } }, "quotaCode": { @@ -4276,6 +5187,30 @@ "documentation": "Grants permission to update the specified identity source to use a new identity provider (IdP) source, or to change the mapping of identities from the IdP to a different principal entity type" }, "smithy.api#documentation": "

Updates the specified identity source to use a new identity provider (IdP), or to change the mapping of identities from the IdP to a different principal entity type. Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.
", + "smithy.api#examples": [ + { + "title": "UpdateIdentitySource", + "documentation": "The following example updates the configuration of the specified identity source with a new configuration.", + "input": { + "identitySourceId": "ISEXAMPLEabcdefg111111", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "updateConfiguration": { + "cognitoUserPoolConfiguration": { + "userPoolArn": "arn:aws:cognito-idp:us-east-1:123456789012:userpool/us-east-1_1a2b3c4d5", + "clientIds": [ + "a1b2c3d4e5f6g7h8i9j0kalbmc" + ] + } + } + }, + "output": { + "createdDate": "2023-05-19T20:30:28.173926Z", + "identitySourceId": "ISEXAMPLEabcdefg111111", + "lastUpdatedDate": "2023-05-22T20:45:59.962216Z", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a" + } + } + ], "smithy.api#idempotent": {} } }, @@ -4300,7 +5235,7 @@ "target": "com.amazonaws.verifiedpermissions#UpdateConfiguration", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "

Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. At this time, the only valid member of this structure is an Amazon Cognito user pool configuration. You must specify a userPoolArn, and optionally, a ClientId.
", + "smithy.api#documentation": "

Specifies the details required to communicate with the identity provider (IdP)\n associated with this identity source.

", "smithy.api#required": {} } }, @@ -4493,6 +5428,32 @@ "documentation": "Grants permission to modify the specified Cedar static policy in the specified policy store" }, "smithy.api#documentation": "

Modifies a Cedar static policy in the specified policy store. You can change only certain elements of the UpdatePolicyDefinition parameter. You can directly update only static policies. To change a template-linked policy, you must update the template instead, using UpdatePolicyTemplate. If policy validation is enabled in the policy store, then updating a static policy causes Verified Permissions to validate the policy against the schema in the policy store. If the updated static policy doesn't pass validation, the operation fails and the update isn't stored. When you edit a static policy, you can change only certain elements of a static policy: the action referenced by the policy, and a condition clause, such as when and unless. You can't change these elements of a static policy: changing a policy from a static policy to a template-linked policy, changing the effect of a static policy from permit or forbid, the principal referenced by a static policy, and the resource referenced by a static policy. To update a template-linked policy, you must update the template instead. Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.
", + "smithy.api#examples": [ + { + "title": "UpdatePolicy", + "documentation": "The following example replaces the definition of the specified static policy with a new one.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "definition": { + "static": { + "statement": "permit(principal, action, resource in Album::\"public_folder\");" + } + } + }, + "output": { + "createdDate": "2024-08-12T18:20:50.99Z", + "lastUpdatedDate": "2024-08-12T18:20:50.99Z", + "policyId": "9wYxMpljbbZQb5fcZHyJhY", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyType": "STATIC", + "resource": { + "entityType": "Album", + "entityId": "public_folder" + } + } + } + ], "smithy.api#idempotent": {} } }, @@ -4628,6 +5589,24 @@ "documentation": "Grants permission to modify the validation setting for a policy store" }, "smithy.api#documentation": "

Modifies the validation setting for a policy store. Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.
", + "smithy.api#examples": [ + { + "title": "UpdatePolicyStore", + "documentation": "The following example turns off the validation settings for a policy store.", + "input": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "validationSettings": { + "mode": "OFF" + } + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "arn": "arn:aws:verifiedpermissions::123456789012:policy-store/C7v5xMplfFH3i3e4Jrzb1a", + "createdDate": "2023-05-17T18:36:10.134448Z", + "lastUpdatedDate": "2023-05-23T18:18:12.443083Z" + } + } + ], "smithy.api#idempotent": {} } }, @@ -4716,6 +5695,24 @@ "documentation": "Grants permission to update the specified policy template" }, "smithy.api#documentation": "

Updates the specified policy template. You can update only the description and some elements of the policyBody. Changes you make to the policy template content are immediately (within the constraints of eventual consistency) reflected in authorization decisions that involve all template-linked policies instantiated from this template. Verified Permissions is eventually consistent. It can take a few seconds for a new or changed element to propagate through the service and be visible in the results of other Verified Permissions operations.
", + "smithy.api#examples": [ + { + "title": "UpdatePolicyTemplate", + "documentation": "The following example updates a policy template with both a new description and a new policy body. The effect, principal, and resource are the same as the original policy template. Only the action in the head, and the when and unless clauses can be different.\n\nNote\nThe JSON in the parameters of this operation are strings that can contain embedded quotation marks (\") within the outermost quotation mark pair. This requires that you stringify the JSON object by preceding all embedded quotation marks with a backslash character ( \\\" ) and combining all lines into a single text line with no line breaks.\n\nExample strings might be displayed wrapped across multiple lines here for readability, but the operation requires the parameters be submitted as single line strings.", + "input": { + "description": "My updated template description", + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyTemplateId": "PTEXAMPLEabcdefg111111", + "statement": "\"ResearchAccess\"\npermit(\nprincipal in ?principal,\naction == Action::\"view\",\nresource in ?resource\"\n)\nwhen {\nprincipal has department && principal.department == \"research\"\n};" + }, + "output": { + "policyStoreId": "C7v5xMplfFH3i3e4Jrzb1a", + "policyTemplateId": "PTEXAMPLEabcdefg111111", + "createdDate": "2023-05-17T18:58:48.795411Z", + "lastUpdatedDate": "2023-05-17T19:18:48.870209Z" + } + } + ], "smithy.api#idempotent": {} } }, diff --git a/models/worklink.json b/models/worklink.json deleted file mode 100644 index 5ef9ccf2c6..0000000000 --- a/models/worklink.json +++ /dev/null @@ -1,3965 +0,0 @@ -{ - "smithy": "2.0", - "metadata": { - "suppressions": [ - { - "id": "HttpMethodSemantics", - "namespace": "*" - }, - { - "id": "HttpResponseCodeSemantics", - "namespace": "*" - }, - { - "id": "PaginatedTrait", - "namespace": "*" - }, - { - "id": "HttpHeaderTrait", - "namespace": "*" - }, - { - "id": "HttpUriConflict", - "namespace": "*" - }, - { - "id": "Service", - "namespace": "*" - } - ] - }, - "shapes": { - "com.amazonaws.worklink#AcmCertificateArn": { - "type": "string", - "traits": { - "smithy.api#pattern": "^arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:[\\w+=/,.@-]*:[0-9]+:[\\w+=,.@-]+(/[\\w+=/,.@-]+)*$" - } - }, - "com.amazonaws.worklink#AssociateDomain": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#AssociateDomainRequest" - }, - "output": { - "target": "com.amazonaws.worklink#AssociateDomainResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceAlreadyExistsException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Specifies a domain to be associated to Amazon WorkLink.

", - "smithy.api#http": { - "method": "POST", - "uri": "/associateDomain", - "code": 200 - } - } - }, - "com.amazonaws.worklink#AssociateDomainRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the fleet.

", - "smithy.api#required": {} - } - }, - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The fully qualified domain name (FQDN).

", - "smithy.api#required": {} - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The name to display.

" - } - }, - "AcmCertificateArn": { - "target": "com.amazonaws.worklink#AcmCertificateArn", - "traits": { - "smithy.api#documentation": "

The ARN of an issued ACM certificate that is valid for the domain being associated.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#AssociateDomainResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#AssociateWebsiteAuthorizationProvider": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#AssociateWebsiteAuthorizationProviderRequest" - }, - "output": { - "target": "com.amazonaws.worklink#AssociateWebsiteAuthorizationProviderResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceAlreadyExistsException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Associates a website authorization provider with a specified fleet. This is used to authorize users against associated websites in the company network.

", - "smithy.api#http": { - "method": "POST", - "uri": "/associateWebsiteAuthorizationProvider", - "code": 200 - } - } - }, - "com.amazonaws.worklink#AssociateWebsiteAuthorizationProviderRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "AuthorizationProviderType": { - "target": "com.amazonaws.worklink#AuthorizationProviderType", - "traits": { - "smithy.api#documentation": "

The authorization provider type.

", - "smithy.api#required": {} - } - }, - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The domain name of the authorization provider. This applies only to SAML-based\n authorization providers.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#AssociateWebsiteAuthorizationProviderResponse": { - "type": "structure", - "members": { - "AuthorizationProviderId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

A unique identifier for the authorization provider.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#AssociateWebsiteCertificateAuthority": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#AssociateWebsiteCertificateAuthorityRequest" - }, - "output": { - "target": "com.amazonaws.worklink#AssociateWebsiteCertificateAuthorityResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceAlreadyExistsException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Imports the root certificate of a certificate authority (CA) used to obtain TLS\n certificates used by associated websites within the company network.

", - "smithy.api#http": { - "method": "POST", - "uri": "/associateWebsiteCertificateAuthority", - "code": 200 - } - } - }, - "com.amazonaws.worklink#AssociateWebsiteCertificateAuthorityRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "Certificate": { - "target": "com.amazonaws.worklink#Certificate", - "traits": { - "smithy.api#documentation": "

The root certificate of the CA.

", - "smithy.api#required": {} - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The certificate name to display.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#AssociateWebsiteCertificateAuthorityResponse": { - "type": "structure", - "members": { - "WebsiteCaId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

A unique identifier for the CA.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#AuditStreamArn": { - "type": "string", - "traits": { - "smithy.api#pattern": "^arn:aws:kinesis:.+:[0-9]{12}:stream/AmazonWorkLink-.*$" - } - }, - "com.amazonaws.worklink#AuthorizationProviderType": { - "type": "enum", - "members": { - "SAML": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SAML" - } - } - } - }, - "com.amazonaws.worklink#Boolean": { - "type": "boolean" - }, - "com.amazonaws.worklink#Certificate": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 8192 - }, - "smithy.api#pattern": "^-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?$" - } - }, - "com.amazonaws.worklink#CertificateChain": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 32768 - }, - "smithy.api#pattern": "^(-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}\\u000D?\\u000A)*-{5}BEGIN CERTIFICATE-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END CERTIFICATE-{5}(\\u000D?\\u000A)?$" - } - }, - "com.amazonaws.worklink#CompanyCode": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 32 - } - } - }, - "com.amazonaws.worklink#CreateFleet": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#CreateFleetRequest" - }, - "output": { - "target": "com.amazonaws.worklink#CreateFleetResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceAlreadyExistsException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Creates a fleet. A fleet consists of resources and the configuration that delivers\n associated websites to authorized users who download and set up the Amazon WorkLink app.

", - "smithy.api#http": { - "method": "POST", - "uri": "/createFleet", - "code": 200 - } - } - }, - "com.amazonaws.worklink#CreateFleetRequest": { - "type": "structure", - "members": { - "FleetName": { - "target": "com.amazonaws.worklink#FleetName", - "traits": { - "smithy.api#documentation": "

A unique name for the fleet.

", - "smithy.api#required": {} - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The fleet name to display.

" - } - }, - "OptimizeForEndUserLocation": { - "target": "com.amazonaws.worklink#Boolean", - "traits": { - "smithy.api#documentation": "

The option to optimize for better performance by routing traffic through the closest\n AWS Region to users, which may be outside of your home Region.

" - } - }, - "Tags": { - "target": "com.amazonaws.worklink#TagMap", - "traits": { - "smithy.api#documentation": "

The tags to add to the resource. A tag is a key-value pair.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#CreateFleetResponse": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the fleet.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DateTime": { - "type": "timestamp" - }, - "com.amazonaws.worklink#DeleteFleet": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DeleteFleetRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DeleteFleetResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Deletes a fleet. Prevents users from accessing previously associated websites.

", - "smithy.api#http": { - "method": "POST", - "uri": "/deleteFleet", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DeleteFleetRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DeleteFleetResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DescribeAuditStreamConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DescribeAuditStreamConfigurationRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DescribeAuditStreamConfigurationResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Describes the configuration for delivering audit streams to the customer account.

", - "smithy.api#http": { - "method": "POST", - "uri": "/describeAuditStreamConfiguration", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DescribeAuditStreamConfigurationRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DescribeAuditStreamConfigurationResponse": { - "type": "structure", - "members": { - "AuditStreamArn": { - "target": "com.amazonaws.worklink#AuditStreamArn", - "traits": { - "smithy.api#documentation": "

The ARN of the Amazon Kinesis data stream that will receive the audit events.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DescribeCompanyNetworkConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DescribeCompanyNetworkConfigurationRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DescribeCompanyNetworkConfigurationResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Describes the networking configuration to access the internal websites associated with\n the specified fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/describeCompanyNetworkConfiguration", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DescribeCompanyNetworkConfigurationRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DescribeCompanyNetworkConfigurationResponse": { - "type": "structure", - "members": { - "VpcId": { - "target": "com.amazonaws.worklink#VpcId", - "traits": { - "smithy.api#documentation": "

The VPC with connectivity to associated websites.

" - } - }, - "SubnetIds": { - "target": "com.amazonaws.worklink#SubnetIds", - "traits": { - "smithy.api#documentation": "

The subnets used for X-ENI connections from Amazon WorkLink rendering containers.

" - } - }, - "SecurityGroupIds": { - "target": "com.amazonaws.worklink#SecurityGroupIds", - "traits": { - "smithy.api#documentation": "

The security groups associated with access to the provided subnets.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DescribeDevice": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DescribeDeviceRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DescribeDeviceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Provides information about a user's device.

", - "smithy.api#http": { - "method": "POST", - "uri": "/describeDevice", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DescribeDevicePolicyConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DescribeDevicePolicyConfigurationRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DescribeDevicePolicyConfigurationResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Describes the device policy configuration for the specified fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/describeDevicePolicyConfiguration", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DescribeDevicePolicyConfigurationRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DescribeDevicePolicyConfigurationResponse": { - "type": "structure", - "members": { - "DeviceCaCertificate": { - "target": "com.amazonaws.worklink#Certificate", - "traits": { - "smithy.api#documentation": "

The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DescribeDeviceRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "DeviceId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

A unique identifier for a registered user's device.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DescribeDeviceResponse": { - "type": "structure", - "members": { - "Status": { - "target": "com.amazonaws.worklink#DeviceStatus", - "traits": { - "smithy.api#documentation": "

The current state of the device.

" - } - }, - "Model": { - "target": "com.amazonaws.worklink#DeviceModel", - "traits": { - "smithy.api#documentation": "

The model of the device.

" - } - }, - "Manufacturer": { - "target": "com.amazonaws.worklink#DeviceManufacturer", - "traits": { - "smithy.api#documentation": "

The manufacturer of the device.

" - } - }, - "OperatingSystem": { - "target": "com.amazonaws.worklink#DeviceOperatingSystemName", - "traits": { - "smithy.api#documentation": "

The operating system of the device.

" - } - }, - "OperatingSystemVersion": { - "target": "com.amazonaws.worklink#DeviceOperatingSystemVersion", - "traits": { - "smithy.api#documentation": "

The operating system version of the device.

" - } - }, - "PatchLevel": { - "target": "com.amazonaws.worklink#DevicePatchLevel", - "traits": { - "smithy.api#documentation": "

The operating system patch level of the device.

" - } - }, - "FirstAccessedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The date that the device first signed in to Amazon WorkLink.

" - } - }, - "LastAccessedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The date that the device last accessed Amazon WorkLink.

" - } - }, - "Username": { - "target": "com.amazonaws.worklink#Username", - "traits": { - "smithy.api#documentation": "

The user name associated with the device.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DescribeDomain": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DescribeDomainRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DescribeDomainResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Provides information about the domain.

", - "smithy.api#http": { - "method": "POST", - "uri": "/describeDomain", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DescribeDomainRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The name of the domain.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DescribeDomainResponse": { - "type": "structure", - "members": { - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The name of the domain.

" - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The name to display.

" - } - }, - "CreatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time that the domain was added.

" - } - }, - "DomainStatus": { - "target": "com.amazonaws.worklink#DomainStatus", - "traits": { - "smithy.api#documentation": "

The current state for the domain.

" - } - }, - "AcmCertificateArn": { - "target": "com.amazonaws.worklink#AcmCertificateArn", - "traits": { - "smithy.api#documentation": "

The ARN of an issued ACM certificate that is valid for the domain being associated.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DescribeFleetMetadata": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DescribeFleetMetadataRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DescribeFleetMetadataResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Provides basic information for the specified fleet, excluding identity provider,\n networking, and device configuration details.

", - "smithy.api#http": { - "method": "POST", - "uri": "/describeFleetMetadata", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DescribeFleetMetadataRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the fleet.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DescribeFleetMetadataResponse": { - "type": "structure", - "members": { - "CreatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time that the fleet was created.

" - } - }, - "LastUpdatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time that the fleet was last updated.

" - } - }, - "FleetName": { - "target": "com.amazonaws.worklink#FleetName", - "traits": { - "smithy.api#documentation": "

The name of the fleet.

" - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The name to display.

" - } - }, - "OptimizeForEndUserLocation": { - "target": "com.amazonaws.worklink#Boolean", - "traits": { - "smithy.api#documentation": "

The option to optimize for better performance by routing traffic through the closest\n AWS Region to users, which may be outside of your home Region.

" - } - }, - "CompanyCode": { - "target": "com.amazonaws.worklink#CompanyCode", - "traits": { - "smithy.api#documentation": "

The identifier used by users to sign in to the Amazon WorkLink app.

" - } - }, - "FleetStatus": { - "target": "com.amazonaws.worklink#FleetStatus", - "traits": { - "smithy.api#documentation": "

The current state of the fleet.

" - } - }, - "Tags": { - "target": "com.amazonaws.worklink#TagMap", - "traits": { - "smithy.api#documentation": "

The tags attached to the resource. A tag is a key-value pair.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DescribeIdentityProviderConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DescribeIdentityProviderConfigurationRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DescribeIdentityProviderConfigurationResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Describes the identity provider configuration of the specified fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/describeIdentityProviderConfiguration", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DescribeIdentityProviderConfigurationRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DescribeIdentityProviderConfigurationResponse": { - "type": "structure", - "members": { - "IdentityProviderType": { - "target": "com.amazonaws.worklink#IdentityProviderType", - "traits": { - "smithy.api#documentation": "

The type of identity provider.

" - } - }, - "ServiceProviderSamlMetadata": { - "target": "com.amazonaws.worklink#SamlMetadata", - "traits": { - "smithy.api#documentation": "

The SAML metadata document uploaded to the user’s identity provider.

" - } - }, - "IdentityProviderSamlMetadata": { - "target": "com.amazonaws.worklink#SamlMetadata", - "traits": { - "smithy.api#documentation": "

The SAML metadata document provided by the user’s identity provider.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DescribeWebsiteCertificateAuthority": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DescribeWebsiteCertificateAuthorityRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DescribeWebsiteCertificateAuthorityResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Provides information about the certificate authority.

", - "smithy.api#http": { - "method": "POST", - "uri": "/describeWebsiteCertificateAuthority", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DescribeWebsiteCertificateAuthorityRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "WebsiteCaId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

A unique identifier for the certificate authority.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DescribeWebsiteCertificateAuthorityResponse": { - "type": "structure", - "members": { - "Certificate": { - "target": "com.amazonaws.worklink#Certificate", - "traits": { - "smithy.api#documentation": "

The root certificate of the certificate authority.

" - } - }, - "CreatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time that the certificate authority was added.

" - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The certificate name to display.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DeviceManufacturer": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - } - } - }, - "com.amazonaws.worklink#DeviceModel": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - } - } - }, - "com.amazonaws.worklink#DeviceOperatingSystemName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - } - } - }, - "com.amazonaws.worklink#DeviceOperatingSystemVersion": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - } - } - }, - "com.amazonaws.worklink#DevicePatchLevel": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - } - } - }, - "com.amazonaws.worklink#DeviceStatus": { - "type": "enum", - "members": { - "ACTIVE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVE" - } - }, - "SIGNED_OUT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SIGNED_OUT" - } - } - } - }, - "com.amazonaws.worklink#DeviceSummary": { - "type": "structure", - "members": { - "DeviceId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

The ID of the device.

" - } - }, - "DeviceStatus": { - "target": "com.amazonaws.worklink#DeviceStatus", - "traits": { - "smithy.api#documentation": "

The status of the device.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

The summary of devices.

" - } - }, - "com.amazonaws.worklink#DeviceSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.worklink#DeviceSummary" - } - }, - "com.amazonaws.worklink#DisassociateDomain": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DisassociateDomainRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DisassociateDomainResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Disassociates a domain from Amazon WorkLink. End users lose the ability to access the domain with Amazon WorkLink.

", - "smithy.api#http": { - "method": "POST", - "uri": "/disassociateDomain", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DisassociateDomainRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The name of the domain.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DisassociateDomainResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DisassociateWebsiteAuthorizationProvider": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DisassociateWebsiteAuthorizationProviderRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DisassociateWebsiteAuthorizationProviderResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceAlreadyExistsException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Disassociates a website authorization provider from a specified fleet. After the\n disassociation, users can't load any associated websites that require this authorization\n provider.

", - "smithy.api#http": { - "method": "POST", - "uri": "/disassociateWebsiteAuthorizationProvider", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DisassociateWebsiteAuthorizationProviderRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "AuthorizationProviderId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

A unique identifier for the authorization provider.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DisassociateWebsiteAuthorizationProviderResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DisassociateWebsiteCertificateAuthority": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#DisassociateWebsiteCertificateAuthorityRequest" - }, - "output": { - "target": "com.amazonaws.worklink#DisassociateWebsiteCertificateAuthorityResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Removes a certificate authority (CA).

", - "smithy.api#http": { - "method": "POST", - "uri": "/disassociateWebsiteCertificateAuthority", - "code": 200 - } - } - }, - "com.amazonaws.worklink#DisassociateWebsiteCertificateAuthorityRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "WebsiteCaId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

A unique identifier for the CA.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#DisassociateWebsiteCertificateAuthorityResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#DisplayName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 100 - } - } - }, - "com.amazonaws.worklink#DomainName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 253 - }, - "smithy.api#pattern": "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$" - } - }, - "com.amazonaws.worklink#DomainStatus": { - "type": "enum", - "members": { - "PENDING_VALIDATION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PENDING_VALIDATION" - } - }, - "ASSOCIATING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ASSOCIATING" - } - }, - "ACTIVE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVE" - } - }, - "INACTIVE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INACTIVE" - } - }, - "DISASSOCIATING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DISASSOCIATING" - } - }, - "DISASSOCIATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DISASSOCIATED" - } - }, - "FAILED_TO_ASSOCIATE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FAILED_TO_ASSOCIATE" - } - }, - "FAILED_TO_DISASSOCIATE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FAILED_TO_DISASSOCIATE" - } - } - } - }, - "com.amazonaws.worklink#DomainSummary": { - "type": "structure", - "members": { - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The name of the domain.

", - "smithy.api#required": {} - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The name to display.

" - } - }, - "CreatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time that the domain was created.

", - "smithy.api#required": {} - } - }, - "DomainStatus": { - "target": "com.amazonaws.worklink#DomainStatus", - "traits": { - "smithy.api#documentation": "

The status of the domain.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "

The summary of the domain.

" - } - }, - "com.amazonaws.worklink#DomainSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.worklink#DomainSummary" - } - }, - "com.amazonaws.worklink#ExceptionMessage": { - "type": "string" - }, - "com.amazonaws.worklink#FleetArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 20, - "max": 2048 - } - } - }, - "com.amazonaws.worklink#FleetName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 48 - }, - "smithy.api#pattern": "^[a-z0-9](?:[a-z0-9\\-]{0,46}[a-z0-9])?$" - } - }, - "com.amazonaws.worklink#FleetStatus": { - "type": "enum", - "members": { - "CREATING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATING" - } - }, - "ACTIVE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVE" - } - }, - "DELETING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETING" - } - }, - "DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETED" - } - }, - "FAILED_TO_CREATE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FAILED_TO_CREATE" - } - }, - "FAILED_TO_DELETE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FAILED_TO_DELETE" - } - } - } - }, - "com.amazonaws.worklink#FleetSummary": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the fleet.

" - } - }, - "CreatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time when the fleet was created.

" - } - }, - "LastUpdatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time when the fleet was last updated.

" - } - }, - "FleetName": { - "target": "com.amazonaws.worklink#FleetName", - "traits": { - "smithy.api#documentation": "

The name of the fleet.

" - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The name of the fleet to display.

" - } - }, - "CompanyCode": { - "target": "com.amazonaws.worklink#CompanyCode", - "traits": { - "smithy.api#documentation": "

The identifier used by users to sign into the Amazon WorkLink app.

" - } - }, - "FleetStatus": { - "target": "com.amazonaws.worklink#FleetStatus", - "traits": { - "smithy.api#documentation": "

The status of the fleet.

" - } - }, - "Tags": { - "target": "com.amazonaws.worklink#TagMap", - "traits": { - "smithy.api#documentation": "

The tags attached to the resource. A tag is a key-value pair.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

The summary of the fleet.

" - } - }, - "com.amazonaws.worklink#FleetSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.worklink#FleetSummary" - } - }, - "com.amazonaws.worklink#Id": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - } - } - }, - "com.amazonaws.worklink#IdentityProviderType": { - "type": "enum", - "members": { - "SAML": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SAML" - } - } - } - }, - "com.amazonaws.worklink#InternalServerErrorException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.worklink#ExceptionMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The service is temporarily unavailable.

", - "smithy.api#error": "server", - "smithy.api#httpError": 500 - } - }, - "com.amazonaws.worklink#InvalidRequestException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.worklink#ExceptionMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The request is not valid.

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.worklink#ListDevices": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#ListDevicesRequest" - }, - "output": { - "target": "com.amazonaws.worklink#ListDevicesResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Retrieves a list of devices registered with the specified fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/listDevices", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } - } - }, - "com.amazonaws.worklink#ListDevicesRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation. If\n this value is null, it retrieves the first page.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.worklink#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to be included in the next page.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#ListDevicesResponse": { - "type": "structure", - "members": { - "Devices": { - "target": "com.amazonaws.worklink#DeviceSummaryList", - "traits": { - "smithy.api#documentation": "

Information about the devices.

" - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation. If\n there are no more pages, this value is null.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#ListDomains": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#ListDomainsRequest" - }, - "output": { - "target": "com.amazonaws.worklink#ListDomainsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Retrieves a list of domains associated to a specified fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/listDomains", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } - } - }, - "com.amazonaws.worklink#ListDomainsRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation. If\n this value is null, it retrieves the first page.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.worklink#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to be included in the next page.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#ListDomainsResponse": { - "type": "structure", - "members": { - "Domains": { - "target": "com.amazonaws.worklink#DomainSummaryList", - "traits": { - "smithy.api#documentation": "

Information about the domains.

" - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation. If\n there are no more pages, this value is null.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#ListFleets": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#ListFleetsRequest" - }, - "output": { - "target": "com.amazonaws.worklink#ListFleetsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Retrieves a list of fleets for the current account and Region.

", - "smithy.api#http": { - "method": "POST", - "uri": "/listFleets", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } - } - }, - "com.amazonaws.worklink#ListFleetsRequest": { - "type": "structure", - "members": { - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation. If\n this value is null, it retrieves the first page.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.worklink#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to be included in the next page.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#ListFleetsResponse": { - "type": "structure", - "members": { - "FleetSummaryList": { - "target": "com.amazonaws.worklink#FleetSummaryList", - "traits": { - "smithy.api#documentation": "

The summary list of the fleets.

" - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation. If\n there are no more pages, this value is null.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#ListTagsForResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#ListTagsForResourceRequest" - }, - "output": { - "target": "com.amazonaws.worklink#ListTagsForResourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InvalidRequestException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Retrieves a list of tags for the specified resource.

", - "smithy.api#http": { - "method": "GET", - "uri": "/tags/{ResourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.worklink#ListTagsForResourceRequest": { - "type": "structure", - "members": { - "ResourceArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the fleet.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#ListTagsForResourceResponse": { - "type": "structure", - "members": { - "Tags": { - "target": "com.amazonaws.worklink#TagMap", - "traits": { - "smithy.api#documentation": "

The tags attached to the resource. A tag is a key-value pair.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#ListWebsiteAuthorizationProviders": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#ListWebsiteAuthorizationProvidersRequest" - }, - "output": { - "target": "com.amazonaws.worklink#ListWebsiteAuthorizationProvidersResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Retrieves a list of website authorization providers associated with a specified fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/listWebsiteAuthorizationProviders", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } - } - }, - "com.amazonaws.worklink#ListWebsiteAuthorizationProvidersRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

" - } - }, - "MaxResults": { - "target": "com.amazonaws.worklink#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to be included in the next page.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#ListWebsiteAuthorizationProvidersResponse": { - "type": "structure", - "members": { - "WebsiteAuthorizationProviders": { - "target": "com.amazonaws.worklink#WebsiteAuthorizationProvidersSummaryList", - "traits": { - "smithy.api#documentation": "

The website authorization providers.

" - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token to use to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#ListWebsiteCertificateAuthorities": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#ListWebsiteCertificateAuthoritiesRequest" - }, - "output": { - "target": "com.amazonaws.worklink#ListWebsiteCertificateAuthoritiesResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Retrieves a list of certificate authorities added for the current account and\n Region.

", - "smithy.api#http": { - "method": "POST", - "uri": "/listWebsiteCertificateAuthorities", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "MaxResults" - } - } - }, - "com.amazonaws.worklink#ListWebsiteCertificateAuthoritiesRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "MaxResults": { - "target": "com.amazonaws.worklink#MaxResults", - "traits": { - "smithy.api#documentation": "

The maximum number of results to be included in the next page.

" - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation. If\n this value is null, it retrieves the first page.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#ListWebsiteCertificateAuthoritiesResponse": { - "type": "structure", - "members": { - "WebsiteCertificateAuthorities": { - "target": "com.amazonaws.worklink#WebsiteCaSummaryList", - "traits": { - "smithy.api#documentation": "

Information about the certificates.

" - } - }, - "NextToken": { - "target": "com.amazonaws.worklink#NextToken", - "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation. If\n there are no more pages, this value is null.

" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#MaxResults": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1 - } - } - }, - "com.amazonaws.worklink#NextToken": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 4096 - }, - "smithy.api#pattern": "^[\\w\\-]+$" - } - }, - "com.amazonaws.worklink#ResourceAlreadyExistsException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.worklink#ExceptionMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The resource already exists.

", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.worklink#ResourceNotFoundException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.worklink#ExceptionMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The requested resource was not found.

", - "smithy.api#error": "client", - "smithy.api#httpError": 404 - } - }, - "com.amazonaws.worklink#RestoreDomainAccess": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#RestoreDomainAccessRequest" - }, - "output": { - "target": "com.amazonaws.worklink#RestoreDomainAccessResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Moves a domain to ACTIVE status if it was in the INACTIVE status.

", - "smithy.api#http": { - "method": "POST", - "uri": "/restoreDomainAccess", - "code": 200 - } - } - }, - "com.amazonaws.worklink#RestoreDomainAccessRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The name of the domain.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#RestoreDomainAccessResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#RevokeDomainAccess": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#RevokeDomainAccessRequest" - }, - "output": { - "target": "com.amazonaws.worklink#RevokeDomainAccessResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Moves a domain to INACTIVE status if it was in the ACTIVE status.

", - "smithy.api#http": { - "method": "POST", - "uri": "/revokeDomainAccess", - "code": 200 - } - } - }, - "com.amazonaws.worklink#RevokeDomainAccessRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The name of the domain.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#RevokeDomainAccessResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#SamlMetadata": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 204800 - } - } - }, - "com.amazonaws.worklink#SecurityGroupId": { - "type": "string", - "traits": { - "smithy.api#pattern": "^sg-([0-9a-f]{8}|[0-9a-f]{17})$" - } - }, - "com.amazonaws.worklink#SecurityGroupIds": { - "type": "list", - "member": { - "target": "com.amazonaws.worklink#SecurityGroupId" - }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 5 - } - } - }, - "com.amazonaws.worklink#SignOutUser": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#SignOutUserRequest" - }, - "output": { - "target": "com.amazonaws.worklink#SignOutUserResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Signs the user out from all of their devices. The user can sign in again if they have\n valid credentials.

", - "smithy.api#http": { - "method": "POST", - "uri": "/signOutUser", - "code": 200 - } - } - }, - "com.amazonaws.worklink#SignOutUserRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "Username": { - "target": "com.amazonaws.worklink#Username", - "traits": { - "smithy.api#documentation": "

The name of the user.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#SignOutUserResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#SubnetId": { - "type": "string", - "traits": { - "smithy.api#pattern": "^subnet-([0-9a-f]{8}|[0-9a-f]{17})$" - } - }, - "com.amazonaws.worklink#SubnetIds": { - "type": "list", - "member": { - "target": "com.amazonaws.worklink#SubnetId" - } - }, - "com.amazonaws.worklink#TagKey": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 128 - }, - "smithy.api#pattern": "^(?!aws:)[a-zA-Z+-=._:/]+$" - } - }, - "com.amazonaws.worklink#TagKeyList": { - "type": "list", - "member": { - "target": "com.amazonaws.worklink#TagKey" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 50 - } - } - }, - "com.amazonaws.worklink#TagMap": { - "type": "map", - "key": { - "target": "com.amazonaws.worklink#TagKey" - }, - "value": { - "target": "com.amazonaws.worklink#TagValue" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 50 - } - } - }, - "com.amazonaws.worklink#TagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#TagResourceRequest" - }, - "output": { - "target": "com.amazonaws.worklink#TagResourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InvalidRequestException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Adds or overwrites one or more tags for the specified resource, such as a fleet. Each tag consists of a key and an optional value. If a resource already has a tag with the same key, this operation updates its value.

", - "smithy.api#http": { - "method": "POST", - "uri": "/tags/{ResourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.worklink#TagResourceRequest": { - "type": "structure", - "members": { - "ResourceArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the fleet.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "Tags": { - "target": "com.amazonaws.worklink#TagMap", - "traits": { - "smithy.api#documentation": "

The tags to add to the resource. A tag is a key-value pair.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#TagResourceResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#TagValue": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 256 - } - } - }, - "com.amazonaws.worklink#TooManyRequestsException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.worklink#ExceptionMessage" - } - }, - "traits": { - "smithy.api#documentation": "

The number of requests exceeds the limit.

", - "smithy.api#error": "client", - "smithy.api#httpError": 429 - } - }, - "com.amazonaws.worklink#UnauthorizedException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.worklink#ExceptionMessage" - } - }, - "traits": { - "smithy.api#documentation": "

You are not authorized to perform this action.

", - "smithy.api#error": "client", - "smithy.api#httpError": 403 - } - }, - "com.amazonaws.worklink#UntagResource": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#UntagResourceRequest" - }, - "output": { - "target": "com.amazonaws.worklink#UntagResourceResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InvalidRequestException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Removes one or more tags from the specified resource.

", - "smithy.api#http": { - "method": "DELETE", - "uri": "/tags/{ResourceArn}", - "code": 200 - } - } - }, - "com.amazonaws.worklink#UntagResourceRequest": { - "type": "structure", - "members": { - "ResourceArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the fleet.

", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "TagKeys": { - "target": "com.amazonaws.worklink#TagKeyList", - "traits": { - "smithy.api#documentation": "

The list of tag keys to remove from the resource.

", - "smithy.api#httpQuery": "tagKeys", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#UntagResourceResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#UpdateAuditStreamConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#UpdateAuditStreamConfigurationRequest" - }, - "output": { - "target": "com.amazonaws.worklink#UpdateAuditStreamConfigurationResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Updates the audit stream configuration for the fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/updateAuditStreamConfiguration", - "code": 200 - } - } - }, - "com.amazonaws.worklink#UpdateAuditStreamConfigurationRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "AuditStreamArn": { - "target": "com.amazonaws.worklink#AuditStreamArn", - "traits": { - "smithy.api#documentation": "

The ARN of the Amazon Kinesis data stream that receives the audit events.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#UpdateAuditStreamConfigurationResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#UpdateCompanyNetworkConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#UpdateCompanyNetworkConfigurationRequest" - }, - "output": { - "target": "com.amazonaws.worklink#UpdateCompanyNetworkConfigurationResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Updates the company network configuration for the fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/updateCompanyNetworkConfiguration", - "code": 200 - } - } - }, - "com.amazonaws.worklink#UpdateCompanyNetworkConfigurationRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "VpcId": { - "target": "com.amazonaws.worklink#VpcId", - "traits": { - "smithy.api#documentation": "

The VPC with connectivity to associated websites.

", - "smithy.api#required": {} - } - }, - "SubnetIds": { - "target": "com.amazonaws.worklink#SubnetIds", - "traits": { - "smithy.api#documentation": "

The subnets used for X-ENI connections from Amazon WorkLink rendering containers.

", - "smithy.api#required": {} - } - }, - "SecurityGroupIds": { - "target": "com.amazonaws.worklink#SecurityGroupIds", - "traits": { - "smithy.api#documentation": "

The security groups associated with access to the provided subnets.

", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#UpdateCompanyNetworkConfigurationResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#UpdateDevicePolicyConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#UpdateDevicePolicyConfigurationRequest" - }, - "output": { - "target": "com.amazonaws.worklink#UpdateDevicePolicyConfigurationResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Updates the device policy configuration for the fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/updateDevicePolicyConfiguration", - "code": 200 - } - } - }, - "com.amazonaws.worklink#UpdateDevicePolicyConfigurationRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "DeviceCaCertificate": { - "target": "com.amazonaws.worklink#CertificateChain", - "traits": { - "smithy.api#documentation": "

The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#UpdateDevicePolicyConfigurationResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#UpdateDomainMetadata": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#UpdateDomainMetadataRequest" - }, - "output": { - "target": "com.amazonaws.worklink#UpdateDomainMetadataResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Updates domain metadata, such as DisplayName.

", - "smithy.api#http": { - "method": "POST", - "uri": "/updateDomainMetadata", - "code": 200 - } - } - }, - "com.amazonaws.worklink#UpdateDomainMetadataRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The name of the domain.

", - "smithy.api#required": {} - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The name to display.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#UpdateDomainMetadataResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#UpdateFleetMetadata": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#UpdateFleetMetadataRequest" - }, - "output": { - "target": "com.amazonaws.worklink#UpdateFleetMetadataResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Updates fleet metadata, such as DisplayName.

", - "smithy.api#http": { - "method": "POST", - "uri": "/UpdateFleetMetadata", - "code": 200 - } - } - }, - "com.amazonaws.worklink#UpdateFleetMetadataRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The fleet name to display. The existing DisplayName is unset if null is passed.

" - } - }, - "OptimizeForEndUserLocation": { - "target": "com.amazonaws.worklink#Boolean", - "traits": { - "smithy.api#documentation": "

The option to optimize for better performance by routing traffic through the closest\n AWS Region to users, which may be outside of your home Region.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#UpdateFleetMetadataResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#UpdateIdentityProviderConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.worklink#UpdateIdentityProviderConfigurationRequest" - }, - "output": { - "target": "com.amazonaws.worklink#UpdateIdentityProviderConfigurationResponse" - }, - "errors": [ - { - "target": "com.amazonaws.worklink#InternalServerErrorException" - }, - { - "target": "com.amazonaws.worklink#InvalidRequestException" - }, - { - "target": "com.amazonaws.worklink#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.worklink#TooManyRequestsException" - }, - { - "target": "com.amazonaws.worklink#UnauthorizedException" - } - ], - "traits": { - "smithy.api#deprecated": { - "message": "Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK." - }, - "smithy.api#documentation": "

Updates the identity provider configuration for the fleet.

", - "smithy.api#http": { - "method": "POST", - "uri": "/updateIdentityProviderConfiguration", - "code": 200 - } - } - }, - "com.amazonaws.worklink#UpdateIdentityProviderConfigurationRequest": { - "type": "structure", - "members": { - "FleetArn": { - "target": "com.amazonaws.worklink#FleetArn", - "traits": { - "smithy.api#documentation": "

The ARN of the fleet.

", - "smithy.api#required": {} - } - }, - "IdentityProviderType": { - "target": "com.amazonaws.worklink#IdentityProviderType", - "traits": { - "smithy.api#documentation": "

The type of identity provider.

", - "smithy.api#required": {} - } - }, - "IdentityProviderSamlMetadata": { - "target": "com.amazonaws.worklink#SamlMetadata", - "traits": { - "smithy.api#documentation": "

The SAML metadata document provided by the customer’s identity provider. The existing\n IdentityProviderSamlMetadata is unset if null is passed.

" - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.worklink#UpdateIdentityProviderConfigurationResponse": { - "type": "structure", - "members": {}, - "traits": { - "smithy.api#output": {} - } - }, - "com.amazonaws.worklink#Username": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 256 - } - } - }, - "com.amazonaws.worklink#VpcId": { - "type": "string", - "traits": { - "smithy.api#pattern": "^vpc-([0-9a-f]{8}|[0-9a-f]{17})$" - } - }, - "com.amazonaws.worklink#WebsiteAuthorizationProviderSummary": { - "type": "structure", - "members": { - "AuthorizationProviderId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

A unique identifier for the authorization provider.

" - } - }, - "AuthorizationProviderType": { - "target": "com.amazonaws.worklink#AuthorizationProviderType", - "traits": { - "smithy.api#documentation": "

The authorization provider type.

", - "smithy.api#required": {} - } - }, - "DomainName": { - "target": "com.amazonaws.worklink#DomainName", - "traits": { - "smithy.api#documentation": "

The domain name of the authorization provider. This applies only to SAML-based\n authorization providers.

" - } - }, - "CreatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time of creation.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

The summary of the website authorization provider.

" - } - }, - "com.amazonaws.worklink#WebsiteAuthorizationProvidersSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.worklink#WebsiteAuthorizationProviderSummary" - } - }, - "com.amazonaws.worklink#WebsiteCaSummary": { - "type": "structure", - "members": { - "WebsiteCaId": { - "target": "com.amazonaws.worklink#Id", - "traits": { - "smithy.api#documentation": "

A unique identifier for the CA.

" - } - }, - "CreatedTime": { - "target": "com.amazonaws.worklink#DateTime", - "traits": { - "smithy.api#documentation": "

The time when the CA was added.

" - } - }, - "DisplayName": { - "target": "com.amazonaws.worklink#DisplayName", - "traits": { - "smithy.api#documentation": "

The name to display.

" - } - } - }, - "traits": { - "smithy.api#documentation": "

The summary of the certificate authority (CA).

" - } - }, - "com.amazonaws.worklink#WebsiteCaSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.worklink#WebsiteCaSummary" - } - }, - "com.amazonaws.worklink#WorkLink": { - "type": "service", - "version": "2018-09-25", - "operations": [ - { - "target": "com.amazonaws.worklink#AssociateDomain" - }, - { - "target": "com.amazonaws.worklink#AssociateWebsiteAuthorizationProvider" - }, - { - "target": "com.amazonaws.worklink#AssociateWebsiteCertificateAuthority" - }, - { - "target": "com.amazonaws.worklink#CreateFleet" - }, - { - "target": "com.amazonaws.worklink#DeleteFleet" - }, - { - "target": "com.amazonaws.worklink#DescribeAuditStreamConfiguration" - }, - { - "target": "com.amazonaws.worklink#DescribeCompanyNetworkConfiguration" - }, - { - "target": "com.amazonaws.worklink#DescribeDevice" - }, - { - "target": "com.amazonaws.worklink#DescribeDevicePolicyConfiguration" - }, - { - "target": "com.amazonaws.worklink#DescribeDomain" - }, - { - "target": "com.amazonaws.worklink#DescribeFleetMetadata" - }, - { - "target": "com.amazonaws.worklink#DescribeIdentityProviderConfiguration" - }, - { - "target": "com.amazonaws.worklink#DescribeWebsiteCertificateAuthority" - }, - { - "target": "com.amazonaws.worklink#DisassociateDomain" - }, - { - "target": "com.amazonaws.worklink#DisassociateWebsiteAuthorizationProvider" - }, - { - "target": "com.amazonaws.worklink#DisassociateWebsiteCertificateAuthority" - }, - { - "target": "com.amazonaws.worklink#ListDevices" - }, - { - "target": "com.amazonaws.worklink#ListDomains" - }, - { - "target": "com.amazonaws.worklink#ListFleets" - }, - { - "target": "com.amazonaws.worklink#ListTagsForResource" - }, - { - "target": "com.amazonaws.worklink#ListWebsiteAuthorizationProviders" - }, - { - "target": "com.amazonaws.worklink#ListWebsiteCertificateAuthorities" - }, - { - "target": "com.amazonaws.worklink#RestoreDomainAccess" - }, - { - "target": "com.amazonaws.worklink#RevokeDomainAccess" - }, - { - "target": "com.amazonaws.worklink#SignOutUser" - }, - { - "target": "com.amazonaws.worklink#TagResource" - }, - { - "target": "com.amazonaws.worklink#UntagResource" - }, - { - "target": "com.amazonaws.worklink#UpdateAuditStreamConfiguration" - }, - { - "target": "com.amazonaws.worklink#UpdateCompanyNetworkConfiguration" - }, - { - "target": "com.amazonaws.worklink#UpdateDevicePolicyConfiguration" - }, - { - "target": "com.amazonaws.worklink#UpdateDomainMetadata" - }, - { - "target": "com.amazonaws.worklink#UpdateFleetMetadata" - }, - { - "target": "com.amazonaws.worklink#UpdateIdentityProviderConfiguration" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "WorkLink", - "arnNamespace": "worklink", - "cloudFormationName": "WorkLink", - "cloudTrailEventSource": "worklink.amazonaws.com", - "endpointPrefix": "worklink" - }, - "aws.auth#sigv4": { - "name": "worklink" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Amazon WorkLink is a cloud-based service that provides secure access\n to internal websites and web apps from iOS and Android phones. In a single step, your users, such as\n employees, can access internal websites as efficiently as they access any other public website.\n They enter a URL in their web browser, or choose a link to an internal website in an email. Amazon WorkLink\n authenticates the user's access and securely renders authorized internal web content in a secure\n rendering service in the AWS cloud. Amazon WorkLink doesn't download or store any internal web content on\n mobile devices.

", - "smithy.api#title": "Amazon WorkLink", - "smithy.rules#endpointRuleSet": { - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "Boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "String" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://worklink-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://worklink-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - 
"conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://worklink.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://worklink.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] - }, - "smithy.rules#endpointTests": { - "testCases": [ - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://worklink-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://worklink.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://worklink-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink-fips.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://worklink.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink.cn-north-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://worklink-fips.us-gov-east-1.api.aws" - } - 
}, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink-fips.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://worklink.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://worklink.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - 
}, - { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Missing region", - "expect": { - "error": "Invalid Configuration: Missing Region" - } - } - ], - "version": "1.0" - } - } - } - } -} diff --git a/models/workspaces-web.json b/models/workspaces-web.json index 10dae7296a..da9505d559 100644 --- a/models/workspaces-web.json +++ b/models/workspaces-web.json @@ -15,6 +15,15 @@ "type": "service", "version": "2020-07-08", "operations": [ + { + "target": "com.amazonaws.workspacesweb#ExpireSession" + }, + { + "target": "com.amazonaws.workspacesweb#GetSession" + }, + { + "target": "com.amazonaws.workspacesweb#ListSessions" + }, { "target": "com.amazonaws.workspacesweb#ListTagsForResource" }, @@ -64,7 +73,7 @@ "name": "workspaces-web" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

Amazon WorkSpaces Secure Browser is a low cost, fully managed WorkSpace built specifically to facilitate\n secure, web-based workloads. WorkSpaces Secure Browser makes it easy for customers to safely provide\n their employees with access to internal websites and SaaS web applications without the\n administrative burden of appliances or specialized client software. WorkSpaces Secure Browser provides\n simple policy tools tailored for user interactions, while offloading common tasks like\n capacity management, scaling, and maintaining browser images.

", + "smithy.api#documentation": "

Amazon WorkSpaces Secure Browser is a low cost, fully managed WorkSpace built\n specifically to facilitate secure, web-based workloads. WorkSpaces Secure Browser makes it\n easy for customers to safely provide their employees with access to internal websites and\n SaaS web applications without the administrative burden of appliances or specialized client\n software. WorkSpaces Secure Browser provides simple policy tools tailored for user\n interactions, while offloading common tasks like capacity management, scaling, and\n maintaining browser images.

", "smithy.api#title": "Amazon WorkSpaces Web", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -1303,7 +1312,7 @@ "customerManagedKey": { "target": "com.amazonaws.workspacesweb#keyArn", "traits": { - "smithy.api#documentation": "

The customer managed key used to encrypt sensitive information in the browser settings.

" + "smithy.api#documentation": "

The customer managed key used to encrypt sensitive information in the browser\n settings.

" } }, "additionalEncryptionContext": { @@ -1636,19 +1645,19 @@ "allowlist": { "target": "com.amazonaws.workspacesweb#CookieSpecifications", "traits": { - "smithy.api#documentation": "

The list of cookie specifications that are allowed to be synchronized to the remote browser.

", + "smithy.api#documentation": "

The list of cookie specifications that are allowed to be synchronized to the remote\n browser.

", "smithy.api#required": {} } }, "blocklist": { "target": "com.amazonaws.workspacesweb#CookieSpecifications", "traits": { - "smithy.api#documentation": "

The list of cookie specifications that are blocked from being synchronized to the remote browser.

" + "smithy.api#documentation": "

The list of cookie specifications that are blocked from being synchronized to the remote\n browser.

" } } }, "traits": { - "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

", + "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end\n user's local browser to the remote browser.

", "smithy.api#sensitive": {} } }, @@ -1724,7 +1733,7 @@ "clientToken": { "target": "com.amazonaws.workspacesweb#ClientToken", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. Idempotency ensures that an API request completes only once. With an idempotent\n request, if the original request completes successfully, subsequent retries with the same\n client token returns the result from the original successful request.

\n

If you do not specify a client token, one is automatically generated by the Amazon Web Services SDK.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. Idempotency ensures that an API request completes only once. With an idempotent\n request, if the original request completes successfully, subsequent retries with the same\n client token returns the result from the original successful request.

\n

If you do not specify a client token, one is automatically generated by the Amazon Web Services SDK.\n

", "smithy.api#idempotencyToken": {} } } @@ -1815,7 +1824,7 @@ "identityProviderDetails": { "target": "com.amazonaws.workspacesweb#IdentityProviderDetails", "traits": { - "smithy.api#documentation": "

The identity provider details. The following list describes the provider detail keys for each identity provider type.
  • For Google and Login with Amazon: client_id, client_secret, authorize_scopes
  • For Facebook: client_id, client_secret, authorize_scopes, api_version
  • For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
  • For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, and (if not available from the discovery URL specified by the oidc_issuer key) authorize_url, token_url, attributes_url, jwks_uri
  • For SAML providers: MetadataFile OR MetadataURL, IDPSignout (boolean, optional), IDPInit (boolean, optional), RequestSigningAlgorithm (string, optional; only accepts rsa-sha256), EncryptedResponses (boolean, optional)
", + "smithy.api#documentation": "

The identity provider details. The following list describes the provider detail keys for each identity provider type.
  • For Google and Login with Amazon: client_id, client_secret, authorize_scopes
  • For Facebook: client_id, client_secret, authorize_scopes, api_version
  • For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
  • For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, and (if not available from the discovery URL specified by the oidc_issuer key) authorize_url, token_url, attributes_url, jwks_uri
  • For SAML providers: MetadataFile OR MetadataURL, IDPSignout (boolean, optional), IDPInit (boolean, optional), RequestSigningAlgorithm (string, optional; only accepts rsa-sha256), EncryptedResponses (boolean, optional)
", "smithy.api#required": {} } }, @@ -2007,14 +2016,14 @@ "subnetIds": { "target": "com.amazonaws.workspacesweb#SubnetIdList", "traits": { - "smithy.api#documentation": "

The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different availability zones.

", + "smithy.api#documentation": "

The subnets in which network interfaces are created to connect streaming instances to\n your VPC. At least two of these subnets must be in different availability zones.

", "smithy.api#required": {} } }, "securityGroupIds": { "target": "com.amazonaws.workspacesweb#SecurityGroupIdList", "traits": { - "smithy.api#documentation": "

One or more security groups used to control access from streaming instances to your VPC.

", + "smithy.api#documentation": "

One or more security groups used to control access from streaming instances to your\n VPC.

", "smithy.api#required": {} } }, @@ -2098,7 +2107,7 @@ "displayName": { "target": "com.amazonaws.workspacesweb#DisplayName", "traits": { - "smithy.api#documentation": "

The name of the web portal. This is not visible to users who log into the web portal.

" + "smithy.api#documentation": "

The name of the web portal. This is not visible to users who log into the web\n portal.

" } }, "tags": { @@ -2130,7 +2139,7 @@ "authenticationType": { "target": "com.amazonaws.workspacesweb#AuthenticationType", "traits": { - "smithy.api#documentation": "

The type of authentication integration points used when signing into the web portal.\n Defaults to Standard.

\n

\n Standard web portals are authenticated directly through your identity\n provider. You need to call CreateIdentityProvider to integrate your identity\n provider with your web portal. User and group access to your web portal is controlled\n through your identity provider.

\n

\n IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including\n external identity provider integration), plus user and group access to your web portal,\n can be configured in the IAM Identity Center.

" + "smithy.api#documentation": "

The type of authentication integration points used when signing into the web portal.\n Defaults to Standard.

\n

\n Standard web portals are authenticated directly through your identity\n provider. You need to call CreateIdentityProvider to integrate your identity\n provider with your web portal. User and group access to your web portal is controlled\n through your identity provider.

\n

\n IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources\n (including external identity provider integration), plus user and group access to your web\n portal, can be configured in the IAM Identity Center.

" } }, "instanceType": { @@ -2163,7 +2172,7 @@ "portalEndpoint": { "target": "com.amazonaws.workspacesweb#PortalEndpoint", "traits": { - "smithy.api#documentation": "

The endpoint URL of the web portal that users access in order to start streaming sessions.

", + "smithy.api#documentation": "

The endpoint URL of the web portal that users access in order to start streaming\n sessions.

", "smithy.api#required": {} } } @@ -2282,7 +2291,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a user access logging settings resource that can be associated with a web portal.

", + "smithy.api#documentation": "

Creates a user access logging settings resource that can be associated with a web\n portal.

", "smithy.api#http": { "method": "POST", "uri": "/userAccessLoggingSettings", @@ -2420,14 +2429,14 @@ "target": "com.amazonaws.workspacesweb#DisconnectTimeoutInMinutes", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect.

" + "smithy.api#documentation": "

The amount of time that a streaming session remains active after users\n disconnect.

" } }, "idleDisconnectTimeoutInMinutes": { "target": "com.amazonaws.workspacesweb#IdleDisconnectTimeoutInMinutes", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect timeout interval begins.

" + "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected from\n their streaming session and the disconnect timeout interval begins.

" } }, "clientToken": { @@ -2440,13 +2449,13 @@ "cookieSynchronizationConfiguration": { "target": "com.amazonaws.workspacesweb#CookieSynchronizationConfiguration", "traits": { - "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

" + "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end\n user's local browser to the remote browser.

" } }, "customerManagedKey": { "target": "com.amazonaws.workspacesweb#keyArn", "traits": { - "smithy.api#documentation": "

The customer managed key used to encrypt sensitive information in the user settings.

" + "smithy.api#documentation": "

The customer managed key used to encrypt sensitive information in the user\n settings.

" } }, "additionalEncryptionContext": { @@ -2458,7 +2467,7 @@ "deepLinkAllowed": { "target": "com.amazonaws.workspacesweb#EnabledType", "traits": { - "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to\n a session.

" } } }, @@ -3369,6 +3378,72 @@ "com.amazonaws.workspacesweb#ExceptionMessage": { "type": "string" }, + "com.amazonaws.workspacesweb#ExpireSession": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ExpireSessionRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ExpireSessionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Expires an active secure browser session.
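Once this model is regenerated into the Soto service clients, the new operation should be callable in the usual Soto style. A minimal sketch, assuming the generated product is named SotoWorkSpacesWeb and that the generated Swift signatures mirror the shapes in this model (the operation and member names come from the model; everything else is illustrative):

    import SotoWorkSpacesWeb   // assumed product name once this model is code-generated

    // Terminates a known secure browser session. ExpireSession maps to
    // DELETE /portals/{portalId}/sessions/{sessionId} and returns an empty response body.
    func expireSession(_ workSpacesWeb: WorkSpacesWeb, portalId: String, sessionId: String) async throws {
        _ = try await workSpacesWeb.expireSession(.init(portalId: portalId, sessionId: sessionId))
    }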

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/portals/{portalId}/sessions/{sessionId}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#ExpireSessionRequest": { + "type": "structure", + "members": { + "portalId": { + "target": "com.amazonaws.workspacesweb#PortalId", + "traits": { + "smithy.api#documentation": "

The ID of the web portal for the session.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sessionId": { + "target": "com.amazonaws.workspacesweb#SessionId", + "traits": { + "smithy.api#documentation": "

The ID of the session to expire.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspacesweb#ExpireSessionResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.workspacesweb#FieldName": { "type": "string" }, @@ -3775,6 +3850,80 @@ "smithy.api#output": {} } }, + "com.amazonaws.workspacesweb#GetSession": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetSessionRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetSessionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Gets information for a secure browser session.
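As with ExpireSession, a hypothetical sketch of the generated Soto call; the field names (status, startTime, clientIpAddresses) come from the Session shape added in this model, while the Swift signatures themselves are assumptions:

    import SotoWorkSpacesWeb   // assumed product name; signatures are illustrative

    // Fetches a single session (GET /portals/{portalId}/sessions/{sessionId})
    // and prints a few of the Session members defined in this model.
    func describeSession(_ workSpacesWeb: WorkSpacesWeb, portalId: String, sessionId: String) async throws {
        let response = try await workSpacesWeb.getSession(.init(portalId: portalId, sessionId: sessionId))
        guard let session = response.session else { return }
        print("session:", session.sessionId ?? "?")
        print("status:", session.status?.rawValue ?? "unknown")
        print("started:", session.startTime.map { "\($0)" } ?? "n/a")
        print("client IPs:", session.clientIpAddresses?.joined(separator: ", ") ?? "none")
    }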

", + "smithy.api#http": { + "method": "GET", + "uri": "/portals/{portalId}/sessions/{sessionId}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetSessionRequest": { + "type": "structure", + "members": { + "portalId": { + "target": "com.amazonaws.workspacesweb#PortalId", + "traits": { + "smithy.api#documentation": "

The ID of the web portal for the session.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sessionId": { + "target": "com.amazonaws.workspacesweb#SessionId", + "traits": { + "smithy.api#documentation": "

The ID of the session.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspacesweb#GetSessionResponse": { + "type": "structure", + "members": { + "session": { + "target": "com.amazonaws.workspacesweb#Session", + "traits": { + "smithy.api#documentation": "

The sessions in a list.

", + "smithy.api#nestedProperties": {} + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.workspacesweb#GetTrustStore": { "type": "operation", "input": { @@ -4083,7 +4232,7 @@ "identityProviderDetails": { "target": "com.amazonaws.workspacesweb#IdentityProviderDetails", "traits": { - "smithy.api#documentation": "

The identity provider details. The following list describes the provider detail keys for each identity provider type.
  • For Google and Login with Amazon: client_id, client_secret, authorize_scopes
  • For Facebook: client_id, client_secret, authorize_scopes, api_version
  • For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
  • For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, and (if not available from the discovery URL specified by the oidc_issuer key) authorize_url, token_url, attributes_url, jwks_uri
  • For SAML providers: MetadataFile OR MetadataURL, IDPSignout (boolean, optional), IDPInit (boolean, optional), RequestSigningAlgorithm (string, optional; only accepts rsa-sha256), EncryptedResponses (boolean, optional)
" + "smithy.api#documentation": "

The identity provider details. The following list describes the provider detail keys for each identity provider type.
  • For Google and Login with Amazon: client_id, client_secret, authorize_scopes
  • For Facebook: client_id, client_secret, authorize_scopes, api_version
  • For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
  • For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, and (if not available from the discovery URL specified by the oidc_issuer key) authorize_url, token_url, attributes_url, jwks_uri
  • For SAML providers: MetadataFile OR MetadataURL, IDPSignout (boolean, optional), IDPInit (boolean, optional), RequestSigningAlgorithm (string, optional; only accepts rsa-sha256), EncryptedResponses (boolean, optional)
" } } }, @@ -4294,7 +4443,7 @@ "associatedPortalArns": { "target": "com.amazonaws.workspacesweb#ArnList", "traits": { - "smithy.api#documentation": "

A list of web portal ARNs that this IP access settings resource is associated with.

" + "smithy.api#documentation": "

A list of web portal ARNs that this IP access settings resource is associated\n with.

" } }, "ipRules": { @@ -4324,7 +4473,7 @@ "customerManagedKey": { "target": "com.amazonaws.workspacesweb#keyArn", "traits": { - "smithy.api#documentation": "

The customer managed key used to encrypt sensitive information in the IP access settings.

" + "smithy.api#documentation": "

The customer managed key used to encrypt sensitive information in the IP access\n settings.

" } }, "additionalEncryptionContext": { @@ -4439,6 +4588,29 @@ "smithy.api#documentation": "

The summary of IP access settings.

" } }, + "com.amazonaws.workspacesweb#IpAddress": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 15 + }, + "smithy.api#pattern": "^((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))$|^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.workspacesweb#IpAddressList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#IpAddress" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 45 + } + } + }, "com.amazonaws.workspacesweb#IpRange": { "type": "string", "traits": { @@ -4535,7 +4707,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", "smithy.api#httpQuery": "nextToken" } }, @@ -4563,7 +4735,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

" + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

" } } }, @@ -4614,7 +4786,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", "smithy.api#httpQuery": "nextToken" } }, @@ -4644,7 +4816,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

" + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

" } }, "identityProviders": { @@ -4701,7 +4873,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", "smithy.api#httpQuery": "nextToken" } }, @@ -4729,7 +4901,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

" + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

" } } }, @@ -4780,7 +4952,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", "smithy.api#httpQuery": "nextToken" } }, @@ -4808,7 +4980,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

" + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

" } } }, @@ -4859,7 +5031,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.\n

", "smithy.api#httpQuery": "nextToken" } }, @@ -4887,7 +5059,127 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

" + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.\n

" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, + "com.amazonaws.workspacesweb#ListSessions": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListSessionsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListSessionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists information for multiple secure browser sessions from a specific portal.
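A hedged sketch of paging through sessions with this new operation, using only the request/response members defined in the model (portalId, status, maxResults, nextToken, sessions); the exact generated Soto signatures may differ:

    import SotoWorkSpacesWeb   // assumed product name once this model is code-generated

    // Collects all active sessions for a portal by following nextToken until exhausted.
    func activeSessions(_ workSpacesWeb: WorkSpacesWeb, portalId: String) async throws -> [WorkSpacesWeb.SessionSummary] {
        var summaries: [WorkSpacesWeb.SessionSummary] = []
        var nextToken: String? = nil
        repeat {
            let page = try await workSpacesWeb.listSessions(.init(
                maxResults: 50,
                nextToken: nextToken,
                portalId: portalId,
                status: .active
            ))
            summaries.append(contentsOf: page.sessions)
            nextToken = page.nextToken
        } while nextToken != nil
        return summaries
    }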

", + "smithy.api#http": { + "method": "GET", + "uri": "/portals/{portalId}/sessions", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults", + "items": "sessions" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListSessionsRequest": { + "type": "structure", + "members": { + "portalId": { + "target": "com.amazonaws.workspacesweb#PortalId", + "traits": { + "smithy.api#documentation": "

The ID of the web portal for the sessions.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "username": { + "target": "com.amazonaws.workspacesweb#Username", + "traits": { + "smithy.api#documentation": "

The username of the session.

", + "smithy.api#httpQuery": "username" + } + }, + "sessionId": { + "target": "com.amazonaws.workspacesweb#SessionId", + "traits": { + "smithy.api#documentation": "

The ID of the session.

", + "smithy.api#httpQuery": "sessionId" + } + }, + "sortBy": { + "target": "com.amazonaws.workspacesweb#SessionSortBy", + "traits": { + "smithy.api#documentation": "

The method in which the returned sessions should be sorted.

", + "smithy.api#httpQuery": "sortBy" + } + }, + "status": { + "target": "com.amazonaws.workspacesweb#SessionStatus", + "traits": { + "smithy.api#documentation": "

The status of the session.

", + "smithy.api#httpQuery": "status" + } + }, + "maxResults": { + "target": "com.amazonaws.workspacesweb#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to be included in the next page.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", + "smithy.api#httpQuery": "nextToken" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.workspacesweb#ListSessionsResponse": { + "type": "structure", + "members": { + "sessions": { + "target": "com.amazonaws.workspacesweb#SessionSummaryList", + "traits": { + "smithy.api#documentation": "

The sessions in a list.

", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

" } } }, @@ -5015,7 +5307,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", "smithy.api#httpQuery": "nextToken", "smithy.api#notProperty": {} } @@ -5056,7 +5348,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", "smithy.api#notProperty": {} } } @@ -5108,7 +5400,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", "smithy.api#httpQuery": "nextToken" } }, @@ -5136,7 +5428,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

" + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

" } } }, @@ -5187,7 +5479,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

", "smithy.api#httpQuery": "nextToken" } }, @@ -5215,7 +5507,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

" + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this\n operation.

" } } }, @@ -5266,7 +5558,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

", + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.\n

", "smithy.api#httpQuery": "nextToken" } }, @@ -5294,7 +5586,7 @@ "nextToken": { "target": "com.amazonaws.workspacesweb#PaginationToken", "traits": { - "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.

" + "smithy.api#documentation": "

The pagination token used to retrieve the next page of results for this operation.\n

" } } }, @@ -5345,13 +5637,13 @@ "subnetIds": { "target": "com.amazonaws.workspacesweb#SubnetIdList", "traits": { - "smithy.api#documentation": "

The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different availability zones.

" + "smithy.api#documentation": "

The subnets in which network interfaces are created to connect streaming instances to\n your VPC. At least two of these subnets must be in different availability zones.

" } }, "securityGroupIds": { "target": "com.amazonaws.workspacesweb#SecurityGroupIdList", "traits": { - "smithy.api#documentation": "

One or more security groups used to control access from streaming instances to your VPC.

" + "smithy.api#documentation": "

One or more security groups used to control access from streaming instances to your VPC.\n

" } } }, @@ -5529,13 +5821,13 @@ "userAccessLoggingSettingsArn": { "target": "com.amazonaws.workspacesweb#ARN", "traits": { - "smithy.api#documentation": "

The ARN of the user access logging settings that is associated with the web portal.

" + "smithy.api#documentation": "

The ARN of the user access logging settings that is associated with the web\n portal.

" } }, "authenticationType": { "target": "com.amazonaws.workspacesweb#AuthenticationType", "traits": { - "smithy.api#documentation": "

The type of authentication integration points used when signing into the web portal.\n Defaults to Standard.

\n

\n Standard web portals are authenticated directly through your identity\n provider. You need to call CreateIdentityProvider to integrate your identity\n provider with your web portal. User and group access to your web portal is controlled\n through your identity provider.

\n

\n IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including\n external identity provider integration), plus user and group access to your web portal,\n can be configured in the IAM Identity Center.

" + "smithy.api#documentation": "

The type of authentication integration points used when signing into the web portal.\n Defaults to Standard.

\n

\n Standard web portals are authenticated directly through your identity\n provider. You need to call CreateIdentityProvider to integrate your identity\n provider with your web portal. User and group access to your web portal is controlled\n through your identity provider.

\n

\n IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources\n (including external identity provider integration), plus user and group access to your web\n portal, can be configured in the IAM Identity Center.

" } }, "ipAccessSettingsArn": { @@ -5583,6 +5875,16 @@ "smithy.api#pattern": "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$" } }, + "com.amazonaws.workspacesweb#PortalId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\-]+$" + } + }, "com.amazonaws.workspacesweb#PortalList": { "type": "list", "member": { @@ -5830,13 +6132,13 @@ "userAccessLoggingSettingsArn": { "target": "com.amazonaws.workspacesweb#ARN", "traits": { - "smithy.api#documentation": "

The ARN of the user access logging settings that is associated with the web portal.

" + "smithy.api#documentation": "

The ARN of the user access logging settings that is associated with the web\n portal.

" } }, "authenticationType": { "target": "com.amazonaws.workspacesweb#AuthenticationType", "traits": { - "smithy.api#documentation": "

The type of authentication integration points used when signing into the web portal.\n Defaults to Standard.

\n

\n Standard web portals are authenticated directly through your identity\n provider. You need to call CreateIdentityProvider to integrate your identity\n provider with your web portal. User and group access to your web portal is controlled\n through your identity provider.

\n

\n IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including\n external identity provider integration), plus user and group access to your web portal,\n can be configured in the IAM Identity Center.

" + "smithy.api#documentation": "

The type of authentication integration points used when signing into the web portal.\n Defaults to Standard.

\n

\n Standard web portals are authenticated directly through your identity\n provider. You need to call CreateIdentityProvider to integrate your identity\n provider with your web portal. User and group access to your web portal is controlled\n through your identity provider.

\n

\n IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources\n (including external identity provider integration), plus user and group access to your web\n portal, can be configured in the IAM Identity Center.

" } }, "ipAccessSettingsArn": { @@ -5985,6 +6287,150 @@ "smithy.api#httpError": 402 } }, + "com.amazonaws.workspacesweb#Session": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.

" + } + }, + "sessionId": { + "target": "com.amazonaws.workspacesweb#StringType", + "traits": { + "smithy.api#documentation": "

The ID of the session.

" + } + }, + "username": { + "target": "com.amazonaws.workspacesweb#Username", + "traits": { + "smithy.api#documentation": "

The username of the session.

" + } + }, + "clientIpAddresses": { + "target": "com.amazonaws.workspacesweb#IpAddressList", + "traits": { + "smithy.api#documentation": "

The IP address of the client.

" + } + }, + "status": { + "target": "com.amazonaws.workspacesweb#SessionStatus", + "traits": { + "smithy.api#documentation": "

The status of the session.

" + } + }, + "startTime": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

The start time of the session.

" + } + }, + "endTime": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

The end time of the session.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about a secure browser session.

" + } + }, + "com.amazonaws.workspacesweb#SessionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\-]+$" + } + }, + "com.amazonaws.workspacesweb#SessionSortBy": { + "type": "enum", + "members": { + "START_TIME_ASCENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "StartTimeAscending" + } + }, + "START_TIME_DESCENDING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "StartTimeDescending" + } + } + } + }, + "com.amazonaws.workspacesweb#SessionStatus": { + "type": "enum", + "members": { + "ACTIVE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Active" + } + }, + "TERMINATED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "Terminated" + } + } + } + }, + "com.amazonaws.workspacesweb#SessionSummary": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.

" + } + }, + "sessionId": { + "target": "com.amazonaws.workspacesweb#StringType", + "traits": { + "smithy.api#documentation": "

The ID of the session.

" + } + }, + "username": { + "target": "com.amazonaws.workspacesweb#Username", + "traits": { + "smithy.api#documentation": "

The username of the session.

" + } + }, + "status": { + "target": "com.amazonaws.workspacesweb#SessionStatus", + "traits": { + "smithy.api#documentation": "

The status of the session.

" + } + }, + "startTime": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

The start time of the session.

" + } + }, + "endTime": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

The end time of the session.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary information about a secure browser session.

" + } + }, + "com.amazonaws.workspacesweb#SessionSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#SessionSummary" + } + }, "com.amazonaws.workspacesweb#StatusReason": { "type": "string", "traits": { @@ -6023,7 +6469,7 @@ "traits": { "smithy.api#length": { "min": 2, - "max": 3 + "max": 5 } } }, @@ -6542,7 +6988,7 @@ "identityProviderDetails": { "target": "com.amazonaws.workspacesweb#IdentityProviderDetails", "traits": { - "smithy.api#documentation": "

The details of the identity provider. The following list describes the provider detail keys for each identity provider type.
  • For Google and Login with Amazon: client_id, client_secret, authorize_scopes
  • For Facebook: client_id, client_secret, authorize_scopes, api_version
  • For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
  • For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, and (if not available from the discovery URL specified by the oidc_issuer key) authorize_url, token_url, attributes_url, jwks_uri
  • For SAML providers: MetadataFile OR MetadataURL, IDPSignout (boolean, optional), IDPInit (boolean, optional), RequestSigningAlgorithm (string, optional; only accepts rsa-sha256), EncryptedResponses (boolean, optional)
" + "smithy.api#documentation": "

The details of the identity provider. The following list describes the provider detail keys for each identity provider type.
  • For Google and Login with Amazon: client_id, client_secret, authorize_scopes
  • For Facebook: client_id, client_secret, authorize_scopes, api_version
  • For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
  • For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, and (if not available from the discovery URL specified by the oidc_issuer key) authorize_url, token_url, attributes_url, jwks_uri
  • For SAML providers: MetadataFile OR MetadataURL, IDPSignout (boolean, optional), IDPInit (boolean, optional), RequestSigningAlgorithm (string, optional; only accepts rsa-sha256), EncryptedResponses (boolean, optional)
" } }, "clientToken": { @@ -6718,13 +7164,13 @@ "subnetIds": { "target": "com.amazonaws.workspacesweb#SubnetIdList", "traits": { - "smithy.api#documentation": "

The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different availability zones.

" + "smithy.api#documentation": "

The subnets in which network interfaces are created to connect streaming instances to\n your VPC. At least two of these subnets must be in different availability zones.

" } }, "securityGroupIds": { "target": "com.amazonaws.workspacesweb#SecurityGroupIdList", "traits": { - "smithy.api#documentation": "

One or more security groups used to control access from streaming instances to your VPC.

" + "smithy.api#documentation": "

One or more security groups used to control access from streaming instances to your\n VPC.

" } }, "clientToken": { @@ -6810,13 +7256,13 @@ "displayName": { "target": "com.amazonaws.workspacesweb#DisplayName", "traits": { - "smithy.api#documentation": "

The name of the web portal. This is not visible to users who log into the web portal.

" + "smithy.api#documentation": "

The name of the web portal. This is not visible to users who log into the web\n portal.

" } }, "authenticationType": { "target": "com.amazonaws.workspacesweb#AuthenticationType", "traits": { - "smithy.api#documentation": "

The type of authentication integration points used when signing into the web portal.\n Defaults to Standard.

\n

\n Standard web portals are authenticated directly through your identity\n provider. You need to call CreateIdentityProvider to integrate your identity\n provider with your web portal. User and group access to your web portal is controlled\n through your identity provider.

\n

\n IAM Identity Center web portals are authenticated through IAM Identity Center (successor to Single Sign-On). Identity sources (including\n external identity provider integration), plus user and group access to your web portal,\n can be configured in the IAM Identity Center.

" + "smithy.api#documentation": "

The type of authentication integration points used when signing into the web portal.\n Defaults to Standard.

\n

\n Standard web portals are authenticated directly through your identity\n provider. You need to call CreateIdentityProvider to integrate your identity\n provider with your web portal. User and group access to your web portal is controlled\n through your identity provider.

\n

\n IAM Identity Center web portals are authenticated through IAM Identity Center. Identity sources\n (including external identity provider integration), plus user and group access to your web\n portal, can be configured in the IAM Identity Center.

" } }, "instanceType": { @@ -7098,14 +7544,14 @@ "target": "com.amazonaws.workspacesweb#DisconnectTimeoutInMinutes", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect.

" + "smithy.api#documentation": "

The amount of time that a streaming session remains active after users\n disconnect.

" } }, "idleDisconnectTimeoutInMinutes": { "target": "com.amazonaws.workspacesweb#IdleDisconnectTimeoutInMinutes", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect timeout interval begins.

" + "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected from\n their streaming session and the disconnect timeout interval begins.

" } }, "clientToken": { @@ -7118,13 +7564,13 @@ "cookieSynchronizationConfiguration": { "target": "com.amazonaws.workspacesweb#CookieSynchronizationConfiguration", "traits": { - "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

\n

If the allowlist and blocklist are empty, the configuration becomes null.

" + "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end\n user's local browser to the remote browser.

\n

If the allowlist and blocklist are empty, the configuration becomes null.

" } }, "deepLinkAllowed": { "target": "com.amazonaws.workspacesweb#EnabledType", "traits": { - "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to\n a session.

" } } }, @@ -7162,7 +7608,7 @@ "associatedPortalArns": { "target": "com.amazonaws.workspacesweb#ArnList", "traits": { - "smithy.api#documentation": "

A list of web portal ARNs that this user access logging settings is associated with.

" + "smithy.api#documentation": "

A list of web portal ARNs that this user access logging settings is associated\n with.

" } }, "kinesisStreamArn": { @@ -7301,26 +7747,26 @@ "target": "com.amazonaws.workspacesweb#DisconnectTimeoutInMinutes", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect.

" + "smithy.api#documentation": "

The amount of time that a streaming session remains active after users\n disconnect.

" } }, "idleDisconnectTimeoutInMinutes": { "target": "com.amazonaws.workspacesweb#IdleDisconnectTimeoutInMinutes", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect timeout interval begins.

" + "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected from\n their streaming session and the disconnect timeout interval begins.

" } }, "cookieSynchronizationConfiguration": { "target": "com.amazonaws.workspacesweb#CookieSynchronizationConfiguration", "traits": { - "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

" + "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end\n user's local browser to the remote browser.

" } }, "customerManagedKey": { "target": "com.amazonaws.workspacesweb#keyArn", "traits": { - "smithy.api#documentation": "

The customer managed key used to encrypt sensitive information in the user settings.

" + "smithy.api#documentation": "

The customer managed key used to encrypt sensitive information in the user\n settings.

" } }, "additionalEncryptionContext": { @@ -7332,7 +7778,7 @@ "deepLinkAllowed": { "target": "com.amazonaws.workspacesweb#EnabledType", "traits": { - "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to\n a session.

" } } }, @@ -7467,26 +7913,26 @@ "target": "com.amazonaws.workspacesweb#DisconnectTimeoutInMinutes", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The amount of time that a streaming session remains active after users disconnect.

" + "smithy.api#documentation": "

The amount of time that a streaming session remains active after users\n disconnect.

" } }, "idleDisconnectTimeoutInMinutes": { "target": "com.amazonaws.workspacesweb#IdleDisconnectTimeoutInMinutes", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect timeout interval begins.

" + "smithy.api#documentation": "

The amount of time that users can be idle (inactive) before they are disconnected from\n their streaming session and the disconnect timeout interval begins.

" } }, "cookieSynchronizationConfiguration": { "target": "com.amazonaws.workspacesweb#CookieSynchronizationConfiguration", "traits": { - "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.

" + "smithy.api#documentation": "

The configuration that specifies which cookies should be synchronized from the end\n user's local browser to the remote browser.

" } }, "deepLinkAllowed": { "target": "com.amazonaws.workspacesweb#EnabledType", "traits": { - "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to a session.

" + "smithy.api#documentation": "

Specifies whether the user can use deep links that open automatically when connecting to\n a session.

" } } }, @@ -7494,6 +7940,17 @@ "smithy.api#documentation": "

The summary of user settings.

" } }, + "com.amazonaws.workspacesweb#Username": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[\\s\\S]*$", + "smithy.api#sensitive": {} + } + }, "com.amazonaws.workspacesweb#ValidationException": { "type": "structure", "members": { diff --git a/models/workspaces.json b/models/workspaces.json index a9758e820c..d9aac9a865 100644 --- a/models/workspaces.json +++ b/models/workspaces.json @@ -2478,7 +2478,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates one or more WorkSpaces.

\n

This operation is asynchronous and returns before the WorkSpaces are created.

\n \n
    \n
  • \n

    The MANUAL running mode value is only supported by Amazon WorkSpaces\n Core. Contact your account team to be allow-listed to use this value. For more\n information, see Amazon WorkSpaces\n Core.

    \n
  • \n
  • \n

    You don't need to specify the PCOIP protocol for Linux bundles\n because WSP is the default protocol for those bundles.

    \n
  • \n
  • \n

    User-decoupled WorkSpaces are only supported by Amazon WorkSpaces\n Core.

    \n
  • \n
  • \n

    Review your running mode to ensure you are using one that is optimal for your needs and budget.\n For more information on switching running modes, see \n \n Can I switch between hourly and monthly billing?\n

    \n
  • \n
\n
" + "smithy.api#documentation": "

Creates one or more WorkSpaces.

\n

This operation is asynchronous and returns before the WorkSpaces are created.

\n \n
    \n
  • \n

    The MANUAL running mode value is only supported by Amazon WorkSpaces\n Core. Contact your account team to be allow-listed to use this value. For more\n information, see Amazon WorkSpaces\n Core.

    \n
  • \n
  • \n

    You don't need to specify the PCOIP protocol for Linux bundles\n because DCV (formerly WSP) is the default protocol for those bundles.

    \n
  • \n
  • \n

    User-decoupled WorkSpaces are only supported by Amazon WorkSpaces\n Core.

    \n
  • \n
  • \n

    Review your running mode to ensure you are using one that is optimal for your needs and budget.\n For more information on switching running modes, see \n \n Can I switch between hourly and monthly billing?\n

    \n
  • \n
\n
" } }, "com.amazonaws.workspaces#CreateWorkspacesPool": { @@ -4833,7 +4833,20 @@ "outputToken": "NextToken", "items": "Workspaces", "pageSize": "Limit" - } + }, + "smithy.test#smokeTests": [ + { + "id": "DescribeWorkspacesSuccess", + "params": {}, + "vendorParams": { + "region": "us-west-2" + }, + "vendorParamsShape": "aws.test#AwsVendorParams", + "expect": { + "success": {} + } + } + ] } }, "com.amazonaws.workspaces#DescribeWorkspacesConnectionStatus": { @@ -5983,7 +5996,7 @@ "IngestionProcess": { "target": "com.amazonaws.workspaces#WorkspaceImageIngestionProcess", "traits": { - "smithy.api#documentation": "

- "smithy.api#documentation": "The ingestion process to be used when importing the image, depending on which protocol\n you want to use for your BYOL Workspace image, either PCoIP, WorkSpaces Streaming Protocol \n (WSP), or bring your own protocol (BYOP). To use WSP, specify a value that ends in \n _WSP. To use PCoIP, specify a value that does not end in _WSP. \n To use BYOP, specify a value that ends in _BYOP. For non-GPU-enabled bundles (bundles other than Graphics or GraphicsPro), specify \n BYOL_REGULAR, BYOL_REGULAR_WSP, or BYOL_REGULAR_BYOP, \n depending on the protocol. The BYOL_REGULAR_BYOP and BYOL_GRAPHICS_G4DN_BYOP values\n are only supported by Amazon WorkSpaces Core. Contact your account team to be\n allow-listed to use these values. For more information, see Amazon WorkSpaces Core.", + "smithy.api#documentation": "The ingestion process to be used when importing the image, depending on which protocol\n you want to use for your BYOL Workspace image, either PCoIP, DCV, or \n bring your own protocol (BYOP). To use WSP, specify a value that ends in \n _DCV. To use PCoIP, specify a value that does not end in _DCV. \n To use BYOP, specify a value that ends in _BYOP. For non-GPU-enabled bundles (bundles other than Graphics or GraphicsPro), specify \n BYOL_REGULAR, BYOL_REGULAR_DCV, or BYOL_REGULAR_BYOP, \n depending on the protocol. The BYOL_REGULAR_BYOP and BYOL_GRAPHICS_G4DN_BYOP values\n are only supported by Amazon WorkSpaces Core. Contact your account team to be\n allow-listed to use these values. For more information, see Amazon WorkSpaces Core.", "smithy.api#required": {} } }, @@ -6010,7 +6023,7 @@ "Applications": { "target": "com.amazonaws.workspaces#ApplicationList", "traits": {
- "smithy.api#documentation": "If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11\n BYOL images. For more information about subscribing to Office for BYOL images, see Bring\n Your Own Windows Desktop Licenses. • Although this parameter is an array, only one item is allowed at this\n time. • During the image import process, non-GPU WSP WorkSpaces with Windows 11 support\n only Microsoft_Office_2019. GPU WSP WorkSpaces with Windows 11 do not\n support Office installation." + "smithy.api#documentation": "If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11\n BYOL images. For more information about subscribing to Office for BYOL images, see Bring\n Your Own Windows Desktop Licenses. • Although this parameter is an array, only one item is allowed at this\n time. • During the image import process, non-GPU DCV (formerly WSP) WorkSpaces with Windows 11 support\n only Microsoft_Office_2019. GPU DCV (formerly WSP) WorkSpaces with Windows 11 do not\n support Office installation."
 } } }, @@ -10814,6 +10827,24 @@ "traits": { "smithy.api#enumValue": "UEFINotSupported" } + }, + "UNKNOWN_ERROR": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "UnknownError" + } + }, + "APPX_PACKAGES_INSTALLED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "AppXPackagesInstalled" + } + }, + "RESERVED_STORAGE_IN_USE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ReservedStorageInUse" + } } } }, @@ -10992,7 +11023,7 @@ "Protocols": { "target": "com.amazonaws.workspaces#ProtocolList", "traits": {
- "smithy.api#documentation": "The protocol. For more information, see \n \n Protocols for Amazon WorkSpaces. • Only available for WorkSpaces created with PCoIP bundles. • The Protocols property is case sensitive. Ensure you use PCOIP or WSP. • Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles \n (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn)." + "smithy.api#documentation": "The protocol. For more information, see \n \n Protocols for Amazon WorkSpaces. • Only available for WorkSpaces created with PCoIP bundles. • The Protocols property is case sensitive. Ensure you use PCOIP or DCV (formerly WSP). • Unavailable for Windows 7 WorkSpaces and WorkSpaces using GPU-based bundles \n (Graphics, GraphicsPro, Graphics.g4dn, and GraphicsPro.g4dn)."
 } }, "OperatingSystemName": {