diff --git a/src/aws.ts b/src/aws.ts index 37bf4cbc043..561ae40fa3f 100644 --- a/src/aws.ts +++ b/src/aws.ts @@ -903,6 +903,24 @@ const completionSpec: Fig.Spec = { "Amazon GameLift provides solutions for hosting session-based multiplayer game servers in the cloud, including tools for deploying, operating, and scaling game servers. Built on Amazon Web Services global computing infrastructure, GameLift helps you deliver high-performance, high-reliability, low-cost game servers while dynamically scaling your resource usage to meet player demand. About Amazon GameLift solutions Get more information on these Amazon GameLift solutions in the Amazon GameLift Developer Guide. Amazon GameLift managed hosting -- Amazon GameLift offers a fully managed service to set up and maintain computing machines for hosting, manage game session and player session life cycle, and handle security, storage, and performance tracking. You can use automatic scaling tools to balance player demand and hosting costs, configure your game session management to minimize player latency, and add FlexMatch for matchmaking. Managed hosting with Realtime Servers -- With Amazon GameLift Realtime Servers, you can quickly configure and set up ready-to-go game servers for your game. Realtime Servers provides a game server framework with core Amazon GameLift infrastructure already built in. Then use the full range of Amazon GameLift managed hosting features, including FlexMatch, for your game. Amazon GameLift FleetIQ -- Use Amazon GameLift FleetIQ as a standalone service while hosting your games using EC2 instances and Auto Scaling groups. Amazon GameLift FleetIQ provides optimizations for game hosting, including boosting the viability of low-cost Spot Instances gaming. For a complete solution, pair the Amazon GameLift FleetIQ and FlexMatch standalone services. Amazon GameLift FlexMatch -- Add matchmaking to your game hosting solution. FlexMatch is a customizable matchmaking service for multiplayer games. 
Use FlexMatch as integrated with Amazon GameLift managed hosting or incorporate FlexMatch as a standalone service into your own hosting solution. About this API Reference This reference guide describes the low-level service API for Amazon GameLift. With each topic in this guide, you can find links to language-specific SDK guides and the Amazon Web Services CLI reference. Useful links: Amazon GameLift API operations listed by tasks Amazon GameLift tools and resources", loadSpec: "aws/gamelift", }, + { + name: "geo-maps", + description: + "Integrate high-quality base map data into your applications using MapLibre. Capabilities include: Access to comprehensive base map data, allowing you to tailor the map display to your specific needs. Multiple pre-designed map styles suited for various application types, such as navigation, logistics, or data visualization. Generation of static map images for scenarios where interactive maps aren't suitable, such as: Embedding in emails or documents Displaying in low-bandwidth environments Creating printable maps Enhancing application performance by reducing client-side rendering", + loadSpec: "aws/geo-maps", + }, + { + name: "geo-places", + description: + "The Places API enables powerful location search and geocoding capabilities for your applications, offering global coverage with rich, detailed information. 
Key features include: Forward and reverse geocoding for addresses and coordinates Comprehensive place searches with detailed information, including: Business names and addresses Contact information Hours of operation POI (Points of Interest) categories Food types for restaurants Chain affiliation for relevant businesses Global data coverage with a wide range of POI categories Regular data updates to ensure accuracy and relevance", + loadSpec: "aws/geo-places", + }, + { + name: "geo-routes", + description: + "With the Amazon Location Routes API you can calculate routes and estimate travel time based on up-to-date road network and live traffic information. Calculate optimal travel routes and estimate travel times using up-to-date road network and traffic data. Key features include: Point-to-point routing with estimated travel time, distance, and turn-by-turn directions Multi-point route optimization to minimize travel time or distance Route matrices for efficient multi-destination planning Isoline calculations to determine reachable areas within specified time or distance thresholds Map-matching to align GPS traces with the road network", + loadSpec: "aws/geo-routes", + }, { name: "glacier", description: @@ -1483,7 +1501,7 @@ const completionSpec: Fig.Spec = { { name: "mwaa", description: - "Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?. Endpoints api.airflow.{region}.amazonaws.com - This endpoint is used for environment management. CreateEnvironment DeleteEnvironment GetEnvironment ListEnvironments ListTagsForResource TagResource UntagResource UpdateEnvironment env.airflow.{region}.amazonaws.com - This endpoint is used to operate the Airflow environment. 
CreateCliToken CreateWebLoginToken Regions For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference", + "Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?. Endpoints api.airflow.{region}.amazonaws.com - This endpoint is used for environment management. CreateEnvironment DeleteEnvironment GetEnvironment ListEnvironments ListTagsForResource TagResource UntagResource UpdateEnvironment env.airflow.{region}.amazonaws.com - This endpoint is used to operate the Airflow environment. CreateCliToken CreateWebLoginToken InvokeRestApi Regions For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference", loadSpec: "aws/mwaa", }, { @@ -1507,7 +1525,7 @@ const completionSpec: Fig.Spec = { { name: "network-firewall", description: - "This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. 
Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. Network Firewall supports Suricata version 6.0.9. For information about Suricata, see the Suricata website. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. 
Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints", + "This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. 
Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints", loadSpec: "aws/network-firewall", }, { @@ -1522,12 +1540,6 @@ const completionSpec: Fig.Spec = { "Amazon CloudWatch Network Monitor is an Amazon Web Services active network monitoring service that identifies if a network issues exists within the Amazon Web Services network or your own company network. Within Network Monitor you'll choose the source VPCs and subnets from the Amazon Web Services network in which you operate and then you'll choose the destination IP addresses from your on-premises network. From these sources and destinations, Network Monitor creates a monitor containing all the possible source and destination combinations, each of which is called a probe, within a single monitor. 
These probes then monitor network traffic to help you identify where network issues might be affecting your traffic. Before you begin, ensure the Amazon Web Services CLI is configured in the Amazon Web Services Account where you will create the Network Monitor resource. Network Monitor doesn\u2019t support creation on cross-account resources, but you can create a Network Monitor in any subnet belonging to a VPC owned by your Account. For more information, see Using Amazon CloudWatch Network Monitor in the Amazon CloudWatch User Guide", loadSpec: "aws/networkmonitor", }, - { - name: "nimble", - description: - "Welcome to the Amazon Nimble Studio API reference. This API reference provides methods, schema, resources, parameters, and more to help you get the most out of Nimble Studio. Nimble Studio is a virtual studio that empowers visual effects, animation, and interactive content teams to create content securely within a scalable, private cloud service", - loadSpec: "aws/nimble", - }, { name: "oam", description: @@ -2115,7 +2127,7 @@ const completionSpec: Fig.Spec = { { name: "storagegateway", description: - "Storage Gateway Service Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. Use the following links to get started using the Storage Gateway Service API Reference: Storage Gateway required request headers: Describes the required headers that you must send with every POST request to Storage Gateway. Signing requests: Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. Error responses: Provides reference information about Storage Gateway errors. 
Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. Storage Gateway endpoints and quotas: Provides a list of each Amazon Web Services Region and the endpoints available for use with Storage Gateway. Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. For more information, see Announcement: Heads-up \u2013 Longer Storage Gateway volume and snapshot IDs coming in 2016", + "Storage Gateway Service Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post. 
Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. Use the following links to get started using the Storage Gateway Service API Reference: Storage Gateway required request headers: Describes the required headers that you must send with every POST request to Storage Gateway. Signing requests: Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. Error responses: Provides reference information about Storage Gateway errors. Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. Storage Gateway endpoints and quotas: Provides a list of each Amazon Web Services Region and the endpoints available for use with Storage Gateway. Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. 
For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. For more information, see Announcement: Heads-up \u2013 Longer Storage Gateway volume and snapshot IDs coming in 2016", loadSpec: "aws/storagegateway", }, { diff --git a/src/aws/amp.ts b/src/aws/amp.ts index 23f33d62ce2..ab4c003f0ed 100644 --- a/src/aws/amp.ts +++ b/src/aws/amp.ts @@ -67,7 +67,7 @@ const completionSpec: Fig.Spec = { { name: "--log-group-arn", description: - "The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this API", + "The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this operation", args: { name: "string", }, @@ -165,12 +165,12 @@ const completionSpec: Fig.Spec = { { name: "create-scraper", description: - "The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. You can configure the scraper to control what metrics are collected, and what transformations are applied prior to sending them to your workspace. If needed, an IAM role will be created for you that gives Amazon Managed Service for Prometheus access to the metrics in your cluster. For more information, see Using roles for scraping metrics from EKS in the Amazon Managed Service for Prometheus User Guide. You cannot update a scraper. If you want to change the configuration of the scraper, create a new scraper and delete the old one. 
The scrapeConfiguration parameter contains the base64-encoded version of the YAML configuration file. For more information about collectors, including what metrics are collected, and how to configure the scraper, see Amazon Web Services managed collectors in the Amazon Managed Service for Prometheus User Guide", + "The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. Scrapers are flexible, and can be configured to control what metrics are collected, the frequency of collection, what transformations are applied to the metrics, and more. An IAM role will be created for you that Amazon Managed Service for Prometheus uses to access the metrics in your cluster. You must configure this role with a policy that allows it to scrape metrics from your cluster. For more information, see Configuring your Amazon EKS cluster in the Amazon Managed Service for Prometheus User Guide. The scrapeConfiguration parameter contains the base-64 encoded YAML configuration for the scraper. For more information about collectors, including what metrics are collected, and how to configure the scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide", options: [ { name: "--alias", description: - "(optional) a name to associate with the scraper. This is for your use, and does not need to be unique", + "(optional) An alias to associate with the scraper. This is for your use, and does not need to be unique", args: { name: "string", }, @@ -836,12 +836,12 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-resource", description: - "The ListTagsForResource operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. 
Currently, the only resources that can be tagged are workspaces and rule groups namespaces", + "The ListTagsForResource operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are scrapers, workspaces, and rule groups namespaces", options: [ { name: "--resource-arn", description: - "The ARN of the resource to list tages for. Must be a workspace or rule groups namespace resource", + "The ARN of the resource to list tages for. Must be a workspace, scraper, or rule groups namespace resource", args: { name: "string", }, @@ -1044,12 +1044,11 @@ const completionSpec: Fig.Spec = { { name: "tag-resource", description: - "The TagResource operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag", + "The TagResource operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are rule groups namespaces, scrapers, and workspaces. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. 
To remove a tag, use UntagResource", options: [ { name: "--resource-arn", - description: - "The ARN of the workspace or rule groups namespace to apply tags to", + description: "The ARN of the resource to apply tags to", args: { name: "string", }, @@ -1057,7 +1056,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - "The list of tag keys and values to associate with the resource. Keys may not begin with aws:", + "The list of tag keys and values to associate with the resource. Keys must not begin with aws:", args: { name: "map", }, @@ -1084,11 +1083,11 @@ const completionSpec: Fig.Spec = { { name: "untag-resource", description: - "Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces", + "Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are rule groups namespaces, scrapers, and workspaces", options: [ { name: "--resource-arn", - description: "The ARN of the workspace or rule groups namespace", + description: "The ARN of the resource from which to remove a tag", args: { name: "string", }, @@ -1167,6 +1166,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-scraper", + description: + "Updates an existing scraper. You can't use this function to update the source from which the scraper is collecting metrics. To change the source, delete the scraper and create a new one", + options: [ + { + name: "--alias", + description: "The new alias of the scraper", + args: { + name: "string", + }, + }, + { + name: "--client-token", + description: + "A unique identifier that you can provide to ensure the idempotency of the request. 
Case-sensitive", + args: { + name: "string", + }, + }, + { + name: "--destination", + description: + "The new Amazon Managed Service for Prometheus workspace to send metrics to", + args: { + name: "structure", + }, + }, + { + name: "--scrape-configuration", + description: + "Contains the base-64 encoded YAML configuration for the scraper. For more information about configuring a scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide", + args: { + name: "structure", + }, + }, + { + name: "--scraper-id", + description: "The ID of the scraper to update", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-workspace-alias", description: "Updates the alias of an existing workspace", diff --git a/src/aws/appconfig.ts b/src/aws/appconfig.ts index 0f06aff409c..14bced44457 100644 --- a/src/aws/appconfig.ts +++ b/src/aws/appconfig.ts @@ -852,7 +852,7 @@ const completionSpec: Fig.Spec = { { name: "--client-configuration-version", description: - "The configuration version returned in the most recent GetConfiguration response. AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don\u2019t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. You are charged each time your clients receive a configuration. To avoid excess charges, we recommend you use the StartConfigurationSession and GetLatestConfiguration APIs, which track the client configuration version on your behalf. If you choose to continue using GetConfiguration, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. The value to use for ClientConfigurationVersion comes from the ConfigurationVersion attribute returned by GetConfiguration when there is new or updated data, and should be saved for subsequent calls to GetConfiguration. For more information about working with configurations, see Retrieving the Configuration in the AppConfig User Guide", + "The configuration version returned in the most recent GetConfiguration response. AppConfig uses the value of the ClientConfigurationVersion parameter to identify the configuration version on your clients. If you don\u2019t send ClientConfigurationVersion with each call to GetConfiguration, your clients receive the current configuration. 
You are charged each time your clients receive a configuration. To avoid excess charges, we recommend you use the StartConfigurationSession and GetLatestConfiguration APIs, which track the client configuration version on your behalf. If you choose to continue using GetConfiguration, we recommend that you include the ClientConfigurationVersion value with every call to GetConfiguration. The value to use for ClientConfigurationVersion comes from the ConfigurationVersion attribute returned by GetConfiguration when there is new or updated data, and should be saved for subsequent calls to GetConfiguration. For more information about working with configurations, see Retrieving feature flags and configuration data in AppConfig in the AppConfig User Guide", args: { name: "string", }, @@ -1847,7 +1847,7 @@ const completionSpec: Fig.Spec = { { name: "stop-deployment", description: - "Stops a deployment. This API action works only on deployments that have a status of DEPLOYING. This action moves the deployment to a status of ROLLED_BACK", + "Stops a deployment. This API action works only on deployments that have a status of DEPLOYING, unless an AllowRevert parameter is supplied. If the AllowRevert parameter is supplied, the status of an in-progress deployment will be ROLLED_BACK. The status of a completed deployment will be REVERTED. AppConfig only allows a revert within 72 hours of deployment completion", options: [ { name: "--application-id", @@ -1870,6 +1870,16 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--allow-revert", + description: + "A Boolean that enables AppConfig to rollback a COMPLETED deployment to the previous configuration version. This action moves the deployment to a status of REVERTED", + }, + { + name: "--no-allow-revert", + description: + "A Boolean that enables AppConfig to rollback a COMPLETED deployment to the previous configuration version. 
This action moves the deployment to a status of REVERTED", + }, { name: "--cli-input-json", description: diff --git a/src/aws/appsync.ts b/src/aws/appsync.ts index 1384272163a..2fa244ae639 100644 --- a/src/aws/appsync.ts +++ b/src/aws/appsync.ts @@ -149,6 +149,60 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-api", + description: + "Creates an Api object. Use this operation to create an AppSync API with your preferred configuration, such as an Event API that provides real-time message publishing and message subscriptions over WebSockets", + options: [ + { + name: "--name", + description: "The name for the Api", + args: { + name: "string", + }, + }, + { + name: "--owner-contact", + description: "The owner contact information for the Api", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "A map with keys of TagKey objects and values of TagValue objects", + args: { + name: "map", + }, + }, + { + name: "--event-config", + description: + "The Event API configuration. This includes the default authorization configuration for connecting, publishing, and subscribing to an Event API", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-api-cache", description: "Creates a cache for the GraphQL API", @@ -277,6 +331,76 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-channel-namespace", + description: "Creates a ChannelNamespace for an Api", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--name", + description: + "The name of the ChannelNamespace. This name must be unique within the Api", + args: { + name: "string", + }, + }, + { + name: "--subscribe-auth-modes", + description: + "The authorization mode to use for subscribing to messages on the channel namespace. This configuration overrides the default Api authorization configuration", + args: { + name: "list", + }, + }, + { + name: "--publish-auth-modes", + description: + "The authorization mode to use for publishing messages on the channel namespace. This configuration overrides the default Api authorization configuration", + args: { + name: "list", + }, + }, + { + name: "--code-handlers", + description: + "The event handler functions that run custom business logic to process published events and subscribe requests", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: + "A map with keys of TagKey objects and values of TagValue objects", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-data-source", description: "Creates a DataSource object", @@ -619,25 +743,25 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--visibility", + name: "--api-type", description: - "Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. This value cannot be changed once the API has been created", + "The value that indicates whether the GraphQL API is a standard API (GRAPHQL) or merged API (MERGED)", args: { name: "string", }, }, { - name: "--api-type", + name: "--merged-api-execution-role-arn", description: - "The value that indicates whether the GraphQL API is a standard API (GRAPHQL) or merged API (MERGED)", + "The Identity and Access Management service role ARN for a merged API. The AppSync service assumes this role on behalf of the Merged API to validate access to source APIs at runtime and to prompt the AUTO_MERGE to update the merged API endpoint with the source API changes automatically", args: { name: "string", }, }, { - name: "--merged-api-execution-role-arn", + name: "--visibility", description: - "The Identity and Access Management service role ARN for a merged API. 
The AppSync service assumes this role on behalf of the Merged API to validate access to source APIs at runtime and to prompt the AUTO_MERGE to update the merged API endpoint with the source API changes automatically", + "Sets the value of the GraphQL API to public (GLOBAL) or private (PRIVATE). If no value is provided, the visibility will be set to GLOBAL by default. This value cannot be changed once the API has been created", args: { name: "string", }, @@ -876,6 +1000,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-api", + description: "Deletes an Api object", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-api-cache", description: "Deletes an ApiCache object", @@ -943,6 +1097,43 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-channel-namespace", + description: "Deletes a ChannelNamespace", + options: [ + { + name: "--api-id", + description: "The ID of the Api associated with the ChannelNamespace", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the ChannelNamespace", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-data-source", description: "Deletes a DataSource object", @@ -1394,6 +1585,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-api", + description: "Retrieves an Api object", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-api-association", description: "Retrieves an ApiAssociation object", @@ -1454,6 +1675,43 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-channel-namespace", + description: "Retrieves the channel namespace for a specified Api", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the ChannelNamespace", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-data-source", description: "Retrieves a DataSource object", @@ -1507,12 +1765,12 @@ const completionSpec: Fig.Spec = { { name: "--include-models-sdl", description: - "A boolean flag that determines whether SDL should be generated for introspected types or not. If set to true, each model will contain an sdl property that contains the SDL for that type. 
The SDL only contains the type data and no additional metadata or directives", + "A boolean flag that determines whether SDL should be generated for introspected types. If set to true, each model will contain an sdl property that contains the SDL for that type. The SDL only contains the type data and no additional metadata or directives", }, { name: "--no-include-models-sdl", description: - "A boolean flag that determines whether SDL should be generated for introspected types or not. If set to true, each model will contain an sdl property that contains the SDL for that type. The SDL only contains the type data and no additional metadata or directives", + "A boolean flag that determines whether SDL should be generated for introspected types. If set to true, each model will contain an sdl property that contains the SDL for that type. The SDL only contains the type data and no additional metadata or directives", }, { name: "--next-token", @@ -1944,6 +2202,141 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-apis", + description: + "Lists the APIs in your AppSync account. ListApis returns only the high level API details. For more detailed information about an API, use GetApi", + options: [ + { + name: "--next-token", + description: + "An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results that you want the request to return", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-channel-namespaces", + description: + "Lists the channel namespaces for a specified Api. ListChannelNamespaces returns only high level details for the channel namespace. To retrieve code handlers, use GetChannelNamespace", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "An identifier that was returned from the previous call to this operation, which you can use to return the next set of items in the list", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of results that you want the request to return", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-data-sources", description: "Lists the data sources for a given API", @@ -2867,6 +3260,58 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-api", + description: "Updates an Api", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the Api", + args: { + name: "string", + }, + }, + { + name: "--owner-contact", + description: "The owner contact information for the Api", + args: { + name: "string", + }, + }, + { + name: "--event-config", + description: + "The new event configuration. 
This includes the default authorization configuration for connecting, publishing, and subscribing to an Event API", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-api-cache", description: "Updates the cache for the GraphQL API", @@ -2982,6 +3427,67 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-channel-namespace", + description: "Updates a ChannelNamespace associated with an Api", + options: [ + { + name: "--api-id", + description: "The Api ID", + args: { + name: "string", + }, + }, + { + name: "--name", + description: "The name of the ChannelNamespace", + args: { + name: "string", + }, + }, + { + name: "--subscribe-auth-modes", + description: + "The authorization mode to use for subscribing to messages on the channel namespace. This configuration overrides the default Api authorization configuration", + args: { + name: "list", + }, + }, + { + name: "--publish-auth-modes", + description: + "The authorization mode to use for publishing messages on the channel namespace. 
This configuration overrides the default Api authorization configuration", + args: { + name: "list", + }, + }, + { + name: "--code-handlers", + description: + "The event handler functions that run custom business logic to process published events and subscribe requests", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-data-source", description: "Updates a DataSource object", diff --git a/src/aws/autoscaling.ts b/src/aws/autoscaling.ts index 9175b71f2eb..a26d246d307 100644 --- a/src/aws/autoscaling.ts +++ b/src/aws/autoscaling.ts @@ -560,6 +560,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--availability-zone-distribution", + description: + "The instance capacity distribution across Availability Zones", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -3437,7 +3445,7 @@ const completionSpec: Fig.Spec = { { name: "--preferences", description: - "Sets your preferences for the instance refresh so that it performs as expected when you start it. 
Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following: Auto rollback Checkpoints CloudWatch alarms Skip matching", + "Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following: Auto rollback Checkpoints CloudWatch alarms Skip matching Bake time", args: { name: "structure", }, @@ -3722,6 +3730,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--availability-zone-distribution", + description: + "The instance capacity distribution across Availability Zones", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/batch.ts b/src/aws/batch.ts index a2f3e8ef35b..b00c8c89317 100644 --- a/src/aws/batch.ts +++ b/src/aws/batch.ts @@ -18,7 +18,7 @@ const completionSpec: Fig.Spec = { { name: "--reason", description: - "A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs", + "A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. It is also recorded in the Batch activity logs. 
This parameter has as limit of 1024 characters", args: { name: "string", }, @@ -161,7 +161,7 @@ const completionSpec: Fig.Spec = { { name: "--scheduling-policy-arn", description: - "The Amazon Resource Name (ARN) of the fair share scheduling policy. If this parameter is specified, the job queue uses a fair share scheduling policy. If this parameter isn't specified, the job queue uses a first in, first out (FIFO) scheduling policy. After a job queue is created, you can replace but can't remove the fair share scheduling policy. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name . An example is aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy", + "The Amazon Resource Name (ARN) of the fair share scheduling policy. Job queues that don't have a scheduling policy are scheduled in a first-in, first-out (FIFO) model. After a job queue has a scheduling policy, it can be replaced but can't be removed. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name . An example is aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy. A job queue without a scheduling policy is scheduled as a FIFO job queue and can't have a scheduling policy added. Jobs queues with a scheduling policy can have a maximum of 500 active fair share identifiers. When the limit has been reached, submissions of any jobs that add a new fair share identifier fail", args: { name: "string", }, @@ -193,7 +193,7 @@ const completionSpec: Fig.Spec = { { name: "--job-state-time-limit-actions", description: - "The set of actions that Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed", + "The set of actions that Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed. 
(Note: The minimum value for maxTimeSeconds is 600 (10 minutes) and its maximum value is 86,400 (24 hours).)", args: { name: "list", }, @@ -1250,7 +1250,7 @@ const completionSpec: Fig.Spec = { { name: "--reason", description: - "A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs", + "A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. It is also recorded in the Batch activity logs. This parameter has as limit of 1024 characters", args: { name: "string", }, @@ -1437,7 +1437,7 @@ const completionSpec: Fig.Spec = { { name: "--job-state-time-limit-actions", description: - "The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed", + "The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed. (Note: The minimum value for maxTimeSeconds is 600 (10 minutes) and its maximum value is 86,400 (24 hours).)", args: { name: "list", }, diff --git a/src/aws/bedrock-agent.ts b/src/aws/bedrock-agent.ts index 75d4afe7a95..a44616c23f1 100644 --- a/src/aws/bedrock-agent.ts +++ b/src/aws/bedrock-agent.ts @@ -113,7 +113,7 @@ const completionSpec: Fig.Spec = { { name: "--foundation-model", description: - "The Amazon Resource Name (ARN) of the foundation model to be used for orchestration by the agent you create", + "The identifier for the model that you want to be used for orchestration by the agent you create. The modelId to provide depends on the type of model or throughput that you use: If you use a base model, specify the model ID or its ARN. 
For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. If you use an imported model, specify the ARN of the imported model. You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console", args: { name: "string", }, @@ -2967,7 +2967,7 @@ const completionSpec: Fig.Spec = { { name: "--foundation-model", description: - "Specifies a new foundation model to be used for orchestration by the agent", + "The identifier for the model that you want to be used for orchestration by the agent you create. The modelId to provide depends on the type of model or throughput that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. 
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. If you use an imported model, specify the ARN of the imported model. You can get the model ARN from a successful call to CreateModelImportJob or from the Imported models page in the Amazon Bedrock console", args: { name: "string", }, @@ -3605,6 +3605,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "validate-flow-definition", + description: "Validates the definition of a flow", + options: [ + { + name: "--definition", + description: "The definition of a flow to validate", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, ], }; export default completionSpec; diff --git a/src/aws/bedrock-runtime.ts b/src/aws/bedrock-runtime.ts index 62c1b298ee9..02fa18589b2 100644 --- a/src/aws/bedrock-runtime.ts +++ b/src/aws/bedrock-runtime.ts @@ -61,12 +61,12 @@ const completionSpec: Fig.Spec = { { name: "converse", description: - "Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. If a model has unique inference parameters, you can also pass those unique parameters to the model. Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action", + "Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. If a model has unique inference parameters, you can also pass those unique parameters to the model. Amazon Bedrock doesn't store any text, images, or documents that you provide as content. The data is only used to generate the response. 
You can submit a prompt by including it in the messages field, specifying the modelId of a foundation model or inference profile to run inference on it, and including any other fields that are relevant to your use case. You can also submit a prompt from Prompt management by specifying the ARN of the prompt version and including a map of variables to values in the promptVariables field. You can append more messages to the prompt by using the messages field. If you use a prompt from Prompt management, you can't include the following fields in the request: additionalModelRequestFields, inferenceConfig, system, or toolConfig. Instead, these fields must be defined through Prompt management. For more information, see Use a prompt from Prompt management. For information about the Converse API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action", options: [ { name: "--model-id", description: - "The identifier for the model that you want to call. The modelId to provide depends on the type of model or throughput that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. 
If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. The Converse API doesn't support imported models", + "Specifies the model or throughput with which to run inference, or the prompt resource to use in inference. The value depends on the resource that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use an inference profile, specify the inference profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and models for cross-region inference in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide. To include a prompt that was defined in Prompt management, specify the ARN of the prompt version to use. The Converse API doesn't support imported models", args: { name: "string", }, @@ -80,7 +80,8 @@ const completionSpec: Fig.Spec = { }, { name: "--system", - description: "A system prompt to pass to the model", + description: + "A prompt that provides instructions or context to the model about the task it should perform, or the persona it should adopt during the conversation", args: { name: "list", }, @@ -88,7 +89,7 @@ const completionSpec: Fig.Spec = { { name: "--inference-config", description: - "Inference parameters to pass to the model. Converse supports a base set of inference parameters. 
If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field", + "Inference parameters to pass to the model. Converse and ConverseStream support a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field", args: { name: "structure", }, @@ -104,7 +105,7 @@ const completionSpec: Fig.Spec = { { name: "--guardrail-config", description: - "Configuration information for a guardrail that you want to use in the request", + "Configuration information for a guardrail that you want to use in the request. If you include guardContent blocks in the content field in the messages field, the guardrail operates only on those messages. If you include no guardContent blocks, the guardrail operates on all messages in the request body and in any included prompt resource", args: { name: "structure", }, @@ -112,15 +113,23 @@ const completionSpec: Fig.Spec = { { name: "--additional-model-request-fields", description: - "Additional inference parameters that the model supports, beyond the base set of inference parameters that Converse supports in the inferenceConfig field. For more information, see Model parameters", + "Additional inference parameters that the model supports, beyond the base set of inference parameters that Converse and ConverseStream support in the inferenceConfig field. For more information, see Model parameters", args: { name: "structure", }, }, + { + name: "--prompt-variables", + description: + "Contains a map of variables in a prompt from Prompt management to objects containing the values to fill in for them when running model invocation. This field is ignored if you don't specify a prompt resource in the modelId field", + args: { + name: "map", + }, + }, { name: "--additional-model-response-field-paths", description: - 'Additional model parameters field paths to return in the response. 
Converse returns the requested fields as a JSON Pointer object in the additionalModelResponseFields field. The following is example JSON for additionalModelResponseFieldPaths. [ "/stop_sequence" ] For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation. Converse rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse', + 'Additional model parameters field paths to return in the response. Converse and ConverseStream return the requested fields as a JSON Pointer object in the additionalModelResponseFields field. The following is example JSON for additionalModelResponseFieldPaths. [ "/stop_sequence" ] For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation. Converse and ConverseStream reject an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. if the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse', args: { name: "list", }, diff --git a/src/aws/bedrock.ts b/src/aws/bedrock.ts index 19cb1a68bff..05658ad12f2 100644 --- a/src/aws/bedrock.ts +++ b/src/aws/bedrock.ts @@ -290,6 +290,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-inference-profile", + description: + "Creates an application inference profile to track metrics and costs when invoking a model. To create an application inference profile for a foundation model in one region, specify the ARN of the model in that region. To create an application inference profile for a foundation model across multiple regions, specify the ARN of the system-defined inference profile that contains the regions that you want to route requests to. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock. 
in the Amazon Bedrock User Guide", + options: [ + { + name: "--inference-profile-name", + description: "A name for the inference profile", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "A description for the inference profile", + args: { + name: "string", + }, + }, + { + name: "--client-request-token", + description: + "A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency", + args: { + name: "string", + }, + }, + { + name: "--model-source", + description: + "The foundation model or system-defined inference profile that the inference profile will track metrics and costs for", + args: { + name: "structure", + }, + }, + { + name: "--tags", + description: + "An array of objects, each of which contains a tag and its value. For more information, see Tagging resources in the Amazon Bedrock User Guide", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-model-copy-job", description: @@ -834,6 +896,38 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-inference-profile", + description: + "Deletes an application inference profile. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock. in the Amazon Bedrock User Guide", + options: [ + { + name: "--inference-profile-identifier", + description: + "The Amazon Resource Name (ARN) or ID of the application inference profile to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-model-invocation-logging-configuration", description: "Delete the invocation logging", @@ -1057,11 +1151,12 @@ const completionSpec: Fig.Spec = { { name: "get-inference-profile", description: - "Gets information about an inference profile. 
For more information, see the Amazon Bedrock User Guide", + "Gets information about an inference profile. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock. in the Amazon Bedrock User Guide", options: [ { name: "--inference-profile-identifier", - description: "The unique identifier of the inference profile", + description: + "The ID or Amazon Resource Name (ARN) of the inference profile", args: { name: "string", }, @@ -1733,7 +1828,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-inference-profiles", - description: "Returns a list of inference profiles that you can use", + description: + "Returns a list of inference profiles that you can use. For more information, see Increase throughput and resilience with cross-region inference in Amazon Bedrock. in the Amazon Bedrock User Guide", options: [ { name: "--max-results", @@ -1751,6 +1847,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--type-equals", + description: + "Filters for inference profiles that match the type you specify. SYSTEM_DEFINED \u2013 The inference profile is defined by Amazon Bedrock. You can route inference requests across regions with these inference profiles. APPLICATION \u2013 The inference profile was created by a user. This type of inference profile can track metrics and costs when invoking the model in it. The inference profile may route requests to one or multiple regions", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/chime-sdk-media-pipelines.ts b/src/aws/chime-sdk-media-pipelines.ts index 4d130c3f641..0d884a775ad 100644 --- a/src/aws/chime-sdk-media-pipelines.ts +++ b/src/aws/chime-sdk-media-pipelines.ts @@ -54,6 +54,22 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--sse-aws-key-management-params", + description: + "An object that contains server side encryption parameters to be used by media capture pipeline. 
The parameters can also be used by media concatenation pipeline taking media capture pipeline as a media source", + args: { + name: "structure", + }, + }, + { + name: "--sink-iam-role-arn", + description: + "The Amazon Resource Name (ARN) of the sink role to be used with AwsKmsKeyId in SseAwsKeyManagementParams. Can only interact with S3Bucket sink type. The role must belong to the caller\u2019s account and be able to act on behalf of the caller during the API call. All minimum policy permissions requirements for the caller to perform sink-related actions are the same for SinkIamRoleArn. Additionally, the role must have permission to kms:GenerateDataKey using KMS key supplied as AwsKmsKeyId in SseAwsKeyManagementParams. If media concatenation will be required later, the role must also have permission to kms:Decrypt for the same KMS key", + args: { + name: "string", + }, + }, { name: "--tags", description: "The tag key-value pairs", diff --git a/src/aws/cleanrooms.ts b/src/aws/cleanrooms.ts index dc603351c15..99177e8dd1c 100644 --- a/src/aws/cleanrooms.ts +++ b/src/aws/cleanrooms.ts @@ -228,6 +228,14 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--creator-ml-member-abilities", + description: + "The ML abilities granted to the collaboration creator. Custom ML modeling is in beta release and is subject to change. 
For beta terms and conditions, see Betas and Previews in the Amazon Web Services Service Terms", + args: { + name: "structure", + }, + }, { name: "--creator-display-name", description: "The display name of the collaboration creator", @@ -267,6 +275,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--analytics-engine", + description: "The analytics engine", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -2014,7 +2029,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2022,7 +2037,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2085,7 +2100,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2093,7 +2108,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2157,7 +2172,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2165,7 +2180,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2300,7 +2315,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2308,7 +2323,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2378,7 +2393,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. 
The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2386,7 +2401,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2442,7 +2457,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2450,7 +2465,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2521,7 +2536,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2529,7 +2544,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2592,7 +2607,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2600,7 +2615,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2655,7 +2670,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2663,7 +2678,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2868,7 +2883,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2876,7 +2891,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -2932,7 +2947,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -2940,7 +2955,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3012,7 +3027,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -3020,7 +3035,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. 
Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3091,7 +3106,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -3099,7 +3114,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service may return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3168,7 +3183,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -3176,7 +3191,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call. Service chooses a default if it has not been set. Service can return a nextToken even if the maximum results has not been met", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. 
The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3238,8 +3253,7 @@ const completionSpec: Fig.Spec = { }, { name: "--schema-type", - description: - "If present, filter schemas by schema type. The only valid schema type is currently `TABLE`", + description: "If present, filter schemas by schema type", args: { name: "string", }, @@ -3247,7 +3261,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "The token value retrieved from a previous call to access the next page of results", + "The pagination token that's used to fetch the next set of results", args: { name: "string", }, @@ -3255,7 +3269,7 @@ const completionSpec: Fig.Spec = { { name: "--max-results", description: - "The maximum size of the results that is returned per call", + "The maximum number of results that are returned for an API request call. The service chooses a default number if you don't set one. The service might return a `nextToken` even if the `maxResults` value has not been met", args: { name: "integer", }, @@ -3447,6 +3461,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--compute-configuration", + description: "The compute configuration for the protected query", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/cleanroomsml.ts b/src/aws/cleanroomsml.ts index 3187d3191e8..d5e817a10ea 100644 --- a/src/aws/cleanroomsml.ts +++ b/src/aws/cleanroomsml.ts @@ -3,6 +3,84 @@ const completionSpec: Fig.Spec = { description: "Welcome to the Amazon Web Services Clean Rooms ML API Reference. Amazon Web Services Clean Rooms ML provides a privacy-enhancing method for two parties to identify similar users in their data without the need to share their data with each other. 
The first party brings the training data to Clean Rooms so that they can create and configure an audience model (lookalike model) and associate it with a collaboration. The second party then brings their seed data to Clean Rooms and generates an audience (lookalike segment) that resembles the training data. To learn more about Amazon Web Services Clean Rooms ML concepts, procedures, and best practices, see the Clean Rooms User Guide. To learn more about SQL commands, functions, and conditions supported in Clean Rooms, see the Clean Rooms SQL Reference", subcommands: [ + { + name: "cancel-trained-model", + description: "Submits a request to cancel the trained model job", + options: [ + { + name: "--membership-identifier", + description: + "The membership ID of the trained model job that you want to cancel", + args: { + name: "string", + }, + }, + { + name: "--trained-model-arn", + description: + "The Amazon Resource Name (ARN) of the trained model job that you want to cancel", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "cancel-trained-model-inference-job", + description: "Submits a request to cancel a trained model inference job", + options: [ + { + name: "--membership-identifier", + description: + "The membership ID of the trained model inference job that you want to cancel", + args: { + name: "string", + }, + }, + { + name: "--trained-model-inference-job-arn", + description: + "The Amazon Resource Name (ARN) of the trained model inference job that you want to cancel", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-audience-model", description: @@ -173,14 +251,20 @@ const completionSpec: Fig.Spec = { ], }, { - name: "create-training-dataset", + name: "create-configured-model-algorithm", description: - "Defines the information necessary to create a training dataset. 
In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation", + "Creates a configured model algorithm using a container image stored in an ECR repository", options: [ { name: "--name", - description: - "The name of the training dataset. This name must be unique in your account and region", + description: "The name of the configured model algorithm", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "The description of the configured model algorithm", args: { name: "string", }, @@ -188,17 +272,25 @@ const completionSpec: Fig.Spec = { { name: "--role-arn", description: - "The ARN of the IAM role that Clean Rooms ML can assume to read the data referred to in the dataSource field of each dataset. Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an AccessDeniedException error", + "The Amazon Resource Name (ARN) of the role that is used to access the repository", args: { name: "string", }, }, { - name: "--training-data", + name: "--training-container-config", description: - "An array of information that lists the Dataset objects, which specifies the dataset type and details on its location and schema. You must provide a role that has read access to these tables", + "Configuration information for the training container, including entrypoints and arguments", args: { - name: "list", + name: "structure", + }, + }, + { + name: "--inference-container-config", + description: + "Configuration information for the inference container that is used when you run an inference job on a configured model algorithm", + args: { + name: "structure", }, }, { @@ -210,8 +302,9 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--description", - description: "The description of the training dataset", + name: "--kms-key-arn", + description: + "The Amazon Resource Name (ARN) of the KMS key. 
This key is used to encrypt and decrypt customer-owned data in the configured ML model algorithm and associated data", args: { name: "string", }, @@ -236,50 +329,57 @@ const completionSpec: Fig.Spec = { ], }, { - name: "delete-audience-generation-job", + name: "create-configured-model-algorithm-association", description: - "Deletes the specified audience generation job, and removes all data associated with the job", + "Associates a configured model algorithm to a collaboration for use by any member of the collaboration", options: [ { - name: "--audience-generation-job-arn", + name: "--membership-identifier", description: - "The Amazon Resource Name (ARN) of the audience generation job that you want to delete", + "The membership ID of the member who is associating this configured model algorithm", args: { name: "string", }, }, { - name: "--cli-input-json", + name: "--configured-model-algorithm-arn", description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + "The Amazon Resource Name (ARN) of the configured model algorithm that you want to associate", args: { name: "string", }, }, { - name: "--generate-cli-skeleton", - description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + name: "--name", + description: "The name of the configured model algorithm association", args: { name: "string", - suggestions: ["input", "output"], }, }, - ], - }, - { - name: "delete-audience-model", - description: - "Specifies an audience model that you want to delete. You can't delete an audience model if there are any configured audience models that depend on the audience model", - options: [ { - name: "--audience-model-arn", + name: "--description", description: - "The Amazon Resource Name (ARN) of the audience model that you want to delete", + "The description of the configured model algorithm association", args: { name: "string", }, }, + { + name: "--privacy-configuration", + description: + "Specifies the privacy configuration information for the configured model algorithm association. This information includes the maximum data size that can be exported", + args: { + name: "structure", + }, + }, + { + name: "--tags", + description: + "The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50. For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 Unicode characters in UTF-8. If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. 
You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit", + args: { + name: "map", + }, + }, { name: "--cli-input-json", description: @@ -300,49 +400,72 @@ const completionSpec: Fig.Spec = { ], }, { - name: "delete-configured-audience-model", + name: "create-ml-input-channel", description: - "Deletes the specified configured audience model. You can't delete a configured audience model if there are any lookalike models that use the configured audience model. If you delete a configured audience model, it will be removed from any collaborations that it is associated to", + "Provides the information to create an ML input channel. An ML input channel is the result of a query that can be used for ML modeling", options: [ { - name: "--configured-audience-model-arn", + name: "--membership-identifier", description: - "The Amazon Resource Name (ARN) of the configured audience model that you want to delete", + "The membership ID of the member that is creating the ML input channel", args: { name: "string", }, }, { - name: "--cli-input-json", + name: "--configured-model-algorithm-associations", description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + "The associated configured model algorithms that are necessary to create this ML input channel", + args: { + name: "list", + }, + }, + { + name: "--input-channel", + description: + "The input data that is used to create this ML input channel", + args: { + name: "structure", + }, + }, + { + name: "--name", + description: "The name of the ML input channel", args: { name: "string", }, }, { - name: "--generate-cli-skeleton", + name: "--retention-in-days", description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + "The number of days that the data in the ML input channel is retained", + args: { + name: "integer", + }, + }, + { + name: "--description", + description: "The description of the ML input channel", args: { name: "string", - suggestions: ["input", "output"], }, }, - ], - }, - { - name: "delete-configured-audience-model-policy", - description: "Deletes the specified configured audience model policy", - options: [ { - name: "--configured-audience-model-arn", + name: "--kms-key-arn", description: - "The Amazon Resource Name (ARN) of the configured audience model policy that you want to delete", + "The Amazon Resource Name (ARN) of the KMS key that is used to access the input channel", args: { name: "string", }, }, + { + name: "--tags", + description: + "The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50. 
For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 Unicode characters in UTF-8. If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit", + args: { + name: "map", + }, + }, { name: "--cli-input-json", description: @@ -363,80 +486,95 @@ const completionSpec: Fig.Spec = { ], }, { - name: "delete-training-dataset", + name: "create-trained-model", description: - "Specifies a training dataset that you want to delete. You can't delete a training dataset if there are any audience models that depend on the training dataset. In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation. 
This action deletes the metadata", + "Creates a trained model from an associated configured model algorithm using data from any member of the collaboration", options: [ { - name: "--training-dataset-arn", + name: "--membership-identifier", description: - "The Amazon Resource Name (ARN) of the training dataset that you want to delete", + "The membership ID of the member that is creating the trained model", args: { name: "string", }, }, { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + name: "--name", + description: "The name of the trained model", args: { name: "string", }, }, { - name: "--generate-cli-skeleton", + name: "--configured-model-algorithm-association-arn", description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + "The associated configured model algorithm used to train this model", args: { name: "string", - suggestions: ["input", "output"], }, }, - ], - }, - { - name: "get-audience-generation-job", - description: "Returns information about an audience generation job", - options: [ { - name: "--audience-generation-job-arn", + name: "--hyperparameters", description: - "The Amazon Resource Name (ARN) of the audience generation job that you are interested in", + "Algorithm-specific parameters that influence the quality of the model. 
You set hyperparameters before you start the learning process", args: { - name: "string", + name: "map", }, }, { - name: "--cli-input-json", + name: "--environment", description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + "The environment variables to set in the Docker container", args: { - name: "string", + name: "map", }, }, { - name: "--generate-cli-skeleton", + name: "--resource-config", description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + "Information about the EC2 resources that are used to train this model", + args: { + name: "structure", + }, + }, + { + name: "--stopping-condition", + description: "The criteria that is used to stop model training", + args: { + name: "structure", + }, + }, + { + name: "--data-channels", + description: + "Defines the data channels that are used as input for the trained model request", + args: { + name: "list", + }, + }, + { + name: "--description", + description: "The description of the trained model", args: { name: "string", - suggestions: ["input", "output"], }, }, - ], - }, - { - name: "get-audience-model", - description: "Returns information about an audience model", - options: [ { - name: "--audience-model-arn", + name: "--kms-key-arn", description: - "The Amazon Resource Name (ARN) of the audience model that you are interested in", + "The Amazon Resource Name (ARN) of the KMS key. 
This key is used to encrypt and decrypt customer-owned data in the trained ML model and the associated data", args: { name: "string", }, }, + { + name: "--tags", + description: + "The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50. For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 Unicode characters in UTF-8. If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit", + args: { + name: "map", + }, + }, { name: "--cli-input-json", description: @@ -457,56 +595,1743 @@ const completionSpec: Fig.Spec = { ], }, { - name: "get-configured-audience-model", + name: "create-training-dataset", description: - "Returns information about a specified configured audience model", + "Defines the information necessary to create a training dataset. 
In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation", options: [ { - name: "--configured-audience-model-arn", + name: "--name", description: - "The Amazon Resource Name (ARN) of the configured audience model that you are interested in", + "The name of the training dataset. This name must be unique in your account and region", args: { name: "string", }, }, { - name: "--cli-input-json", + name: "--role-arn", description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + "The ARN of the IAM role that Clean Rooms ML can assume to read the data referred to in the dataSource field of each dataset. Passing a role across AWS accounts is not allowed. If you pass a role that isn't in your account, you get an AccessDeniedException error", args: { name: "string", }, }, { - name: "--generate-cli-skeleton", + name: "--training-data", + description: + "An array of information that lists the Dataset objects, which specifies the dataset type and details on its location and schema. You must provide a role that has read access to these tables", + args: { + name: "list", + }, + }, + { + name: "--tags", + description: + "The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50. For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 Unicode characters in UTF-8. 
If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of aws do not count against your tags per resource limit", + args: { + name: "map", + }, + }, + { + name: "--description", + description: "The description of the training dataset", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-audience-generation-job", + description: + "Deletes the specified audience generation job, and removes all data associated with the job", + options: [ + { + name: "--audience-generation-job-arn", + description: + "The Amazon Resource Name (ARN) of the audience generation job that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-audience-model", + description: + "Specifies an audience model that you want to delete. 
You can't delete an audience model if there are any configured audience models that depend on the audience model", + options: [ + { + name: "--audience-model-arn", + description: + "The Amazon Resource Name (ARN) of the audience model that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-configured-audience-model", + description: + "Deletes the specified configured audience model. You can't delete a configured audience model if there are any lookalike models that use the configured audience model. If you delete a configured audience model, it will be removed from any collaborations that it is associated to", + options: [ + { + name: "--configured-audience-model-arn", + description: + "The Amazon Resource Name (ARN) of the configured audience model that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-configured-audience-model-policy", + description: "Deletes the specified configured audience model policy", + options: [ + { + name: "--configured-audience-model-arn", + description: + "The Amazon Resource Name (ARN) of the configured audience model policy that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-configured-model-algorithm", + description: "Deletes a configured model algorithm", + options: [ + { + name: "--configured-model-algorithm-arn", + description: + "The Amazon Resource Name (ARN) of the configured model algorithm that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-configured-model-algorithm-association", + description: "Deletes a configured model algorithm association", + options: [ + { + name: "--configured-model-algorithm-association-arn", + description: + "The Amazon Resource Name (ARN) of the configured model algorithm association that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--membership-identifier", + description: + "The membership ID of the member that is deleting the configured model algorithm association", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-ml-configuration", + description: "Deletes a ML modeling configuration", + options: [ + { + name: "--membership-identifier", + description: + "The membership ID of the of the member that is deleting the ML modeling configuration", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-ml-input-channel-data", + description: + "Provides the information necessary to delete an ML input channel", + options: [ + { + name: "--ml-input-channel-arn", + description: + "The Amazon Resource Name (ARN) of the ML input channel that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--membership-identifier", + description: + "The membership ID of the membership that contains the ML input channel you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-trained-model-output", + description: "Deletes the output of a trained model", + options: [ + { + name: "--trained-model-arn", + description: + "The Amazon Resource Name (ARN) of the trained model whose output you want to delete", + args: { + name: "string", + }, + }, + { + name: "--membership-identifier", + description: + "The membership ID of the member that is deleting the trained model output", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-training-dataset", + description: + "Specifies a training dataset that you want to delete. You can't delete a training dataset if there are any audience models that depend on the training dataset. In Clean Rooms ML, the TrainingDataset is metadata that points to a Glue table, which is read only during AudienceModel creation. 
This action deletes the metadata", + options: [ + { + name: "--training-dataset-arn", + description: + "The Amazon Resource Name (ARN) of the training dataset that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-audience-generation-job", + description: "Returns information about an audience generation job", + options: [ + { + name: "--audience-generation-job-arn", + description: + "The Amazon Resource Name (ARN) of the audience generation job that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-audience-model", + description: "Returns information about an audience model", + options: [ + { + name: "--audience-model-arn", + description: + "The Amazon Resource Name (ARN) of the audience model that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-collaboration-configured-model-algorithm-association", + description: + "Returns information about the configured model algorithm association in a collaboration", + options: [ + { + name: "--configured-model-algorithm-association-arn", + description: + "The Amazon Resource Name (ARN) of the configured model algorithm association that you want to return information about", + args: { + name: "string", + }, + }, + { + name: "--collaboration-identifier", + description: + "The collaboration ID for the collaboration that contains the configured model algorithm association that you want to return information about", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-collaboration-ml-input-channel", + description: + "Returns information about a specific ML input channel in a collaboration", + options: [ + { + name: "--ml-input-channel-arn", + description: + "The Amazon Resource Name (ARN) of the ML input channel that you want to get", + args: { + name: "string", + }, + }, + { + name: "--collaboration-identifier", + description: + "The collaboration ID of the collaboration that contains the ML input channel that you want to get", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-collaboration-trained-model", + description: + "Returns information about a trained model in a collaboration", + options: [ + { + name: "--trained-model-arn", + description: + "The Amazon Resource Name (ARN) of the trained model that you want to return information about", + args: { + name: "string", + }, + }, + { + name: "--collaboration-identifier", + description: + "The collaboration ID that contains the trained model that you want to return information about", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-configured-audience-model", + description: + "Returns information about a specified configured audience model", + options: [ + { + name: "--configured-audience-model-arn", + description: + "The Amazon Resource Name (ARN) of the configured audience model that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-configured-audience-model-policy", + description: + "Returns information about a configured audience model policy", + options: [ + { + name: "--configured-audience-model-arn", + description: + "The Amazon Resource Name (ARN) of the configured audience model that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-configured-model-algorithm", + description: "Returns information about a configured model algorithm", + options: [ + { + name: "--configured-model-algorithm-arn", + description: + "The Amazon Resource Name (ARN) of the configured model algorithm that you want to return information about", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-configured-model-algorithm-association", + description: + "Returns information about a configured model algorithm association", + options: [ + { + name: "--configured-model-algorithm-association-arn", + description: + "The Amazon Resource Name (ARN) of the configured model algorithm association that you want to return information about", + args: { + name: "string", + }, + }, + { + name: "--membership-identifier", + description: + "The membership ID of the member that created the configured model algorithm association", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-ml-configuration", + description: "Returns information about a specific ML configuration", + options: [ + { + name: "--membership-identifier", + description: + "The membership ID of the member that owns the ML configuration you want to return information about", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-ml-input-channel", + description: "Returns information about an ML input channel", + options: [ + { + name: "--ml-input-channel-arn", + description: + "The Amazon Resource Name (ARN) of the ML input channel that you want to get", + args: { + name: "string", + }, + }, + { + name: "--membership-identifier", + description: + "The membership ID of the membership that contains the ML input channel that you want to get", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-trained-model", + description: "Returns information about a trained model", + options: [ + { + name: "--trained-model-arn", + description: + "The Amazon Resource Name (ARN) of the trained model that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--membership-identifier", + description: + "The membership ID of the member that created the trained model that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-trained-model-inference-job", + description: "Returns information about a trained model inference job", + options: [ + { + name: "--membership-identifier", + description: + "Provides the membership ID of the membership that contains the trained model inference job that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--trained-model-inference-job-arn", + description: + "Provides the Amazon Resource Name (ARN) of the trained model inference job that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-training-dataset", + description: "Returns information about a training dataset", + options: [ + { + name: "--training-dataset-arn", + description: + "The Amazon Resource Name (ARN) of the training dataset that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-audience-export-jobs", + description: "Returns a list of the audience export jobs", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--audience-generation-job-arn", + description: + "The Amazon Resource Name (ARN) of the audience generation job that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-audience-generation-jobs", + description: "Returns a list of audience generation jobs", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--configured-audience-model-arn", + description: + "The Amazon Resource Name (ARN) of the configured audience model that was used for the audience generation jobs that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--collaboration-id", + description: + "The identifier of the collaboration that contains the audience generation 
jobs that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-audience-models", + description: "Returns a list of audience models", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-collaboration-configured-model-algorithm-associations", + description: + "Returns a list of the configured model algorithm associations in a collaboration", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--collaboration-identifier", + description: + "The collaboration ID of the collaboration that contains the configured model algorithm associations that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service 
operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-collaboration-ml-input-channels", + description: "Returns a list of the ML input channels in a collaboration", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "The maximum number of results to return", + args: { + name: "integer", + }, + }, + { + name: "--collaboration-identifier", + description: + "The collaboration ID of the collaboration that contains the ML input channels that you want to list", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-collaboration-trained-model-export-jobs", + description: + "Returns a list of the export jobs for a trained model in a collaboration", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--collaboration-identifier", + description: + "The collaboration ID of the collaboration that contains the trained model export jobs that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--trained-model-arn", + description: + "The Amazon Resource Name (ARN) of the trained model that 
was used to create the export jobs that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-collaboration-trained-model-inference-jobs", + description: + "Returns a list of trained model inference jobs in a specified collaboration", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--collaboration-identifier", + description: + "The collaboration ID of the collaboration that contains the trained model inference jobs that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--trained-model-arn", + description: + "The Amazon Resource Name (ARN) of the trained model that was used to create the trained model inference jobs that you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. 
This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-collaboration-trained-models", + description: "Returns a list of the trained models in a collaboration", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--collaboration-identifier", + description: + "The collaboration ID of the collaboration that contains the trained models you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-configured-audience-models", + description: "Returns a list of the configured audience models", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-configured-model-algorithm-associations", + description: "Returns a list of configured model algorithm associations", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--membership-identifier", + description: + "The membership ID of the member that created the configured model algorithm associations you are interested in", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-configured-model-algorithms", + description: "Returns a list of configured model algorithms", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum size of the results that is returned per call", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-ml-input-channels", + description: "Returns a list of ML input channels", + options: [ + { + name: "--next-token", + description: + "The token value retrieved from a previous call to access the next page of results", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "The maximum number of ML input channels to return", + args: { + name: "integer", + }, + }, + { + name: "--membership-identifier", + description: + "The membership ID of the membership that contains the ML input channels that you want to list", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", description: - "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + "A token to specify where to start paginating. 
This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", args: { name: "string", - suggestions: ["input", "output"], }, }, - ], - }, - { - name: "get-configured-audience-model-policy", - description: - "Returns information about a configured audience model policy", - options: [ { - name: "--configured-audience-model-arn", + name: "--page-size", description: - "The Amazon Resource Name (ARN) of the configured audience model that you are interested in", + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", args: { - name: "string", + name: "integer", }, }, { - name: "--cli-input-json", + name: "--max-items", description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", args: { - name: "string", + name: "integer", }, }, { @@ -521,13 +2346,13 @@ const completionSpec: Fig.Spec = { ], }, { - name: "get-training-dataset", - description: "Returns information about a training dataset", + name: "list-tags-for-resource", + description: "Returns a list of tags for a provided resource", options: [ { - name: "--training-dataset-arn", + name: "--resource-arn", description: - "The Amazon Resource Name (ARN) of the training dataset that you are interested in", + "The Amazon Resource Name (ARN) of the resource that you are interested in", args: { name: "string", }, @@ -552,8 +2377,9 @@ const completionSpec: Fig.Spec = { ], }, { - name: "list-audience-export-jobs", - description: "Returns a list of the audience export jobs", + name: "list-trained-model-inference-jobs", + description: + "Returns a list of trained model inference jobs that match the request parameters", options: [ { name: "--next-token", @@ -572,9 +2398,16 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--audience-generation-job-arn", + name: "--membership-identifier", + description: "The membership", + args: { + name: "string", + }, + }, + { + name: "--trained-model-arn", description: - "The Amazon Resource Name (ARN) of the audience generation job that you are interested in", + "The Amazon Resource Name (ARN) of a trained model that was used to create the trained model inference jobs that you are interested in", args: { name: "string", }, @@ -623,8 +2456,8 @@ const completionSpec: Fig.Spec = { ], }, { - name: "list-audience-generation-jobs", - description: "Returns a list of audience generation jobs", + name: "list-trained-models", + description: "Returns a list of trained models", options: [ { name: "--next-token", @@ -643,17 +2476,9 @@ const completionSpec: Fig.Spec = { }, }, { - name: 
"--configured-audience-model-arn", - description: - "The Amazon Resource Name (ARN) of the configured audience model that was used for the audience generation jobs that you are interested in", - args: { - name: "string", - }, - }, - { - name: "--collaboration-id", + name: "--membership-identifier", description: - "The identifier of the collaboration that contains the audience generation jobs that you are interested in", + "The membership ID of the member that created the trained models you are interested in", args: { name: "string", }, @@ -702,8 +2527,8 @@ const completionSpec: Fig.Spec = { ], }, { - name: "list-audience-models", - description: "Returns a list of audience models", + name: "list-training-datasets", + description: "Returns a list of training datasets", options: [ { name: "--next-token", @@ -765,55 +2590,47 @@ const completionSpec: Fig.Spec = { ], }, { - name: "list-configured-audience-models", - description: "Returns a list of the configured audience models", + name: "put-configured-audience-model-policy", + description: + "Create or update the resource policy for a configured audience model", options: [ { - name: "--next-token", + name: "--configured-audience-model-arn", description: - "The token value retrieved from a previous call to access the next page of results", + "The Amazon Resource Name (ARN) of the configured audience model that the resource policy will govern", args: { name: "string", }, }, { - name: "--max-results", - description: - "The maximum size of the results that is returned per call", - args: { - name: "integer", - }, - }, - { - name: "--cli-input-json", - description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + name: "--configured-audience-model-policy", + description: "The IAM resource policy", args: { name: "string", }, }, { - name: "--starting-token", + name: "--previous-policy-hash", description: - "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + "A cryptographic hash of the contents of the policy used to prevent unexpected concurrent modification of the policy", args: { name: "string", }, }, { - name: "--page-size", + name: "--policy-existence-condition", description: - "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + "Use this to prevent unexpected concurrent modification of the policy", args: { - name: "integer", + name: "string", }, }, { - name: "--max-items", + name: "--cli-input-json", description: - "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", args: { - name: "integer", + name: "string", }, }, { @@ -828,17 +2645,25 @@ const completionSpec: Fig.Spec = { ], }, { - name: "list-tags-for-resource", - description: "Returns a list of tags for a provided resource", + name: "put-ml-configuration", + description: "Assigns information about an ML configuration", options: [ { - name: "--resource-arn", + name: "--membership-identifier", description: - "The Amazon Resource Name (ARN) of the resource that you are interested in", + "The membership ID of the member that is being configured", args: { name: "string", }, }, + { + name: "--default-output-location", + description: + "The default Amazon S3 location where ML output is stored for the specified member", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -859,55 +2684,46 @@ const completionSpec: Fig.Spec = { ], }, { - name: "list-training-datasets", - description: "Returns a list of training datasets", + name: "start-audience-export-job", + description: + "Export an audience of a specified size after you have generated an audience", options: [ { - name: "--next-token", - description: - "The token value retrieved from a previous call to access the next page of results", + name: "--name", + description: "The name of the audience export job", args: { name: "string", }, }, { - name: "--max-results", - description: - "The maximum size of the results that is returned per call", - args: { - name: "integer", - }, - }, - { - name: "--cli-input-json", + name: "--audience-generation-job-arn", description: - "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + "The Amazon Resource Name (ARN) of the audience generation job that you want to export", args: { name: "string", }, }, { - name: "--starting-token", + name: "--audience-size", description: - "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + "The size of the generated audience. Must match one of the sizes in the configured audience model", args: { - name: "string", + name: "structure", }, }, { - name: "--page-size", - description: - "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + name: "--description", + description: "The description of the audience export job", args: { - name: "integer", + name: "string", }, }, { - name: "--max-items", + name: "--cli-input-json", description: - "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", args: { - name: "integer", + name: "string", }, }, { @@ -922,41 +2738,65 @@ const completionSpec: Fig.Spec = { ], }, { - name: "put-configured-audience-model-policy", - description: - "Create or update the resource policy for a configured audience model", + name: "start-audience-generation-job", + description: "Information necessary to start the audience generation job", options: [ + { + name: "--name", + description: "The name of the audience generation job", + args: { + name: "string", + }, + }, { name: "--configured-audience-model-arn", description: - "The Amazon Resource Name (ARN) of the configured audience model that the resource policy will govern", + "The Amazon Resource Name (ARN) of the configured audience model that is used for this audience generation job", args: { name: "string", }, }, { - name: "--configured-audience-model-policy", - description: "The IAM resource policy", + name: "--seed-audience", + description: + "The seed audience that is used to generate the audience", args: { - name: "string", + name: "structure", }, }, { - name: "--previous-policy-hash", + name: "--include-seed-in-output", description: - "A cryptographic hash of the contents of the policy used to prevent unexpected concurrent modification of the policy", + "Whether the seed audience is included in the audience generation output", + }, + { + name: "--no-include-seed-in-output", + description: + "Whether the seed audience is included in the audience generation output", + }, + { + name: "--collaboration-id", + description: + "The identifier of the collaboration that contains the audience generation job", args: { name: "string", }, }, { - name: "--policy-existence-condition", - description: - "Use this to prevent unexpected concurrent modification of the 
policy", + name: "--description", + description: "The description of the audience generation job", args: { name: "string", }, }, + { + name: "--tags", + description: + "The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. The following basic restrictions apply to tags: Maximum number of tags per resource - 50. For each resource, each tag key must be unique, and each tag key can have only one value. Maximum key length - 128 Unicode characters in UTF-8. Maximum value length - 256 Unicode characters in UTF-8. If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys and values are case sensitive. Do not use aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys as it is reserved for AWS use. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has aws as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. 
Tags with only the key prefix of aws do not count against your tags per resource limit", + args: { + name: "map", + }, + }, { name: "--cli-input-json", description: @@ -977,36 +2817,44 @@ const completionSpec: Fig.Spec = { ], }, { - name: "start-audience-export-job", + name: "start-trained-model-export-job", description: - "Export an audience of a specified size after you have generated an audience", + "Provides the information necessary to start a trained model export job", options: [ { name: "--name", - description: "The name of the audience export job", + description: "The name of the trained model export job", args: { name: "string", }, }, { - name: "--audience-generation-job-arn", + name: "--trained-model-arn", description: - "The Amazon Resource Name (ARN) of the audience generation job that you want to export", + "The Amazon Resource Name (ARN) of the trained model that you want to export", args: { name: "string", }, }, { - name: "--audience-size", + name: "--membership-identifier", description: - "The size of the generated audience. 
Must match one of the sizes in the configured audience model", + "The membership ID of the member that is receiving the exported trained model artifacts", + args: { + name: "string", + }, + }, + { + name: "--output-configuration", + description: + "The output configuration information for the trained model export job", args: { name: "structure", }, }, { name: "--description", - description: "The description of the audience export job", + description: "The description of the trained model export job", args: { name: "string", }, @@ -1031,53 +2879,91 @@ const completionSpec: Fig.Spec = { ], }, { - name: "start-audience-generation-job", - description: "Information necessary to start the audience generation job", + name: "start-trained-model-inference-job", + description: + "Defines the information necessary to begin a trained model inference job", options: [ + { + name: "--membership-identifier", + description: + "The membership ID of the membership that contains the trained model inference job", + args: { + name: "string", + }, + }, { name: "--name", - description: "The name of the audience generation job", + description: "The name of the trained model inference job", args: { name: "string", }, }, { - name: "--configured-audience-model-arn", + name: "--trained-model-arn", description: - "The Amazon Resource Name (ARN) of the configured audience model that is used for this audience generation job", + "The Amazon Resource Name (ARN) of the trained model that is used for this trained model inference job", args: { name: "string", }, }, { - name: "--seed-audience", + name: "--configured-model-algorithm-association-arn", description: - "The seed audience that is used to generate the audience", + "The Amazon Resource Name (ARN) of the configured model algorithm association that is used for this trained model inference job", args: { - name: "structure", + name: "string", }, }, { - name: "--include-seed-in-output", + name: "--resource-config", description: - "Whether the seed 
audience is included in the audience generation output", + "Defines the resource configuration for the trained model inference job", + args: { + name: "structure", + }, }, { - name: "--no-include-seed-in-output", + name: "--output-configuration", description: - "Whether the seed audience is included in the audience generation output", + "Defines the output configuration information for the trained model inference job", + args: { + name: "structure", + }, }, { - name: "--collaboration-id", + name: "--data-source", description: - "The identifier of the collaboration that contains the audience generation job", + "Defines the data source that is used for the trained model inference job", args: { - name: "string", + name: "structure", }, }, { name: "--description", - description: "The description of the audience generation job", + description: "The description of the trained model inference job", + args: { + name: "string", + }, + }, + { + name: "--container-execution-parameters", + description: "The execution parameters for the container", + args: { + name: "structure", + }, + }, + { + name: "--environment", + description: + "The environment variables to set in the Docker container", + args: { + name: "map", + }, + }, + { + name: "--kms-key-arn", + description: + "The Amazon Resource Name (ARN) of the KMS key. This key is used to encrypt and decrypt customer-owned data in the ML inference job and associated data", args: { name: "string", }, diff --git a/src/aws/cloudfront.ts b/src/aws/cloudfront.ts index 3e004857510..91ef752b63d 100644 --- a/src/aws/cloudfront.ts +++ b/src/aws/cloudfront.ts @@ -3220,7 +3220,7 @@ const completionSpec: Fig.Spec = { { name: "list-origin-access-controls", description: - "Gets the list of CloudFront origin access controls in this Amazon Web Services account. You can optionally specify the maximum number of items to receive in the response. 
If the total number of items in the list exceeds the maximum that you specify, or the default maximum, the response is paginated. To get the next page of items, send another request that specifies the NextMarker value from the current response as the Marker value in the next request", + "Gets the list of CloudFront origin access controls (OACs) in this Amazon Web Services account. You can optionally specify the maximum number of items to receive in the response. If the total number of items in the list exceeds the maximum that you specify, or the default maximum, the response is paginated. To get the next page of items, send another request that specifies the NextMarker value from the current response as the Marker value in the next request. If you're not using origin access controls for your Amazon Web Services account, the ListOriginAccessControls operation doesn't return the Items element in the response", options: [ { name: "--marker", @@ -3321,7 +3321,7 @@ const completionSpec: Fig.Spec = { { name: "--max-items", description: - "The maximum number of public keys you want in the response body", + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", args: { name: "string", }, @@ -3334,6 +3334,22 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. 
This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, { name: "--generate-cli-skeleton", description: @@ -3832,7 +3848,7 @@ const completionSpec: Fig.Spec = { { name: "update-distribution", description: - "Updates the configuration for a CloudFront distribution. The update process includes getting the current distribution configuration, updating it to make your changes, and then submitting an UpdateDistribution request to make the updates. To update a web distribution using the CloudFront API Use GetDistributionConfig to get the current configuration, including the version identifier (ETag). Update the distribution configuration that was returned in the response. Note the following important requirements and restrictions: You must rename the ETag field to IfMatch, leaving the value unchanged. (Set the value of IfMatch to the value of ETag, then remove the ETag field.) You can't change the value of CallerReference. Submit an UpdateDistribution request, providing the distribution configuration. The new configuration replaces the existing configuration. The values that you specify in an UpdateDistribution request are not merged into your existing configuration. Make sure to include all fields: the ones that you modified and also the ones that you didn't", + "Updates the configuration for a CloudFront distribution. 
The update process includes getting the current distribution configuration, updating it to make your changes, and then submitting an UpdateDistribution request to make the updates. To update a web distribution using the CloudFront API Use GetDistributionConfig to get the current configuration, including the version identifier (ETag). Update the distribution configuration that was returned in the response. Note the following important requirements and restrictions: You must copy the ETag field value from the response. (You'll use it for the IfMatch parameter in your request.) Then, remove the ETag field from the distribution configuration. You can't change the value of CallerReference. Submit an UpdateDistribution request, providing the updated distribution configuration. The new configuration replaces the existing configuration. The values that you specify in an UpdateDistribution request are not merged into your existing configuration. Make sure to include all fields: the ones that you modified and also the ones that you didn't", options: [ { name: "--distribution-config", diff --git a/src/aws/codebuild.ts b/src/aws/codebuild.ts index 43c4d71a8a2..c9b79c6afb6 100644 --- a/src/aws/codebuild.ts +++ b/src/aws/codebuild.ts @@ -239,7 +239,7 @@ const completionSpec: Fig.Spec = { { name: "--environment-type", description: - "The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). 
The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type MAC_ARM is available for Medium fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), and EU (Frankfurt) The environment type MAC_ARM is available for Large fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide", + "The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo). The environment type ARM_EC2 is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_CONTAINER is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_EC2 is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type MAC_ARM is available for Medium fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), and EU (Frankfurt) The environment type MAC_ARM is available for Large fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and Asia Pacific (Sydney). The environment type WINDOWS_EC2 is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai). 
For more information, see Build environment compute types in the CodeBuild user guide", args: { name: "string", }, @@ -247,11 +247,19 @@ const completionSpec: Fig.Spec = { { name: "--compute-type", description: - "Information about the compute resources the compute fleet uses. Available values include: BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds. BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed. If you use BUILD_GENERAL1_SMALL: For environment type LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For environment type LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds. For more information, see Build environment compute types in the CodeBuild User Guide", + "Information about the compute resources the compute fleet uses. Available values include: ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine. If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. 
CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide. BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds. BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds. BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed. BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. If you use BUILD_GENERAL1_SMALL: For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds. 
For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds. For more information, see On-demand environment types in the CodeBuild User Guide", args: { name: "string", }, }, + { + name: "--compute-configuration", + description: + "The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE", + args: { + name: "structure", + }, + }, { name: "--scaling-configuration", description: "The scaling configuration of the compute fleet", @@ -495,6 +503,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--auto-retry-limit", + description: + "The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build for up to 2 additional times", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: @@ -2501,6 +2517,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--auto-retry-limit-override", + description: + "The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build for up to 2 additional times", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: @@ -2879,7 +2903,7 @@ const completionSpec: Fig.Spec = { { name: "--environment-type", description: - "The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo). 
The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type MAC_ARM is available for Medium fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), and EU (Frankfurt) The environment type MAC_ARM is available for Large fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide", + "The environment type of the compute fleet. The environment type ARM_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo). The environment type ARM_EC2 is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_EC2 is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney). The environment type MAC_ARM is available for Medium fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), and EU (Frankfurt) The environment type MAC_ARM is available for Large fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and Asia Pacific (Sydney). The environment type WINDOWS_EC2 is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai). For more information, see Build environment compute types in the CodeBuild user guide", args: { name: "string", }, @@ -2887,11 +2911,19 @@ const completionSpec: Fig.Spec = { { name: "--compute-type", description: - "Information about the compute resources the compute fleet uses. Available values include: BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds. BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_XLARGE: Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed. If you use BUILD_GENERAL1_SMALL: For environment type LINUX_CONTAINER, you can use up to 3 GB memory and 2 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds. If you use BUILD_GENERAL1_LARGE: For environment type LINUX_CONTAINER, you can use up to 15 GB memory and 8 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds. For more information, see Build environment compute types in the CodeBuild User Guide", + "Information about the compute resources the compute fleet uses. 
Available values include: ATTRIBUTE_BASED_COMPUTE: Specify the amount of vCPUs, memory, disk space, and the type of machine. If you use ATTRIBUTE_BASED_COMPUTE, you must define your attributes by using computeConfiguration. CodeBuild will select the cheapest instance that satisfies your specified attributes. For more information, see Reserved capacity environment types in the CodeBuild User Guide. BUILD_GENERAL1_SMALL: Use up to 4 GiB memory and 2 vCPUs for builds. BUILD_GENERAL1_MEDIUM: Use up to 8 GiB memory and 4 vCPUs for builds. BUILD_GENERAL1_LARGE: Use up to 16 GiB memory and 8 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_XLARGE: Use up to 72 GiB memory and 36 vCPUs for builds, depending on your environment type. BUILD_GENERAL1_2XLARGE: Use up to 144 GiB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed. BUILD_LAMBDA_1GB: Use up to 1 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_2GB: Use up to 2 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_4GB: Use up to 4 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_8GB: Use up to 8 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. BUILD_LAMBDA_10GB: Use up to 10 GiB memory for builds. Only available for environment type LINUX_LAMBDA_CONTAINER and ARM_LAMBDA_CONTAINER. If you use BUILD_GENERAL1_SMALL: For environment type LINUX_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 16 GiB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds. For environment type ARM_CONTAINER, you can use up to 4 GiB memory and 2 vCPUs on ARM-based processors for builds. 
If you use BUILD_GENERAL1_LARGE: For environment type LINUX_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs for builds. For environment type LINUX_GPU_CONTAINER, you can use up to 255 GiB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. For environment type ARM_CONTAINER, you can use up to 16 GiB memory and 8 vCPUs on ARM-based processors for builds. For more information, see On-demand environment types in the CodeBuild User Guide", args: { name: "string", }, }, + { + name: "--compute-configuration", + description: + "The compute configuration of the compute fleet. This is only required if computeType is set to ATTRIBUTE_BASED_COMPUTE", + args: { + name: "structure", + }, + }, { name: "--scaling-configuration", description: "The scaling configuration of the compute fleet", @@ -3135,6 +3167,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--auto-retry-limit", + description: + "The maximum number of additional automatic retries after a failed build. For example, if the auto-retry limit is set to 2, CodeBuild will call the RetryBuild API to automatically retry your build for up to 2 additional times", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/connect.ts b/src/aws/connect.ts index fa3e1802c35..5a23e316191 100644 --- a/src/aws/connect.ts +++ b/src/aws/connect.ts @@ -587,12 +587,13 @@ const completionSpec: Fig.Spec = { }, { name: "associate-traffic-distribution-group-user", - description: "Associates an agent with a traffic distribution group", + description: + "Associates an agent with a traffic distribution group. This API can be called only in the Region where the traffic distribution group is created", options: [ { name: "--traffic-distribution-group-id", description: - "The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. 
The ARN must be provided if the call is from the replicated Region", + "The identifier of the traffic distribution group. This can be the ID or the ARN of the traffic distribution group", args: { name: "string", }, @@ -2268,7 +2269,7 @@ const completionSpec: Fig.Spec = { { name: "create-traffic-distribution-group", description: - "Creates a traffic distribution group given an Amazon Connect instance that has been replicated. The SignInConfig distribution is available only on a default TrafficDistributionGroup (see the IsDefault parameter in the TrafficDistributionGroup data type). If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned. For more information about creating traffic distribution groups, see Set up traffic distribution groups in the Amazon Connect Administrator Guide", + "Creates a traffic distribution group given an Amazon Connect instance that has been replicated. The SignInConfig distribution is available only on a default TrafficDistributionGroup (see the IsDefault parameter in the TrafficDistributionGroup data type). If you call UpdateTrafficDistribution with a modified SignInConfig and a non-default TrafficDistributionGroup, an InvalidRequestException is returned. For more information about creating traffic distribution groups, see Set up traffic distribution groups in the Amazon Connect Administrator Guide", options: [ { name: "--name", @@ -3418,7 +3419,7 @@ const completionSpec: Fig.Spec = { { name: "--traffic-distribution-group-id", description: - "The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region", + "The identifier of the traffic distribution group. 
This can be the ID or the ARN of the traffic distribution group", args: { name: "string", }, @@ -5129,12 +5130,13 @@ const completionSpec: Fig.Spec = { }, { name: "disassociate-traffic-distribution-group-user", - description: "Disassociates an agent from a traffic distribution group", + description: + "Disassociates an agent from a traffic distribution group. This API can be called only in the Region where the traffic distribution group is created", options: [ { name: "--traffic-distribution-group-id", description: - "The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region", + "The identifier of the traffic distribution group. This can be the ID or the ARN of the traffic distribution group", args: { name: "string", }, @@ -5681,7 +5683,7 @@ const completionSpec: Fig.Spec = { { name: "get-metric-data-v2", description: - "Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. It can retrieve historical data for the last 3 months, at varying intervals. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator Guide", + "Gets metric data from the specified Amazon Connect instance. GetMetricDataV2 offers more features than GetMetricData, the previous version of this API. It has new metrics, offers filtering at a metric level, and offers the ability to filter and group data by channels, queues, routing profiles, agents, and agent hierarchy levels. 
It can retrieve historical data for the last 3 months, at varying intervals. It does not support agent queues. For a description of the historical metrics that are supported by GetMetricDataV2 and GetMetricData, see Historical metrics definitions in the Amazon Connect Administrator Guide", options: [ { name: "--resource-arn", @@ -5734,7 +5736,7 @@ const completionSpec: Fig.Spec = { { name: "--metrics", description: - 'The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. 
UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in 
seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. 
PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). 
UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts', + 'The metrics to retrieve. 
Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. ABANDONMENT_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Abandonment rate AGENT_ADHERENT_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherent time AGENT_ANSWER_RATE Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent answer rate AGENT_NON_ADHERENT_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Non-adherent time AGENT_NON_RESPONSE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent non-response AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy Data for this metric is available starting from October 1, 2023 0:00:00 GMT. UI name: Agent non-response without customer abandons AGENT_OCCUPANCY Unit: Percentage Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Occupancy AGENT_SCHEDULE_ADHERENCE This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Adherence AGENT_SCHEDULED_TIME This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Scheduled time AVG_ABANDON_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue abandon time AVG_ACTIVE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average active time AVG_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average after contact work time Feature is a valid filter but not a valid grouping. AVG_AGENT_CONNECTING_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Average agent API connecting time The Negate key in metric-level filters is not applicable for this metric. 
AVG_AGENT_PAUSE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Average agent pause time AVG_CASE_RELATED_CONTACTS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average contacts per case AVG_CASE_RESOLUTION_TIME Unit: Seconds Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Average case resolution time AVG_CONTACT_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average contact duration Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent greeting time AVG_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression UI name: Average handle time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time Feature is a valid filter but not a valid grouping. AVG_HOLD_TIME_ALL_CONTACTS Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer hold time all contacts AVG_HOLDS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average holds Feature is a valid filter but not a valid grouping. AVG_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction and customer hold time AVG_INTERACTION_TIME Unit: Seconds Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interaction time Feature is a valid filter but not a valid grouping. AVG_INTERRUPTIONS_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruptions AVG_INTERRUPTION_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent interruption time AVG_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average non-talk time AVG_QUEUE_ANSWER_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average queue answer time Feature is a valid filter but not a valid grouping. AVG_RESOLUTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average resolution time AVG_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average talk time AVG_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average agent talk time AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. 
Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Percent Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. 
CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts handled (connected to agent timestamp) CONTACTS_HOLD_ABANDONS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts hold disconnect CONTACTS_ON_HOLD_AGENT_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold agent disconnect CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts hold customer disconnect CONTACTS_PUT_ON_HOLD Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts put on hold CONTACTS_TRANSFERRED_OUT_EXTERNAL Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out external CONTACTS_TRANSFERRED_OUT_INTERNAL Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contacts transferred out internal CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in 
seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts removed from queue in X seconds CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts resolved in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out Feature is a valid filter but not a valid grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Percent Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound campaigns analytics, and with the answering machine detection enabled. 
Unit: Count Valid groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. 
PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Agent talk time percent PERCENT_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Customer talk time percent REOPENED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases reopened RESOLVED_CASE_ACTIONS Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved SERVICE_LEVEL You can include up to 20 SERVICE_LEVEL metrics in a request. Unit: Percent Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). 
UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API connecting time The Negate key in metric-level filters is not applicable for this metric. CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). UI name: Contacts abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you can use LT (for "Less than") or LTE (for "Less than equal"). 
UI name: Contacts answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent interaction time SUM_NON_PRODUCTIVE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Non-Productive Time SUM_ONLINE_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy UI name: Online time SUM_RETRY_CALLBACK_ATTEMPTS Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Callback attempts', args: { name: "list", }, @@ -11717,6 
+11719,54 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "start-screen-sharing", + description: + "Starts screen sharing for a contact. For more information about screen sharing, see Set up in-app, web, video calling, and screen sharing capabilities in the Amazon Connect Administrator Guide", + options: [ + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs", + args: { + name: "string", + }, + }, + { + name: "--instance-id", + description: + "The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance", + args: { + name: "string", + }, + }, + { + name: "--contact-id", + description: + "The identifier of the contact in this instance of Amazon Connect", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "start-task-contact", description: diff --git a/src/aws/controltower.ts b/src/aws/controltower.ts index 159a84e0d2e..dcc5c4e5919 100644 --- a/src/aws/controltower.ts +++ b/src/aws/controltower.ts @@ -969,6 +969,36 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "reset-enabled-control", + description: "Resets an enabled control", + options: [ + { + name: "--enabled-control-identifier", + description: "The ARN of the enabled control to be reset", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "reset-landing-zone", description: @@ -1125,7 +1155,7 @@ const completionSpec: Fig.Spec = { { name: "update-enabled-control", description: - "Updates the configuration of an already enabled control. If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. 
Otherwise, Amazon Web Services Control Tower will not accept the request. If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower updates the control to match any valid parameters that you supply. If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or you can run an extending governance operation. For usage examples, see the Controls Reference Guide", + "Updates the configuration of an already enabled control. If the enabled control shows an EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request. If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services Control Tower updates the control to match any valid parameters that you supply. If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling the ResetEnabledControl API. Alternatively, you can call DisableControl and then call EnableControl again. Also, you can run an extending governance operation to repair drift. For usage examples, see the Controls Reference Guide", options: [ { name: "--enabled-control-identifier", diff --git a/src/aws/datasync.ts b/src/aws/datasync.ts index 9b0738f9f3f..d5efde3ad3f 100644 --- a/src/aws/datasync.ts +++ b/src/aws/datasync.ts @@ -126,12 +126,12 @@ const completionSpec: Fig.Spec = { { name: "create-agent", description: - "Activates an DataSync agent that you've deployed in your storage environment. The activation process associates the agent with your Amazon Web Services account. 
If you haven't deployed an agent yet, see the following topics to learn more: Agent requirements Create an agent If you're transferring between Amazon Web Services storage services, you don't need a DataSync agent", + "Activates an DataSync agent that you deploy in your storage environment. The activation process associates the agent with your Amazon Web Services account. If you haven't deployed an agent yet, see Do I need a DataSync agent?", options: [ { name: "--activation-key", description: - "Specifies your DataSync agent's activation key. If you don't have an activation key, see Activate your agent", + "Specifies your DataSync agent's activation key. If you don't have an activation key, see Activating your agent", args: { name: "string", }, @@ -139,7 +139,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-name", description: - "Specifies a name for your agent. You can see this name in the DataSync console", + "Specifies a name for your agent. We recommend specifying a name that you can remember", args: { name: "string", }, @@ -155,7 +155,7 @@ const completionSpec: Fig.Spec = { { name: "--vpc-endpoint-id", description: - "Specifies the ID of the VPC endpoint that you want your agent to connect to. For example, a VPC endpoint ID looks like vpce-01234d5aff67890e1. The VPC endpoint you use must include the DataSync service name (for example, com.amazonaws.us-east-2.datasync)", + "Specifies the ID of the VPC service endpoint that you're using. For example, a VPC endpoint ID looks like vpce-01234d5aff67890e1. The VPC service endpoint you use must include the DataSync service name (for example, com.amazonaws.us-east-2.datasync)", args: { name: "string", }, @@ -163,7 +163,7 @@ const completionSpec: Fig.Spec = { { name: "--subnet-arns", description: - "Specifies the ARN of the subnet where you want to run your DataSync task when using a VPC endpoint. This is the subnet where DataSync creates and manages the network interfaces for your transfer. 
You can only specify one ARN", + "Specifies the ARN of the subnet where your VPC service endpoint is located. You can only specify one ARN", args: { name: "list", }, @@ -171,7 +171,7 @@ const completionSpec: Fig.Spec = { { name: "--security-group-arns", description: - "Specifies the Amazon Resource Name (ARN) of the security group that protects your task's network interfaces when using a virtual private cloud (VPC) endpoint. You can only specify one ARN", + "Specifies the Amazon Resource Name (ARN) of the security group that allows traffic between your agent and VPC service endpoint. You can only specify one ARN", args: { name: "list", }, @@ -291,14 +291,14 @@ const completionSpec: Fig.Spec = { { name: "--subdirectory", description: - "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location). By default, DataSync uses the root directory, but you can also include subdirectories. You must specify a value with forward slashes (for example, /path/to/folder)", + "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system. By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). 
You can also include subdirectories using forward slashes (for example, /path/to/folder)", args: { name: "string", }, }, { name: "--efs-filesystem-arn", - description: "Specifies the ARN for the Amazon EFS file system", + description: "Specifies the ARN for your Amazon EFS file system", args: { name: "string", }, @@ -306,7 +306,7 @@ const completionSpec: Fig.Spec = { { name: "--ec2-config", description: - "Specifies the subnet and security groups DataSync uses to access your Amazon EFS file system", + "Specifies the subnet and security groups DataSync uses to connect to one of your Amazon EFS file system's mount targets", args: { name: "structure", }, @@ -322,7 +322,7 @@ const completionSpec: Fig.Spec = { { name: "--access-point-arn", description: - "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system", + "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system. For more information, see Accessing restricted file systems", args: { name: "string", }, @@ -330,7 +330,7 @@ const completionSpec: Fig.Spec = { { name: "--file-system-access-role-arn", description: - "Specifies an Identity and Access Management (IAM) role that DataSync assumes when mounting the Amazon EFS file system", + "Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system. For information on creating this role, see Creating a DataSync IAM role for file system access", args: { name: "string", }, @@ -338,7 +338,7 @@ const completionSpec: Fig.Spec = { { name: "--in-transit-encryption", description: - "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it copies data to or from the Amazon EFS file system. 
If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2", + "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system. If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2", args: { name: "string", }, @@ -570,7 +570,7 @@ const completionSpec: Fig.Spec = { { name: "--security-group-arns", description: - "Specifies the ARNs of the security groups that provide access to your file system's preferred subnet. If you choose a security group that doesn't allow connections from within itself, do one of the following: Configure the security group to allow it to communicate within itself. Choose a different security group that can communicate with the mount target's security group", + "Specifies the ARNs of the Amazon EC2 security groups that provide access to your file system's preferred subnet. The security groups that you specify must be able to communicate with your file system's security groups. For information about configuring security groups for file system access, see the Amazon FSx for Windows File Server User Guide . If you choose a security group that doesn't allow connections from within itself, do one of the following: Configure the security group to allow it to communicate within itself. 
Choose a different security group that can communicate with the mount target's security group", args: { name: "list", }, @@ -722,7 +722,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster", + "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster", args: { name: "list", }, @@ -778,7 +778,7 @@ const completionSpec: Fig.Spec = { { name: "--on-prem-config", description: - "Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server. You can specify more than one agent. For more information, see Using multiple agents for transfers", + "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server. You can specify more than one agent. For more information, see Using multiple DataSync agents", args: { name: "structure", }, @@ -882,7 +882,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location", + "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system", args: { name: "list", }, @@ -1042,7 +1042,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "Specifies the DataSync agent (or agents) which you want to connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN)", + "Specifies the DataSync agent (or agents) that can connect to your SMB file server. 
You specify an agent by using its Amazon Resource Name (ARN)", args: { name: "list", }, @@ -1105,7 +1105,7 @@ const completionSpec: Fig.Spec = { { name: "--cloud-watch-log-group-arn", description: - "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task", + "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task. For Enhanced mode tasks, you don't need to specify anything. DataSync automatically sends logs to a CloudWatch log group named /aws/datasync", args: { name: "string", }, @@ -1152,7 +1152,7 @@ const completionSpec: Fig.Spec = { { name: "--includes", description: - "Specifies include filters define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see Specifying what DataSync transfers by using filters", + "Specifies include filters that define the files, objects, and folders in your source location that you want DataSync to transfer. For more information and examples, see Specifying what DataSync transfers by using filters", args: { name: "list", }, @@ -1173,6 +1173,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--task-mode", + description: + "Specifies one of the following task modes for your data transfer: ENHANCED - Transfer virtually unlimited numbers of objects with enhanced metrics, more detailed logs, and higher performance than Basic mode. Currently available for transfers between Amazon S3 locations. To create an Enhanced mode task, the IAM role that you use to call the CreateTask operation must have the iam:CreateServiceLinkedRole permission. BASIC (default) - Transfer files or objects between Amazon Web Services storage and on-premises, edge, or other cloud storage. DataSync quotas apply. 
For more information, see Understanding task mode differences", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1944,7 +1952,7 @@ const completionSpec: Fig.Spec = { { name: "describe-task-execution", description: - "Provides information about an execution of your DataSync task. You can use this operation to help monitor the progress of an ongoing transfer or check the results of the transfer", + "Provides information about an execution of your DataSync task. You can use this operation to help monitor the progress of an ongoing data transfer or check the results of the transfer. Some DescribeTaskExecution response elements are only relevant to a specific task mode. For information, see Understanding task mode differences and Understanding data transfer performance metrics", options: [ { name: "--task-execution-arn", @@ -2593,7 +2601,7 @@ const completionSpec: Fig.Spec = { { name: "start-task-execution", description: - "Starts an DataSync transfer task. For each task, you can only run one task execution at a time. There are several phases to a task execution. For more information, see Task execution statuses. If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin", + "Starts an DataSync transfer task. For each task, you can only run one task execution at a time. There are several steps to a task execution. For more information, see Task execution statuses. 
If you're planning to transfer data to or from an Amazon S3 location, review how DataSync can affect your S3 request charges and the DataSync pricing page before you begin", options: [ { name: "--task-arn", @@ -3040,7 +3048,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "The ARNs of the agents that are used to connect to the HDFS cluster", + "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster", args: { name: "list", }, @@ -3088,7 +3096,7 @@ const completionSpec: Fig.Spec = { { name: "--on-prem-config", description: - "The DataSync agents that are connecting to a Network File System (NFS) location", + "The DataSync agents that can connect to your Network File System (NFS) file server", args: { name: "structure", }, @@ -3176,7 +3184,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location", + "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system", args: { name: "list", }, @@ -3256,7 +3264,7 @@ const completionSpec: Fig.Spec = { { name: "--agent-arns", description: - "Specifies the DataSync agent (or agents) which you want to connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN)", + "Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN)", args: { name: "list", }, @@ -3406,7 +3414,7 @@ const completionSpec: Fig.Spec = { { name: "--cloud-watch-log-group-arn", description: - "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task", + "Specifies the Amazon Resource Name (ARN) of an Amazon CloudWatch log group for monitoring your task. For Enhanced mode tasks, you must use /aws/datasync as your log group name. 
For example: arn:aws:logs:us-east-1:111222333444:log-group:/aws/datasync:* For more information, see Monitoring data transfers with CloudWatch Logs", args: { name: "string", }, diff --git a/src/aws/docdb-elastic.ts b/src/aws/docdb-elastic.ts index d953a5e9100..203acb55db5 100644 --- a/src/aws/docdb-elastic.ts +++ b/src/aws/docdb-elastic.ts @@ -3,6 +3,62 @@ const completionSpec: Fig.Spec = { description: "Amazon DocumentDB elastic clusters Amazon DocumentDB elastic-clusters support workloads with millions of reads/writes per second and petabytes of storage capacity. Amazon DocumentDB elastic clusters also simplify how developers interact with Amazon DocumentDB elastic-clusters by eliminating the need to choose, manage or upgrade instances. Amazon DocumentDB elastic-clusters were created to: provide a solution for customers looking for a database that provides virtually limitless scale with rich query capabilities and MongoDB API compatibility. give customers higher connection limits, and to reduce downtime from patching. continue investing in a cloud-native, elastic, and class leading architecture for JSON workloads", subcommands: [ + { + name: "apply-pending-maintenance-action", + description: + "The type of pending maintenance action to be applied to the resource", + options: [ + { + name: "--apply-action", + description: + "The pending maintenance action to apply to the resource. Valid actions are: ENGINE_UPDATE ENGINE_UPGRADE SECURITY_UPDATE OS_UPDATE MASTER_USER_PASSWORD_UPDATE", + args: { + name: "string", + }, + }, + { + name: "--apply-on", + description: + "A specific date to apply the pending maintenance action. Required if opt-in-type is APPLY_ON. Format: yyyy/MM/dd HH:mm-yyyy/MM/dd HH:mm", + args: { + name: "string", + }, + }, + { + name: "--opt-in-type", + description: + "A value that specifies the type of opt-in request, or undoes an opt-in request. 
An opt-in request of type IMMEDIATE can't be undone", + args: { + name: "string", + }, + }, + { + name: "--resource-arn", + description: + "The Amazon DocumentDB Amazon Resource Name (ARN) of the resource to which the pending maintenance action applies", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "copy-cluster-snapshot", description: "Copies a snapshot of an elastic cluster", @@ -378,6 +434,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-pending-maintenance-action", + description: "Retrieves all maintenance actions that are pending", + options: [ + { + name: "--resource-arn", + description: + "Retrieves pending maintenance actions for a specific Amazon Resource Name (ARN)", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-cluster-snapshots", description: @@ -521,6 +608,70 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-pending-maintenance-actions", + description: + "Retrieves a list of all maintenance actions that are pending", + options: [ + { + name: "--max-results", + description: + "The maximum number of results to include in the response. If more records exist than the specified maxResults value, a pagination token (marker) is included in the response so that the remaining results can be retrieved", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by maxResults", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-tags-for-resource", description: "Lists all tags on a elastic cluster resource", diff --git a/src/aws/ecs.ts b/src/aws/ecs.ts index a869c69ea68..dd1a41508cb 100644 --- a/src/aws/ecs.ts +++ b/src/aws/ecs.ts @@ -513,7 +513,7 @@ const completionSpec: Fig.Spec = { { name: "create-service", description: - "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, use UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. 
There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. 
For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. 
When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide",
+        "Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. Amazon Elastic Inference (EI) is no longer available to customers. In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. 
Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer. There are two service scheduler strategies available: REPLICA - The replica scheduling strategy places and maintains your desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. DAEMON - The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks. It also stops tasks that don't meet the placement constraints. When using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. You can use UpdateService. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). 
This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%. If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%. If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. 
If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide", options: [ { name: "--cluster", @@ -613,7 +613,7 @@ const completionSpec: Fig.Spec = { { name: "--deployment-configuration", description: - "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks", + "Optional deployment parameters that control how many tasks run during the deployment and the failure detection methods", args: { name: "structure", isVariadic: true, @@ -1427,6 +1427,70 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-service-deployments", + description: + "Describes one or more of your service deployments. A service deployment happens when you release a software update for the service. For more information, see Amazon ECS service deployments", + options: [ + { + name: "--service-deployment-arns", + description: + "The ARN of the service deployment. You can specify a maximum of 20 ARNs", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "describe-service-revisions", + description: + "Describes one or more service revisions. A service revision is a version of the service that includes the values for the Amazon ECS resources (for example, task definition) and the environment resources (for example, load balancers, subnets, and security groups). For more information, see Amazon ECS service revisions. You can't describe a service revision that was created before October 25, 2024", + options: [ + { + name: "--service-revision-arns", + description: + "The ARN of the service revision. You can specify a maximum of 20 ARNs. You can call ListServiceDeployments to get the ARNs", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--generate-cli-skeleton",
+          description:
+            "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command",
+          args: {
+            name: "string",
+            suggestions: ["input", "output"],
+          },
+        },
+      ],
+    },
     {
       name: "describe-services",
       description: "Describes the specified services running in your cluster",
@@ -2142,6 +2206,77 @@
         },
       ],
     },
+    {
+      name: "list-service-deployments",
+      description:
+        "This operation lists all the service deployments that meet the specified filter criteria. A service deployment happens when you release a software update for the service. You route traffic from the running service revisions to the new service revision and control the number of running tasks. This API returns the values that you use for the request parameters in DescribeServiceRevisions",
+      options: [
+        {
+          name: "--service",
+          description: "The ARN or name of the service",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--cluster",
+          description:
+            "The cluster that hosts the service. This can either be the cluster name or ARN. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. If you don't specify a cluster, default is used",
+          args: {
+            name: "string",
+          },
+        },
+        {
+          name: "--status",
+          description:
+            "An optional filter you can use to narrow the results. 
If you do not specify a status, then all status values are included in the result", + args: { + name: "list", + }, + }, + { + name: "--created-at", + description: + "An optional filter you can use to narrow the results by the service creation date. If you do not specify a value, the result includes all services created before the current time. The format is yyyy-MM-dd HH:mm:ss.SSSSSS", + args: { + name: "structure", + }, + }, + { + name: "--next-token", + description: + "The nextToken value returned from a ListServiceDeployments request indicating that more results are available to fulfill the request and further calls are needed. If you provided maxResults, it's possible the number of results is fewer than maxResults", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of service deployment results that ListServiceDeployments returned in paginated output. When this parameter is used, ListServiceDeployments only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another ListServiceDeployments request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListServiceDeployments returns up to 20 results and a nextToken value if applicable", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-services", description: @@ -2973,7 +3108,7 @@ const completionSpec: Fig.Spec = { { name: "--network-mode", description: - "The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. 
If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used", + "The Docker networking mode to use for the containers in the task. The valid values are none, bridge, awsvpc, and host. If no network mode is specified, the default is bridge. For Amazon ECS tasks on Fargate, the awsvpc network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, or awsvpc can be used. If the network mode is set to none, you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The host and awsvpc network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the bridge mode. With the host and awsvpc network modes, exposed container ports are mapped directly to the corresponding host port (for the host network mode) or the attached elastic network interface port (for the awsvpc network mode), so you cannot take advantage of dynamic host port mappings. When using the host network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. If the network mode is awsvpc, the task is allocated an elastic network interface, and you must specify a NetworkConfiguration value when you create a service or run a task with the task definition. For more information, see Task Networking in the Amazon Elastic Container Service Developer Guide. If the network mode is host, you cannot run multiple instantiations of the same task on a single container instance when port mappings are used. 
For more information, see Network settings in the Docker run reference", args: { name: "string", suggestions: ["bridge", "host", "awsvpc", "none"], @@ -3045,7 +3180,7 @@ const completionSpec: Fig.Spec = { { name: "--pid-mode", description: - "The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate", + "The process namespace to use for the containers in the task. The valid values are host or task. On Fargate for Linux containers, the only valid value is task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task. If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same process namespace. If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference. 
If the host PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see Docker security. This parameter is not supported for Windows containers. This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate", args: { name: "string", suggestions: ["host", "task"], @@ -3054,7 +3189,7 @@ const completionSpec: Fig.Spec = { { name: "--ipc-mode", description: - "The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. This parameter is not supported for Windows containers or tasks run on Fargate", + "The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none. 
If host is specified, then all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If task is specified, all containers within the specified task share the same IPC resources. If none is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference. If the host IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure. For more information, see Docker security. If you are setting namespaced kernel parameters using systemControls for the containers in the task, the following will apply to your IPC resource namespace. For more information, see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace related systemControls are not supported. For tasks that use the task IPC mode, IPC namespace related systemControls will apply to all containers within a task. 
This parameter is not supported for Windows containers or tasks run on Fargate", args: { name: "string", suggestions: ["host", "task", "none"], @@ -4135,7 +4270,7 @@ const completionSpec: Fig.Spec = { { name: "--deployment-configuration", description: - "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks", + "Optional deployment parameters that control how many tasks run during the deployment and the failure detection methods", args: { name: "structure", suggestions: [ diff --git a/src/aws/elbv2.ts b/src/aws/elbv2.ts index 95e343cd9c1..658cd3f40f7 100644 --- a/src/aws/elbv2.ts +++ b/src/aws/elbv2.ts @@ -133,7 +133,7 @@ const completionSpec: Fig.Spec = { { name: "--protocol", description: - "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer", + "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer", args: { name: "string", }, @@ -141,7 +141,7 @@ const completionSpec: Fig.Spec = { { name: "--port", description: - "The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer", + "The port on which the load balancer is listening. You can't specify a port for a Gateway Load Balancer", args: { name: "integer", }, @@ -226,7 +226,7 @@ const completionSpec: Fig.Spec = { { name: "--subnets", description: - "The IDs of the subnets. 
You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones", + "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones", args: { name: "list", }, @@ -234,7 +234,7 @@ const completionSpec: Fig.Spec = { { name: "--subnet-mappings", description: - "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. 
You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets", + "The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You can't specify Elastic IP addresses for your subnets", args: { name: "list", }, @@ -250,7 +250,7 @@ const completionSpec: Fig.Spec = { { name: "--scheme", description: - "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet. The nodes of an internal load balancer have only private IP addresses. 
The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer. The default is an Internet-facing load balancer. You cannot specify a scheme for a Gateway Load Balancer", + "The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet. The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer. The default is an Internet-facing load balancer. You can't specify a scheme for a Gateway Load Balancer", args: { name: "string", }, @@ -272,7 +272,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-address-type", description: - "Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can\u2019t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses)", + "The IP address type. Internal load balancers must use ipv4. 
[Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses). [Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses)", args: { name: "string", }, @@ -285,6 +285,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--enable-prefix-for-ipv6-source-nat", + description: + "[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack. The default value is off", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -428,12 +436,12 @@ const completionSpec: Fig.Spec = { { name: "--health-check-enabled", description: - "Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and cannot be disabled", + "Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and can't be disabled", }, { name: "--no-health-check-enabled", description: - "Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and cannot be disabled", + "Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. 
If the target type is instance, ip, or alb, health checks are always enabled and can't be disabled", }, { name: "--health-check-path", @@ -500,8 +508,7 @@ const completionSpec: Fig.Spec = { }, { name: "--ip-address-type", - description: - "The type of IP address used for this target group. The possible values are ipv4 and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4", + description: "The IP address type. The default value is ipv4", args: { name: "string", }, @@ -532,7 +539,7 @@ const completionSpec: Fig.Spec = { { name: "--name", description: - "The name of the trust store. This name must be unique per region and cannot be changed after creation", + "The name of the trust store. This name must be unique per region and can't be changed after creation", args: { name: "string", }, @@ -1728,7 +1735,7 @@ const completionSpec: Fig.Spec = { { name: "--port", description: - "The port for connections from clients to the load balancer. You cannot specify a port for a Gateway Load Balancer", + "The port for connections from clients to the load balancer. You can't specify a port for a Gateway Load Balancer", args: { name: "integer", }, @@ -1736,7 +1743,7 @@ const completionSpec: Fig.Spec = { { name: "--protocol", description: - "The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can\u2019t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer", + "The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can\u2019t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. 
You can't specify a protocol for a Gateway Load Balancer", args: { name: "string", }, @@ -2116,7 +2123,7 @@ const completionSpec: Fig.Spec = { { name: "register-targets", description: - "Registers the specified targets with the specified target group. If the target is an EC2 instance, it must be in the running state when you register it. By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports. With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address", + "Registers the specified targets with the specified target group. If the target is an EC2 instance, it must be in the running state when you register it. By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports. With a Network Load Balancer, you can't register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address", options: [ { name: "--target-group-arn", @@ -2282,7 +2289,7 @@ const completionSpec: Fig.Spec = { { name: "--ip-address-type", description: - "Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. 
The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). Note: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors. [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can\u2019t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses)", + "The IP address type. Internal load balancers must use ipv4. [Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses). Application Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors. [Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses)", args: { name: "string", }, @@ -2398,7 +2405,7 @@ const completionSpec: Fig.Spec = { { name: "--subnets", description: - "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. 
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones", + "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones", args: { name: "list", }, @@ -2406,7 +2413,7 @@ const completionSpec: Fig.Spec = { { name: "--subnet-mappings", description: - "The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones", + "The IDs of the public subnets. You can specify only one subnet per Availability Zone. 
You must specify either subnets or subnet mappings. [Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets. [Application Load Balancers on Outposts] You must specify one Outpost subnet. [Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones. [Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet. [Gateway Load Balancers] You can specify subnets from one or more Availability Zones", args: { name: "list", }, @@ -2414,7 +2421,15 @@ const completionSpec: Fig.Spec = { { name: "--ip-address-type", description: - "[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can\u2019t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses)", + "The IP address type. [Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses). 
[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses)", + args: { + name: "string", + }, + }, + { + name: "--enable-prefix-for-ipv6-source-nat", + description: + "[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack. The default value is off", args: { name: "string", }, diff --git a/src/aws/firehose.ts b/src/aws/firehose.ts index 9c0f434dba0..ebcb941f3a4 100644 --- a/src/aws/firehose.ts +++ b/src/aws/firehose.ts @@ -6,12 +6,12 @@ const completionSpec: Fig.Spec = { { name: "create-delivery-stream", description: - "Creates a Firehose delivery stream. By default, you can create up to 50 delivery streams per Amazon Web Services Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. 
To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled. A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. 
The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide", + "Creates a Firehose stream. By default, you can create up to 50 Firehose streams per Amazon Web Services Region. This is an asynchronous operation that immediately returns. The initial status of the Firehose stream is CREATING. After the Firehose stream is created, its status is ACTIVE and it now accepts data. If the Firehose stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a Firehose stream, use DescribeDeliveryStream. If the status of a Firehose stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A Firehose stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. 
To create a Firehose stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing Firehose stream that doesn't have SSE enabled. A Firehose stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as intermediate location. Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. 
The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide", options: [ { name: "--delivery-stream-name", description: - "The name of the delivery stream. This name must be unique per Amazon Web Services account in the same Amazon Web Services Region. If the delivery streams are in different accounts or different Regions, you can have multiple delivery streams with the same name", + "The name of the Firehose stream. This name must be unique per Amazon Web Services account in the same Amazon Web Services Region. If the Firehose streams are in different accounts or different Regions, you can have multiple Firehose streams with the same name", args: { name: "string", }, @@ -19,7 +19,7 @@ const completionSpec: Fig.Spec = { { name: "--delivery-stream-type", description: - "The delivery stream type. This parameter can be one of the following values: DirectPut: Provider applications access the delivery stream directly. KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source", + "The Firehose stream type. This parameter can be one of the following values: DirectPut: Provider applications access the Firehose stream directly. 
KinesisStreamAsSource: The Firehose stream uses a Kinesis data stream as a source", args: { name: "string", }, @@ -27,7 +27,7 @@ const completionSpec: Fig.Spec = { { name: "--kinesis-stream-source-configuration", description: - "When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream", + "When a Kinesis data stream is used as the source for the Firehose stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream", args: { name: "structure", }, @@ -99,7 +99,7 @@ const completionSpec: Fig.Spec = { { name: "--tags", description: - "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. You can specify up to 50 tags when creating a delivery stream. If you specify tags in the CreateDeliveryStream action, Amazon Data Firehose performs an additional authorization on the firehose:TagDeliveryStream action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an AccessDeniedException such as following. AccessDeniedException User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy. For an example IAM policy, see Tag example", + "A set of tags to assign to the Firehose stream. 
A tag is a key-value pair that you can define and assign to Amazon Web Services resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. You can specify up to 50 tags when creating a Firehose stream. If you specify tags in the CreateDeliveryStream action, Amazon Data Firehose performs an additional authorization on the firehose:TagDeliveryStream action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an AccessDeniedException such as following. AccessDeniedException User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy. For an example IAM policy, see Tag example", args: { name: "list", }, @@ -129,8 +129,15 @@ }, { name: "--iceberg-destination-configuration", + description: "Configure Apache Iceberg Tables destination", + args: { + name: "structure", + }, + }, + { + name: "--database-source-configuration", description: - "Configure Apache Iceberg Tables destination. Amazon Data Firehose is in preview release and is subject to change", + "Amazon Data Firehose is in preview release and is subject to change", args: { name: "structure", }, @@ -157,11 +164,11 @@ { name: "delete-delivery-stream", description: - "Deletes a delivery stream and its data. You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. 
To check the state of a delivery stream, use DescribeDeliveryStream. DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING state.While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream. Removal of a delivery stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state to be removed", + "Deletes a Firehose stream and its data. You can delete a Firehose stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a Firehose stream that is in the CREATING state. To check the state of a Firehose stream, use DescribeDeliveryStream. DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the Firehose stream is marked for deletion, and it goes into the DELETING state.While the Firehose stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a Firehose stream. Removal of a Firehose stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the DELETING state for several minutes. 
Therefore, as a best practice, applications should not wait for streams in the DELETING state to be removed", options: [ { name: "--delivery-stream-name", - description: "The name of the delivery stream", + description: "The name of the Firehose stream", args: { name: "string", }, @@ -169,12 +176,12 @@ const completionSpec: Fig.Spec = { { name: "--allow-force-delete", description: - "Set this to true if you want to delete the delivery stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation. The default value is false", + "Set this to true if you want to delete the Firehose stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation. The default value is false", }, { name: "--no-allow-force-delete", description: - "Set this to true if you want to delete the delivery stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation. 
The default value is false", + "Set this to true if you want to delete the Firehose stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation. The default value is false", }, { name: "--cli-input-json", @@ -198,11 +205,11 @@ const completionSpec: Fig.Spec = { { name: "describe-delivery-stream", description: - "Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true", + "Describes the specified Firehose stream and its status. For example, after your Firehose stream is created, call DescribeDeliveryStream to see whether the Firehose stream is ACTIVE and therefore ready for data to be sent to it. If the status of a Firehose stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. 
If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true", options: [ { name: "--delivery-stream-name", - description: "The name of the delivery stream", + description: "The name of the Firehose stream", args: { name: "string", }, @@ -210,7 +217,7 @@ const completionSpec: Fig.Spec = { { name: "--limit", description: - "The limit on the number of destinations to return. You can have one destination per delivery stream", + "The limit on the number of destinations to return. You can have one destination per Firehose stream", args: { name: "integer", }, @@ -218,7 +225,7 @@ const completionSpec: Fig.Spec = { { name: "--exclusive-start-destination-id", description: - "The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream", + "The ID of the destination to start returning the destination information. Firehose supports one destination per Firehose stream", args: { name: "string", }, @@ -245,12 +252,12 @@ const completionSpec: Fig.Spec = { { name: "list-delivery-streams", description: - "Lists your delivery streams in alphabetical order of their names. The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call", + "Lists your Firehose streams in alphabetical order of their names. The number of Firehose streams might be too large to return using a single call to ListDeliveryStreams. 
You can limit the number of Firehose streams returned, using the Limit parameter. To determine whether there are more Firehose streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more Firehose streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last Firehose stream returned in the last call", options: [ { name: "--limit", description: - "The maximum number of delivery streams to list. The default value is 10", + "The maximum number of Firehose streams to list. The default value is 10", args: { name: "integer", }, @@ -258,7 +265,7 @@ const completionSpec: Fig.Spec = { { name: "--delivery-stream-type", description: - "The delivery stream type. This can be one of the following values: DirectPut: Provider applications access the delivery stream directly. KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source. This parameter is optional. If this parameter is omitted, delivery streams of all types are returned", + "The Firehose stream type. This can be one of the following values: DirectPut: Provider applications access the Firehose stream directly. KinesisStreamAsSource: The Firehose stream uses a Kinesis data stream as a source. This parameter is optional. 
If this parameter is omitted, Firehose streams of all types are returned", args: { name: "string", }, @@ -266,7 +273,7 @@ const completionSpec: Fig.Spec = { { name: "--exclusive-start-delivery-stream-name", description: - "The list of delivery streams returned by this call to ListDeliveryStreams will start with the delivery stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName", + "The list of Firehose streams returned by this call to ListDeliveryStreams will start with the Firehose stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName", args: { name: "string", }, @@ -293,12 +300,12 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-delivery-stream", description: - "Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account", + "Lists the tags for the specified Firehose stream. This operation has a limit of five transactions per second per account", options: [ { name: "--delivery-stream-name", description: - "The name of the delivery stream whose tags you want to list", + "The name of the Firehose stream whose tags you want to list", args: { name: "string", }, @@ -314,7 +321,7 @@ const completionSpec: Fig.Spec = { { name: "--limit", description: - "The number of tags to return. If this number is less than the total number of tags associated with the delivery stream, HasMoreTags is set to true in the response. To list additional tags, set ExclusiveStartTagKey to the last key in the response", + "The number of tags to return. If this number is less than the total number of tags associated with the Firehose stream, HasMoreTags is set to true in the response. 
To list additional tags, set ExclusiveStartTagKey to the last key in the response", args: { name: "integer", }, @@ -341,11 +348,11 @@ const completionSpec: Fig.Spec = { { name: "put-record", description: - "Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits. Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. 
If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding", + "Writes a single data record into a Firehose stream. To write multiple data records into a Firehose stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each Firehose stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each Firehose stream. For more information about limits and how to request an increase, see Amazon Firehose Limits. Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a Firehose stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the Firehose stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. 
For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. For multi record de-aggregation, you can not put more than 500 records even if the data blob length is less than 1000 KiB. If you include more than 500 records, the request succeeds but the record de-aggregation doesn't work as expected and transformation lambda is invoked with the complete base64 encoded data blob instead of de-aggregated base64 decoded records. Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the Firehose stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Firehose are stored for 24 hours from the time they are added to a Firehose stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. 
Instead, concatenate the raw data, then perform base64 encoding", options: [ { name: "--delivery-stream-name", - description: "The name of the delivery stream", + description: "The name of the Firehose stream", args: { name: "string", }, @@ -379,11 +386,11 @@ const completionSpec: Fig.Spec = { { name: "put-record-batch", description: - "Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. 
The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. 
Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding", + "Writes multiple data records into a Firehose stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a Firehose stream, use PutRecord. Applications using these operations are referred to as producers. Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a Firehose stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the Firehose stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. For multi record de-aggregation, you can not put more than 500 records even if the data blob length is less than 1000 KiB. If you include more than 500 records, the request succeeds but the record de-aggregation doesn't work as expected and transformation lambda is invoked with the complete base64 encoded data blob instead of de-aggregated base64 decoded records. 
Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. 
If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the Firehose stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Firehose are stored for 24 hours from the time they are added to a Firehose stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding", options: [ { name: "--delivery-stream-name", - description: "The name of the delivery stream", + description: "The name of the Firehose stream", args: { name: "string", }, @@ -417,12 +424,12 @@ const completionSpec: Fig.Spec = { { name: "start-delivery-stream-encryption", description: - "Enables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. 
To check the encryption status of a delivery stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, the Firehose API operations StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations. You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period", + "Enables server-side encryption (SSE) for the Firehose stream. This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. 
The encryption status of a Firehose stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your Firehose stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the Firehose stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption status of a Firehose stream, use DescribeDeliveryStream. Even if encryption is currently enabled for a Firehose stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS grant creation to be successful, the Firehose API operations StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old. If a Firehose stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK. If the encryption status of your Firehose stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations. 
You can enable SSE for a Firehose stream only if it's a Firehose stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per Firehose stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same Firehose stream in a 24-hour period", options: [ { name: "--delivery-stream-name", description: - "The name of the delivery stream for which you want to enable server-side encryption (SSE)", + "The name of the Firehose stream for which you want to enable server-side encryption (SSE)", args: { name: "string", }, @@ -457,12 +464,12 @@ const completionSpec: Fig.Spec = { { name: "stop-delivery-stream-encryption", description: - "Disables server-side encryption (SSE) for the delivery stream. This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption state of a delivery stream, use DescribeDeliveryStream. If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. 
For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period", + "Disables server-side encryption (SSE) for the Firehose stream. This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the Firehose stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively. To check the encryption state of a Firehose stream, use DescribeDeliveryStream. If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination. The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per Firehose stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same Firehose stream in a 24-hour period", options: [ { name: "--delivery-stream-name", description: - "The name of the delivery stream for which you want to disable server-side encryption (SSE)", + "The name of the Firehose stream for which you want to disable server-side encryption (SSE)", args: { name: "string", }, @@ -489,12 +496,12 @@ const completionSpec: Fig.Spec = { { name: "tag-delivery-stream", description: - "Adds or updates tags for the specified delivery stream. 
A tag is a key-value pair that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. Each delivery stream can have up to 50 tags. This operation has a limit of five transactions per second per account", + "Adds or updates tags for the specified Firehose stream. A tag is a key-value pair that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User Guide. Each Firehose stream can have up to 50 tags. This operation has a limit of five transactions per second per account", options: [ { name: "--delivery-stream-name", description: - "The name of the delivery stream to which you want to add the tags", + "The name of the Firehose stream to which you want to add the tags", args: { name: "string", }, @@ -528,11 +535,11 @@ const completionSpec: Fig.Spec = { { name: "untag-delivery-stream", description: - "Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes. If you specify a tag that doesn't exist, the operation ignores it. This operation has a limit of five transactions per second per account", + "Removes tags from the specified Firehose stream. 
Removed tags are deleted, and you can't recover them after this operation successfully completes. If you specify a tag that doesn't exist, the operation ignores it. This operation has a limit of five transactions per second per account", options: [ { name: "--delivery-stream-name", - description: "The name of the delivery stream", + description: "The name of the Firehose stream", args: { name: "string", }, @@ -567,11 +574,11 @@ const completionSpec: Fig.Spec = { { name: "update-destination", description: - "Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination. If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified. 
Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call", + "Updates the specified destination of the specified Firehose stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target Firehose stream remains active while the configurations are updated, so data writes to the Firehose stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination. If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified. Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. 
This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call", options: [ { name: "--delivery-stream-name", - description: "The name of the delivery stream", + description: "The name of the Firehose stream", args: { name: "string", }, @@ -663,7 +670,7 @@ const completionSpec: Fig.Spec = { { name: "--iceberg-destination-update", description: - "Describes an update for a destination in Apache Iceberg Tables. Amazon Data Firehose is in preview release and is subject to change", + "Describes an update for a destination in Apache Iceberg Tables", args: { name: "structure", }, diff --git a/src/aws/fis.ts b/src/aws/fis.ts index d96c34e5235..33d020488a0 100644 --- a/src/aws/fis.ts +++ b/src/aws/fis.ts @@ -73,6 +73,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--experiment-report-configuration", + description: + "The experiment report configuration for the experiment template", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -995,6 +1003,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--experiment-report-configuration", + description: + "The experiment report configuration for the experiment template", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/gamelift.ts b/src/aws/gamelift.ts index 28acace4cde..f62aee0c404 100644 --- a/src/aws/gamelift.ts +++ b/src/aws/gamelift.ts @@ -164,12 +164,12 @@ const completionSpec: Fig.Spec = { { name: "create-build", description: - "Creates a new Amazon GameLift build resource for your game server binary files. Combine game server binaries into a zip file for use with Amazon GameLift. 
When setting up a new game build for Amazon GameLift, we recommend using the CLI command upload-build . This helper command combines two tasks: (1) it uploads your build files from a file directory to an Amazon GameLift Amazon S3 location, and (2) it creates a new build resource. You can use the CreateBuild operation in the following scenarios: Create a new game build with build files that are in an Amazon S3 location under an Amazon Web Services account that you control. To use this option, you give Amazon GameLift access to the Amazon S3 bucket. With permissions in place, specify a build name, operating system, and the Amazon S3 storage location of your game build. Upload your build files to a Amazon GameLift Amazon S3 location. To use this option, specify a build name and operating system. This operation creates a new build resource and also returns an Amazon S3 location with temporary access credentials. Use the credentials to manually upload your build files to the specified Amazon S3 location. For more information, see Uploading Objects in the Amazon S3 Developer Guide. After you upload build files to the Amazon GameLift Amazon S3 location, you can't update them. If successful, this operation creates a new build resource with a unique build ID and places it in INITIALIZED status. A build must be in READY status before you can create fleets with it. Learn more Uploading Your Game Create a Build with Files in Amazon S3 All APIs by task", + "Creates an Amazon GameLift build resource for your game server software and stores the software for deployment to hosting resources. Combine game server binaries and dependencies into a single .zip file Use the CLI command upload-build to quickly and simply create a new build and upload your game build .zip file to Amazon GameLift Amazon S3. This helper command eliminates the need to explicitly manage access permissions. 
Alternatively, use the CreateBuild action for the following scenarios: You want to create a build and upload a game build zip file from an Amazon S3 location that you control. In this scenario, you need to give Amazon GameLift permission to access the Amazon S3 bucket. With permission in place, call CreateBuild and specify a build name, the build's runtime operating system, and the Amazon S3 storage location where the build file is stored. You want to create a build and upload a local game build zip file to an Amazon S3 location that's controlled by Amazon GameLift. (See the upload-build CLI command for this scenario.) In this scenario, you need to request temporary access credentials to the Amazon GameLift Amazon S3 location. Specify a build name and the build's runtime operating system. The response provides an Amazon S3 location and a set of temporary access credentials. Use the credentials to upload your build files to the specified Amazon S3 location (see Uploading Objects in the Amazon S3 Developer Guide). You can't update build files after uploading them to Amazon GameLift Amazon S3. If successful, this action creates a new build resource with a unique build ID and places it in INITIALIZED status. When the build reaches READY status, you can create fleets with it. Learn more Uploading Your Game Create a Build with Files in Amazon S3 All APIs by task", options: [ { name: "--name", description: - "A descriptive label associated with a build. Build names don't need to be unique. You can change this value later", + "A descriptive label that is associated with a build. Build names do not need to be unique. You can change this value later", args: { name: "string", }, @@ -185,7 +185,7 @@ const completionSpec: Fig.Spec = { { name: "--operating-system", description: - "The operating system that your game server binaries run on. This value determines the type of fleet resources that you use for this build. 
If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. You can't change a build's operating system later. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5", + "The environment that your game server binaries run on. This value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. This parameter is required, and there's no default value. You can't change a build's operating system later. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5", args: { name: "string", }, @@ -209,7 +209,7 @@ const completionSpec: Fig.Spec = { { name: "--build-version", description: - "Version information associated with a build or script. Version strings don't need to be unique. You can change this value later", + "Version information that is associated with a build or script. Version strings do not need to be unique. You can change this value later", args: { name: "string", }, @@ -233,10 +233,153 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-container-fleet", + description: + "Creates a managed fleet of Amazon Elastic Compute Cloud (Amazon EC2) instances to host your containerized game servers. 
Use this operation to define how to deploy a container architecture onto each fleet instance and configure fleet settings. You can create a container fleet in any Amazon Web Services Regions that Amazon GameLift supports for multi-location fleets. A container fleet can be deployed to a single location or multiple locations. Container fleets are deployed with Amazon Linux 2023 as the instance operating system. Define the fleet's container architecture using container group definitions. Each fleet can have one of the following container group types: The game server container group runs your game server build and dependent software. Amazon GameLift deploys one or more replicas of this container group to each fleet instance. The number of replicas depends on the computing capabilities of the fleet instance in use. An optional per-instance container group might be used to run other software that only needs to run once per instance, such as background services, logging, or test processes. One per-instance container group is deployed to each fleet instance. Each container group can include the definition for one or more containers. A container definition specifies a container image that is stored in an Amazon Elastic Container Registry (Amazon ECR) public or private repository. Request options Use this operation to make the following types of requests. Most fleet settings have default values, so you can create a working fleet with a minimal configuration and default values, which you can customize later. Create a fleet with no container groups. You can configure a container fleet and then add container group definitions later. In this scenario, no fleet instances are deployed, and the fleet can't host game sessions until you add a game server container group definition. Provide the following required parameter values: FleetRoleArn Create a fleet with a game server container group. 
Provide the following required parameter values: FleetRoleArn GameServerContainerGroupDefinitionName Create a fleet with a game server container group and a per-instance container group. Provide the following required parameter values: FleetRoleArn GameServerContainerGroupDefinitionName PerInstanceContainerGroupDefinitionName Results If successful, this operation creates a new container fleet resource, places it in PENDING status, and initiates the fleet creation workflow. For fleets with container groups, this workflow starts a fleet deployment and transitions the status to ACTIVE. Fleets without a container group are placed in CREATED status. You can update most of the properties of a fleet, including container group definitions, and deploy the update across all fleet instances. Use a fleet update to deploy a new game server version update across the container fleet", + options: [ + { + name: "--fleet-role-arn", + description: + "The unique identifier for an Identity and Access Management (IAM) role with permissions to run your containers on resources that are managed by Amazon GameLift. Use an IAM service role with the GameLiftContainerFleetPolicy managed policy attached. For more information, see Set up an IAM service role. You can't change this fleet property after the fleet is created. IAM role ARN values use the following pattern: arn:aws:iam::[Amazon Web Services account]:role/[role name]", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "A meaningful description of the container fleet", + args: { + name: "string", + }, + }, + { + name: "--game-server-container-group-definition-name", + description: + "A container group definition resource that describes how to deploy containers with your game server build and support software onto each fleet instance. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. 
Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource", + args: { + name: "string", + }, + }, + { + name: "--per-instance-container-group-definition-name", + description: + "The name of a container group definition resource that describes a set of auxiliary software. A fleet instance has one process for executables in this container group. A per-instance container group is optional. You can update the fleet to add or remove a per-instance container group at any time. You can specify the container group definition's name to use the latest version. Alternatively, provide an ARN value with a specific version number. Create a container group definition by calling CreateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource", + args: { + name: "string", + }, + }, + { + name: "--instance-connection-port-range", + description: + "The set of port numbers to open on each fleet instance. A fleet's connection ports map to container ports that are configured in the fleet's container group definitions. By default, Amazon GameLift calculates an optimal port range based on your fleet configuration. To use the calculated range, don't set this parameter. The values are: Port range: 4192 to a number calculated based on your fleet configuration. Amazon GameLift uses the following formula: 4192 + [# of game server container groups per fleet instance] * [# of container ports in the game server container group definition] + [# of container ports in the game server container group definition] You can also choose to manually set this parameter. When manually setting this parameter, you must use port numbers that match the fleet's inbound permissions port range. 
If you set values manually, Amazon GameLift no longer calculates a port range for you, even if you later remove the manual settings", + args: { + name: "structure", + }, + }, + { + name: "--instance-inbound-permissions", + description: + "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. As a best practice, when remotely accessing a fleet instance, we recommend opening ports only when you need them and closing them when you're finished. By default, Amazon GameLift calculates an optimal port range based on your fleet configuration. To use the calculated range, don't set this parameter. The values are: Protocol: UDP Port range: 4192 to a number calculated based on your fleet configuration. Amazon GameLift uses the following formula: 4192 + [# of game server container groups per fleet instance] * [# of container ports in the game server container group definition] + [# of container ports in the game server container group definition] You can also choose to manually set this parameter. When manually setting this parameter, you must use port numbers that match the fleet's connection port range. If you set values manually, Amazon GameLift no longer calculates a port range for you, even if you later remove the manual settings", + args: { + name: "list", + }, + }, + { + name: "--game-server-container-groups-per-instance", + description: + "The number of times to replicate the game server container group on each fleet instance. By default, Amazon GameLift calculates the maximum number of game server container groups that can fit on each instance. This calculation is based on the CPU and memory resources of the fleet's instance type. To use the calculated maximum, don't set this parameter. 
If you set this number manually, Amazon GameLift uses your value as long as it's less than the calculated maximum", + args: { + name: "integer", + }, + }, + { + name: "--instance-type", + description: + "The Amazon EC2 instance type to use for all instances in the fleet. For multi-location fleets, the instance type must be available in the home region and all remote locations. Instance type determines the computing resources and processing power that's available to host your game servers. This includes CPU, memory, storage, and networking capacity. By default, Amazon GameLift selects an instance type that fits the needs of your container groups and is available in all selected fleet locations. You can also choose to manually set this parameter. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types. You can't update this fleet property later", + args: { + name: "string", + }, + }, + { + name: "--billing-type", + description: + "Indicates whether to use On-Demand or Spot instances for this fleet. Learn more about when to use On-Demand versus Spot Instances. This fleet property can't be changed after the fleet is created. By default, this property is set to ON_DEMAND. You can't update this fleet property later", + args: { + name: "string", + }, + }, + { + name: "--locations", + description: + "A set of locations to deploy container fleet instances to. You can add any Amazon Web Services Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more Amazon Web Services Region codes, such as us-west-2, or Local Zone names. Also include the fleet's home Region, which is the Amazon Web Services Region where the fleet is created. 
For a list of supported Regions and Local Zones, see Amazon GameLift service locations for managed hosting", + args: { + name: "list", + }, + }, + { + name: "--metric-groups", + description: + "The name of an Amazon Web Services CloudWatch metric group to add this fleet to. You can use a metric group to aggregate metrics for multiple fleets. You can specify an existing metric group name or use a new name to create a new metric group. Each fleet can have only one metric group, but you can change this value at any time", + args: { + name: "list", + }, + }, + { + name: "--new-game-session-protection-policy", + description: + "Determines whether Amazon GameLift can shut down game sessions on the fleet that are actively running and hosting players. Amazon GameLift might prompt an instance shutdown when scaling down fleet capacity or when retiring unhealthy instances. You can also set game session protection for individual game sessions using UpdateGameSession. NoProtection -- Game sessions can be shut down during active gameplay. FullProtection -- Game sessions in ACTIVE status can't be shut down. By default, this property is set to NoProtection", + args: { + name: "string", + }, + }, + { + name: "--game-session-creation-limit-policy", + description: + "A policy that limits the number of game sessions that each individual player can create on instances in this fleet. The limit applies for a specified span of time", + args: { + name: "structure", + }, + }, + { + name: "--log-configuration", + description: + "A method for collecting container logs for the fleet. Amazon GameLift saves all standard output for each container in logs, including game session logs. You can select from the following methods: CLOUDWATCH -- Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group. S3 -- Store logs in an Amazon S3 bucket that you define. NONE -- Don't collect container logs. 
By default, this property is set to CLOUDWATCH. Amazon GameLift requires permissions to send logs to other Amazon Web Services services in your account. These permissions are included in the IAM fleet role for this container fleet (see FleetRoleArn)", + args: { + name: "structure", + }, + }, + { + name: "--tags", + description: + "A list of labels to assign to the new fleet resource. Tags are developer-defined key-value pairs. Tagging Amazon Web Services resources is useful for resource management, access management and cost allocation. For more information, see Tagging Amazon Web Services Resources in the Amazon Web Services General Reference", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-container-group-definition", description: - 'This operation is used with the Amazon GameLift containers feature, which is currently in public preview. Creates a ContainerGroupDefinition resource that describes a set of containers for hosting your game server with Amazon GameLift managed EC2 hosting. 
An Amazon GameLift container group is similar to a container "task" and "pod". Each container group can have one or more containers. Use container group definitions when you create a container fleet. Container group definitions determine how Amazon GameLift deploys your containers to each instance in a container fleet. You can create two types of container groups, based on scheduling strategy: A replica container group manages the containers that run your game server application and supporting software. Replica container groups might be replicated multiple times on each fleet instance, depending on instance resources. A daemon container group manages containers that run other software, such as background services, logging, or test processes. You might use a daemon container group for processes that need to run only once per fleet instance, or processes that need to persist independently of the replica container group. To create a container group definition, specify a group name, a list of container definitions, and maximum total CPU and memory requirements for the container group. Specify an operating system and scheduling strategy or use the default values. When using the Amazon Web Services CLI tool, you can pass in your container definitions as a JSON file. This operation requires Identity and Access Management (IAM) permissions to access container images in Amazon ECR repositories. See IAM permissions for Amazon GameLift for help setting the appropriate permissions. If successful, this operation creates a new ContainerGroupDefinition resource with an ARN value assigned. You can\'t change the properties of a container group definition. Instead, create a new one. Learn more Create a container group definition Container fleet design guide Create a container definition as a JSON file', + "Creates a ContainerGroupDefinition that describes a set of containers for hosting your game server with Amazon GameLift managed containers hosting. 
An Amazon GameLift container group is similar to a container task or pod. Use container group definitions when you create a container fleet with CreateContainerFleet. A container group definition determines how Amazon GameLift deploys your containers to each instance in a container fleet. You can maintain multiple versions of a container group definition. There are two types of container groups: A game server container group has the containers that run your game server application and supporting software. A game server container group can have these container types: Game server container. This container runs your game server. You can define one game server container in a game server container group. Support container. This container runs software in parallel with your game server. You can define up to 8 support containers in a game server group. When building a game server container group definition, you can choose to bundle your game server executable and all dependent software into a single game server container. Alternatively, you can separate the software into one game server container and one or more support containers. On a container fleet instance, a game server container group can be deployed multiple times (depending on the compute resources of the instance). This means that all containers in the container group are replicated together. A per-instance container group has containers for processes that aren't replicated on a container fleet instance. This might include background services, logging, test processes, or processes that need to persist independently of the game server container group. When building a per-instance container group, you can define up to 10 support containers. This operation requires Identity and Access Management (IAM) permissions to access container images in Amazon ECR repositories. See IAM permissions for Amazon GameLift for help setting the appropriate permissions. 
Request options Use this operation to make the following types of requests. You can specify values for the minimum required parameters and customize optional values later. Create a game server container group definition. Provide the following required parameter values: Name ContainerGroupType (GAME_SERVER) OperatingSystem (omit to use default value) TotalMemoryLimitMebibytes (omit to use default value) TotalVcpuLimit (omit to use default value) At least one GameServerContainerDefinition ContainerName ImageUrl PortConfiguration ServerSdkVersion (omit to use default value) Create a per-instance container group definition. Provide the following required parameter values: Name ContainerGroupType (PER_INSTANCE) OperatingSystem (omit to use default value) TotalMemoryLimitMebibytes (omit to use default value) TotalVcpuLimit (omit to use default value) At least one SupportContainerDefinition ContainerName ImageUrl Results If successful, this request creates a ContainerGroupDefinition resource and assigns a unique ARN value. You can update most properties of a container group definition by calling UpdateContainerGroupDefinition, and optionally save the update as a new version", options: [ { name: "--name", @@ -247,33 +390,41 @@ const completionSpec: Fig.Spec = { }, }, { - name: "--scheduling-strategy", + name: "--container-group-type", description: - "The method for deploying the container group across fleet instances. A replica container group might have multiple copies on each fleet instance. A daemon container group has one copy per fleet instance. Default value is REPLICA", + "The type of container group being defined. Container group type determines how Amazon GameLift deploys the container group on each fleet instance. Default value: GAME_SERVER", args: { name: "string", }, }, { - name: "--total-memory-limit", + name: "--total-memory-limit-mebibytes", description: - "The maximum amount of memory (in MiB) to allocate to the container group. 
All containers in the group share this memory. If you specify memory limits for individual containers, set this parameter based on the following guidelines. The value must be (1) greater than the sum of the soft memory limits for all containers in the group, and (2) greater than any individual container's hard memory limit", + "The maximum amount of memory (in MiB) to allocate to the container group. All containers in the group share this memory. If you specify memory limits for an individual container, the total value must be greater than any individual container's memory limit. Default value: 1024", args: { name: "integer", }, }, { - name: "--total-cpu-limit", + name: "--total-vcpu-limit", description: - "The maximum amount of CPU units to allocate to the container group. Set this parameter to an integer value in CPU units (1 vCPU is equal to 1024 CPU units). All containers in the group share this memory. If you specify CPU limits for individual containers, set this parameter based on the following guidelines. The value must be equal to or greater than the sum of the CPU limits for all containers in the group", + "The maximum amount of vCPU units to allocate to the container group (1 vCPU is equal to 1024 CPU units). All containers in the group share this memory. If you specify vCPU limits for individual containers, the total value must be equal to or greater than the sum of the CPU limits for all containers in the group. Default value: 1", args: { - name: "integer", + name: "double", + }, + }, + { + name: "--game-server-container-definition", + description: + "The definition for the game server container in this group. Define a game server container only when the container group type is GAME_SERVER. Game server containers specify a container image with your game server build. 
You can pass in your container definitions as a JSON file", + args: { + name: "structure", }, }, { - name: "--container-definitions", + name: "--support-container-definitions", description: - "Definitions for all containers in this group. Each container definition identifies the container image and specifies configuration settings for the container. See the Container fleet design guide for container guidelines", + "One or more definition for support containers in this group. You can define a support container in any type of container group. You can pass in your container definitions as a JSON file", args: { name: "list", }, @@ -281,7 +432,15 @@ const completionSpec: Fig.Spec = { { name: "--operating-system", description: - "The platform that is used by containers in the container group definition. All containers in a group must run on the same operating system. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK version 5", + "The platform that all containers in the group use. Containers in a group must run on the same operating system. Default value: AMAZON_LINUX_2023 Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. 
See Migrate to Amazon GameLift server SDK version 5", + args: { + name: "string", + }, + }, + { + name: "--version-description", + description: + "A description for the initial version of this container group definition", args: { name: "string", }, @@ -316,7 +475,7 @@ const completionSpec: Fig.Spec = { { name: "create-fleet", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Creates a fleet of compute resources to host your game servers. Use this operation to set up the following types of fleets based on compute type: Managed EC2 fleet An EC2 fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances. Your game server build is deployed to each fleet instance. Amazon GameLift manages the fleet's instances and controls the lifecycle of game server processes, which host game sessions for players. EC2 fleets can have instances in multiple locations. Each instance in the fleet is designated a Compute. To create an EC2 fleet, provide these required parameters: Either BuildId or ScriptId ComputeType set to EC2 (the default value) EC2InboundPermissions EC2InstanceType FleetType Name RuntimeConfiguration with at least one ServerProcesses configuration If successful, this operation creates a new fleet resource and places it in NEW status while Amazon GameLift initiates the fleet creation workflow. To debug your fleet, fetch logs, view performance metrics or other actions on the fleet, create a development fleet with port 22/3389 open. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. When the fleet status is ACTIVE, you can adjust capacity settings and turn autoscaling on/off for each location. Managed container fleet A container fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances. Your container architecture is deployed to each fleet instance based on the fleet configuration. 
Amazon GameLift manages the containers on each fleet instance and controls the lifecycle of game server processes, which host game sessions for players. Container fleets can have instances in multiple locations. Each container on an instance that runs game server processes is registered as a Compute. To create a container fleet, provide these required parameters: ComputeType set to CONTAINER ContainerGroupsConfiguration EC2InboundPermissions EC2InstanceType FleetType set to ON_DEMAND Name RuntimeConfiguration with at least one ServerProcesses configuration If successful, this operation creates a new fleet resource and places it in NEW status while Amazon GameLift initiates the fleet creation workflow. When the fleet status is ACTIVE, you can adjust capacity settings and turn autoscaling on/off for each location. Anywhere fleet An Anywhere fleet represents compute resources that are not owned or managed by Amazon GameLift. You might create an Anywhere fleet with your local machine for testing, or use one to host game servers with on-premises hardware or other game hosting solutions. To create an Anywhere fleet, provide these required parameters: ComputeType set to ANYWHERE Locations specifying a custom location Name If successful, this operation creates a new fleet resource and places it in ACTIVE status. You can register computes with a fleet in ACTIVE status. Learn more Setting up fleets Setting up a container fleet Debug fleet creation issues Multi-location fleets", + "Creates a fleet of compute resources to host your game servers. Use this operation to set up the following types of fleets based on compute type: Managed EC2 fleet An EC2 fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances. Your game server build is deployed to each fleet instance. Amazon GameLift manages the fleet's instances and controls the lifecycle of game server processes, which host game sessions for players. EC2 fleets can have instances in multiple locations. 
Each instance in the fleet is designated a Compute. To create an EC2 fleet, provide these required parameters: Either BuildId or ScriptId ComputeType set to EC2 (the default value) EC2InboundPermissions EC2InstanceType FleetType Name RuntimeConfiguration with at least one ServerProcesses configuration If successful, this operation creates a new fleet resource and places it in NEW status while Amazon GameLift initiates the fleet creation workflow. To debug your fleet, fetch logs, view performance metrics or other actions on the fleet, create a development fleet with port 22/3389 open. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. When the fleet status is ACTIVE, you can adjust capacity settings and turn autoscaling on/off for each location. Anywhere fleet An Anywhere fleet represents compute resources that are not owned or managed by Amazon GameLift. You might create an Anywhere fleet with your local machine for testing, or use one to host game servers with on-premises hardware or other game hosting solutions. To create an Anywhere fleet, provide these required parameters: ComputeType set to ANYWHERE Locations specifying a custom location Name If successful, this operation creates a new fleet resource and places it in ACTIVE status. You can register computes with a fleet in ACTIVE status. Learn more Setting up fleets Debug fleet creation issues Multi-location fleets", options: [ { name: "--name", @@ -376,7 +535,7 @@ const completionSpec: Fig.Spec = { { name: "--ec2-instance-type", description: - "The Amazon GameLift-supported Amazon EC2 instance type to use with EC2 and container fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. 
See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types", + "The Amazon GameLift-supported Amazon EC2 instance type to use with managed EC2 fleets. Instance type determines the computing resources that will be used to host your game servers, including CPU, memory, storage, and networking capacity. See Amazon Elastic Compute Cloud Instance Types for detailed descriptions of Amazon EC2 instance types", args: { name: "string", }, @@ -384,7 +543,7 @@ const completionSpec: Fig.Spec = { { name: "--ec2-inbound-permissions", description: - "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for EC2 and container fleets. You can leave this parameter empty when creating the fleet, but you must call UpdateFleetPortSettings to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges. To manage inbound access for a container fleet, set this parameter to the same port numbers that you set for the fleet's connection port range. During the life of the fleet, update this parameter to control which connection ports are open to inbound traffic", + "The IP address ranges and port settings that allow inbound traffic to access game server processes and other processes on this fleet. Set this parameter for managed EC2 fleets. You can leave this parameter empty when creating the fleet, but you must call UpdateFleetPortSettings to set it before players can connect to game sessions. As a best practice, we recommend opening ports for remote access only when you need them and closing them when you're finished. 
For Realtime Servers fleets, Amazon GameLift automatically sets TCP and UDP ranges", args: { name: "list", }, @@ -400,7 +559,7 @@ const completionSpec: Fig.Spec = { { name: "--runtime-configuration", description: - "Instructions for how to launch and run server processes on the fleet. Set runtime configuration for EC2 fleets and container fleets. For an Anywhere fleets, set this parameter only if the fleet is running the Amazon GameLift Agent. The runtime configuration defines one or more server process configurations. Each server process identifies a game executable or Realtime script file and the number of processes to run concurrently. This parameter replaces the parameters ServerLaunchPath and ServerLaunchParameters, which are still supported for backward compatibility", + "Instructions for how to launch and run server processes on the fleet. Set runtime configuration for managed EC2 fleets. For an Anywhere fleets, set this parameter only if the fleet is running the Amazon GameLift Agent. The runtime configuration defines one or more server process configurations. Each server process identifies a game executable or Realtime script file and the number of processes to run concurrently. This parameter replaces the parameters ServerLaunchPath and ServerLaunchParameters, which are still supported for backward compatibility", args: { name: "structure", }, @@ -448,7 +607,7 @@ const completionSpec: Fig.Spec = { { name: "--instance-role-arn", description: - "A unique identifier for an IAM role with access permissions to other Amazon Web Services services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with Amazon Web Services resources that you own or have access to. For more information about using the role with your game server builds, see Communicate with other Amazon Web Services resources from your fleets. 
This fleet property can't be changed after the fleet is created", + "A unique identifier for an IAM role that manages access to your Amazon Web Services services. With an instance role ARN set, any application that runs on an instance in this fleet can assume the role, including install scripts, server processes, and daemons (background processes). Create a role or look up a role's ARN by using the IAM dashboard in the Amazon Web Services Management Console. Learn more about using on-box credentials for your game servers at Access external resources from a game server. This fleet property can't be changed after the fleet is created", args: { name: "string", }, @@ -480,7 +639,7 @@ const completionSpec: Fig.Spec = { { name: "--compute-type", description: - "The type of compute resource used to host your game servers. EC2 \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting. CONTAINER \u2013 Container images with your game server build and supporting software are deployed to Amazon EC2 instances for cloud hosting. With this compute type, you must specify the ContainerGroupsConfiguration parameter. ANYWHERE \u2013 Game servers or container images with your game server and supporting software are deployed to compute resources that are provided and managed by you. With this compute type, you can also set the AnywhereConfiguration parameter", + "The type of compute resource used to host your game servers. EC2 \u2013 The game server build is deployed to Amazon EC2 instances for cloud hosting. This is the default setting. ANYWHERE \u2013 Your game server and supporting software is deployed to compute resources that are provided and managed by you. 
With this compute type, you can also set the AnywhereConfiguration parameter", args: { name: "string", }, @@ -500,14 +659,6 @@ const completionSpec: Fig.Spec = { name: "string", }, }, - { - name: "--container-groups-configuration", - description: - "The container groups to deploy to instances in the container fleet and other fleet-level configuration settings. Use the CreateContainerGroupDefinition action to create container groups. A container fleet must have exactly one replica container group, and can optionally have one daemon container group. You can't change this property after you create the fleet", - args: { - name: "structure", - }, - }, { name: "--cli-input-json", description: @@ -530,7 +681,7 @@ const completionSpec: Fig.Spec = { { name: "create-fleet-locations", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Adds remote locations to an EC2 or container fleet and begins populating the new locations with instances. The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings. You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations. To add fleet locations, specify the fleet to be updated and provide a list of one or more locations. If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets Update fleet locations Amazon GameLift service locations for managed hosting", + "Adds remote locations to a managed EC2 fleet or managed container fleet and begins populating the new locations with instances. 
The new instances conform to the fleet's instance type, auto-scaling, and other configuration settings. You can't add remote locations to a fleet that resides in an Amazon Web Services Region that doesn't support multiple locations. Fleets created prior to March 2021 can't support multiple locations. To add fleet locations, specify the fleet to be updated and provide a list of one or more locations. If successful, this operation returns the list of added locations with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets Update fleet locations Amazon GameLift service locations for managed hosting", options: [ { name: "--fleet-id", @@ -751,7 +902,7 @@ const completionSpec: Fig.Spec = { { name: "--game-session-data", description: - "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session (see Start a Game Session)", + "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session. For more information, see Start a game session", args: { name: "string", }, @@ -799,7 +950,7 @@ const completionSpec: Fig.Spec = { { name: "--timeout-in-seconds", description: - "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status. By default, this property is set to 600", + "The maximum time, in seconds, that a new game session placement request remains in the queue. 
When a request exceeds this time, the game session placement changes to a TIMED_OUT status", args: { name: "integer", }, @@ -807,7 +958,7 @@ const completionSpec: Fig.Spec = { { name: "--player-latency-policies", description: - "A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value", + "A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value", args: { name: "list", }, @@ -991,7 +1142,7 @@ const completionSpec: Fig.Spec = { { name: "--additional-player-count", description: - "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 10-person team, and the additional player count is set to 2, 10 players will be selected for the match and 2 more player slots will be open for future players. This parameter is not used if FlexMatchMode is set to STANDALONE", + "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE", args: { name: "integer", }, @@ -1015,7 +1166,7 @@ const completionSpec: Fig.Spec = { { name: "--game-session-data", description: - "A set of custom game session properties, formatted as a single string value. 
This data is passed to a game server process with a request to start a new game session (see Start a Game Session). This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE", + "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session. For more information, see Start a game session. This information is added to the new GameSession object that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE", args: { name: "string", }, @@ -1209,12 +1360,12 @@ const completionSpec: Fig.Spec = { { name: "create-script", description: - "Creates a new script record for your Realtime Servers script. Realtime scripts are JavaScript that provide configuration settings and optional custom game logic for your game. The script is deployed when you create a Realtime Servers fleet to host your game sessions. Script logic is executed during an active game session. To create a new script record, specify a script name and provide the script file(s). The script files and all dependencies must be zipped into a single file. You can pull the zip file from either of these locations: A locally available directory. Use the ZipFile parameter for this option. An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket. If the call is successful, a new script record is created with a unique script ID. If the script file is provided as a local file, the file is uploaded to an Amazon GameLift-owned S3 bucket and the script record's storage location reflects this location. 
If the script file is provided as an S3 bucket, Amazon GameLift accesses the file at this storage location as needed for deployment. Learn more Amazon GameLift Realtime Servers Set Up a Role for Amazon GameLift Access Related actions All APIs by task", + "Creates a script resource for your Realtime Servers script. Realtime scripts are JavaScript files that provide configuration settings and optional custom game logic for your game. Script logic is executed during an active game session. To deploy Realtime Servers for hosting, create an Amazon GameLift managed fleet with the script. To create a script resource, specify a script name and provide the script file(s). The script files and all dependencies must be combined into a single .zip file. You can upload the .zip file from either of these locations: A locally available directory. Use the ZipFile parameter for this option. An Amazon Simple Storage Service (Amazon S3) bucket under your Amazon Web Services account. Use the StorageLocation parameter for this option. You'll need to have an Identity Access Management (IAM) role that allows the Amazon GameLift service to access your S3 bucket. If the call is successful, Amazon GameLift creates a new script resource with a unique script ID. The script is uploaded to an Amazon S3 bucket that is owned by Amazon GameLift. Learn more Amazon GameLift Realtime Servers Set Up a Role for Amazon GameLift Access Related actions All APIs by task", options: [ { name: "--name", description: - "A descriptive label that is associated with a script. Script names don't need to be unique. You can use UpdateScript to change this value later", + "A descriptive label that is associated with a script. Script names do not need to be unique. You can use UpdateScript to change this value later", args: { name: "string", }, @@ -1246,7 +1397,7 @@ const completionSpec: Fig.Spec = { { name: "--script-version", description: - "Version information associated with a build or script. 
Version strings don't need to be unique. You can use UpdateScript to change this value later", + "Version information that is associated with a build or script. Version strings do not need to be unique. You can use UpdateScript to change this value later", args: { name: "string", }, @@ -1422,10 +1573,42 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-container-fleet", + description: + "Deletes all resources and information related to a container fleet and shuts down currently running fleet instances, including those in remote locations. The container fleet must be in ACTIVE status to be deleted. To delete a fleet, specify the fleet ID to be terminated. During the deletion process, the fleet status is changed to DELETING. Learn more Setting up Amazon GameLift Fleets", + options: [ + { + name: "--fleet-id", + description: + "A unique identifier for the container fleet to delete. You can use either the fleet ID or ARN value", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-container-group-definition", description: - "This operation is used with the Amazon GameLift containers feature, which is currently in public preview. Deletes a container group definition resource. You can delete a container group definition if there are no fleets using the definition. To delete a container group definition, identify the resource to delete. Learn more Manage a container group definition", + "Deletes a container group definition. You can delete a container group definition if there are no fleets using the definition. Request options: Delete an entire container group definition, including all versions. Specify the container group definition name, or use an ARN value without the version number. Delete a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number. Keep the newest versions and delete all older versions. Specify the container group definition name and the number of versions to retain. For example, set VersionCountToRetain to 5 to delete all but the five most recent versions. 
Learn more Manage a container group definition", options: [ { name: "--name", @@ -1435,6 +1618,21 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--version-number", + description: "The specific version to delete", + args: { + name: "integer", + }, + }, + { + name: "--version-count-to-retain", + description: + "The number of most recent versions to keep while deleting all older versions", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: @@ -1847,7 +2045,7 @@ const completionSpec: Fig.Spec = { { name: "deregister-compute", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Removes a compute resource from an Amazon GameLift Anywhere fleet or container fleet. Deregistered computes can no longer host game sessions through Amazon GameLift. For an Anywhere fleet or a container fleet that's running the Amazon GameLift Agent, the Agent handles all compute registry tasks for you. For an Anywhere fleet that doesn't use the Agent, call this operation to deregister fleet computes. To deregister a compute, call this operation from the compute that's being deregistered and specify the compute name and the fleet ID", + "Removes a compute resource from an Amazon GameLift Anywhere fleet. Deregistered computes can no longer host game sessions through Amazon GameLift. For an Anywhere fleet that's running the Amazon GameLift Agent, the Agent handles all compute registry tasks for you. For an Anywhere fleet that doesn't use the Agent, call this operation to deregister fleet computes. To deregister a compute, call this operation from the compute that's being deregistered and specify the compute name and the fleet ID", options: [ { name: "--fleet-id", @@ -1860,7 +2058,7 @@ const completionSpec: Fig.Spec = { { name: "--compute-name", description: - "The unique identifier of the compute resource to deregister. 
For an Anywhere fleet compute, use the registered compute name. For a container fleet, use the compute name (for example, a123b456c789012d3e4567f8a901b23c/1a234b56-7cd8-9e0f-a1b2-c34d567ef8a9) or the compute ARN", + "The unique identifier of the compute resource to deregister. For an Anywhere fleet compute, use the registered compute name", args: { name: "string", }, @@ -1991,7 +2189,7 @@ const completionSpec: Fig.Spec = { { name: "describe-compute", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all computes in a fleet, call ListCompute. To request information on a specific compute, provide the fleet ID and compute name. If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information: For EC2 fleets, this operation returns information about the EC2 instance. For ANYWHERE fleets, this operation returns information about the registered compute. For CONTAINER fleets, this operation returns information about the container that's registered as a compute, and the instance it's running on. The compute name is the container name", + "Retrieves properties for a compute resource in an Amazon GameLift fleet. To get a list of all computes in a fleet, call ListCompute. To request information on a specific compute, provide the fleet ID and compute name. If successful, this operation returns details for the requested compute resource. Depending on the fleet's compute type, the result includes the following information: For managed EC2 fleets, this operation returns information about the EC2 instance. 
For Anywhere fleets, this operation returns information about the registered compute", options: [ { name: "--fleet-id", @@ -2004,7 +2202,39 @@ const completionSpec: Fig.Spec = { { name: "--compute-name", description: - "The unique identifier of the compute resource to retrieve properties for. For an Anywhere fleet compute, use the registered compute name. For an EC2 fleet instance, use the instance ID. For a container fleet, use the compute name (for example, a123b456c789012d3e4567f8a901b23c/1a234b56-7cd8-9e0f-a1b2-c34d567ef8a9) or the compute ARN", + "The unique identifier of the compute resource to retrieve properties for. For an Anywhere fleet compute, use the registered compute name. For an EC2 fleet instance, use the instance ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "describe-container-fleet", + description: + "Retrieves the properties for a container fleet. When requesting attributes for multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. 
Request options Get container fleet properties for a single fleet. Provide either the fleet ID or ARN value. Results If successful, a ContainerFleet object is returned. This object includes the fleet properties, including information about the most recent deployment. Some API operations limit the number of fleet IDs that allowed in one request. If a request exceeds this limit, the request fails and the error message contains the maximum allowed number", + options: [ + { + name: "--fleet-id", + description: + "A unique identifier for the container fleet to retrieve. You can use either the fleet ID or ARN value", args: { name: "string", }, @@ -2031,7 +2261,7 @@ const completionSpec: Fig.Spec = { { name: "describe-container-group-definition", description: - "This operation is used with the Amazon GameLift containers feature, which is currently in public preview. Retrieves the properties of a container group definition, including all container definitions in the group. To retrieve a container group definition, provide a resource identifier. If successful, this operation returns the complete properties of the container group definition. Learn more Manage a container group definition", + "Retrieves the properties of a container group definition, including all container definitions in the group. Request options: Retrieve the latest version of a container group definition. Specify the container group definition name only, or use an ARN value without a version number. Retrieve a particular version. Specify the container group definition name and a version number, or use an ARN value that includes the version number. Results: If successful, this operation returns the complete properties of a container group definition version. 
Learn more Manage a container group definition", options: [ { name: "--name", @@ -2041,6 +2271,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--version-number", + description: "The specific version to retrieve", + args: { + name: "integer", + }, + }, { name: "--cli-input-json", description: @@ -2103,7 +2340,7 @@ const completionSpec: Fig.Spec = { { name: "describe-fleet-attributes", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Retrieves core fleet-wide properties for fleets in an Amazon Web Services Region. Properties include the computing hardware and deployment configuration for instances in the fleet. You can use this operation in the following ways: To get attributes for specific fleets, provide a list of fleet IDs or fleet ARNs. To get attributes for all fleets, do not provide a fleet identifier. When requesting attributes for multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetAttributes object is returned for each fleet requested, unless the fleet identifier is not found. Some API operations limit the number of fleet IDs that allowed in one request. If a request exceeds this limit, the request fails and the error message contains the maximum allowed number. Learn more Setting up Amazon GameLift fleets", + "Retrieves core fleet-wide properties for fleets in an Amazon Web Services Region. Properties include the computing hardware and deployment configuration for instances in the fleet. You can use this operation in the following ways: To get attributes for specific fleets, provide a list of fleet IDs or fleet ARNs. To get attributes for all fleets, do not provide a fleet identifier. When requesting attributes for multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. 
If successful, a FleetAttributes object is returned for each fleet requested, unless the fleet identifier is not found. Some API operations limit the number of fleet IDs that allowed in one request. If a request exceeds this limit, the request fails and the error message contains the maximum allowed number. Learn more Setting up Amazon GameLift fleets", options: [ { name: "--fleet-ids", @@ -2175,7 +2412,7 @@ const completionSpec: Fig.Spec = { { name: "describe-fleet-capacity", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for replica container groups. With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see DescribeFleetLocationCapacity. This operation can be used in the following ways: To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs. To get capacity data for all fleets, do not provide a fleet identifier. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist. Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed. Learn more Setting up Amazon GameLift fleets GameLift metrics for fleets", + "Retrieves the resource capacity settings for one or more fleets. For a container fleet, this operation also returns counts for game server container groups. 
With multi-location fleets, this operation retrieves data for the fleet's home Region only. To retrieve capacity for remote locations, see DescribeFleetLocationCapacity. This operation can be used in the following ways: To get capacity data for one or more specific fleets, provide a list of fleet IDs or fleet ARNs. To get capacity data for all fleets, do not provide a fleet identifier. When requesting multiple fleets, use the pagination parameters to retrieve results as a set of sequential pages. If successful, a FleetCapacity object is returned for each requested fleet ID. Each FleetCapacity object includes a Location property, which is set to the fleet's home Region. Capacity values are returned only for fleets that currently exist. Some API operations may limit the number of fleet IDs that are allowed in one request. If a request exceeds this limit, the request fails and the error message includes the maximum allowed. Learn more Setting up Amazon GameLift fleets GameLift metrics for fleets", options: [ { name: "--fleet-ids", @@ -2244,6 +2481,46 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-fleet-deployment", + description: + "Retrieves information about a managed container fleet deployment. Request options Get information about the latest deployment for a specific fleet. Provide the fleet ID or ARN. Get information about a specific deployment. Provide the fleet ID or ARN and the deployment ID. Results If successful, a FleetDeployment object is returned", + options: [ + { + name: "--fleet-id", + description: + "A unique identifier for the container fleet. You can use either the fleet ID or ARN value", + args: { + name: "string", + }, + }, + { + name: "--deployment-id", + description: + "A unique identifier for the deployment to return information for", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "describe-fleet-events", description: @@ -2391,7 +2668,7 @@ const completionSpec: Fig.Spec = { { name: "describe-fleet-location-capacity", description: - "Retrieves the resource capacity settings for a fleet location. The data returned includes the current capacity (number of EC2 instances) and some scaling settings for the requested fleet location. For a container fleet, this operation also returns counts for replica container groups. Use this operation to retrieve capacity information for a fleet's remote location or home Region (you can also retrieve home Region capacity by calling DescribeFleetCapacity). To retrieve capacity data, identify a fleet and location. If successful, a FleetCapacity object is returned for the requested fleet location. Learn more Setting up Amazon GameLift fleets Amazon GameLift service locations for managed hosting GameLift metrics for fleets", + "Retrieves the resource capacity settings for a fleet location. The data returned includes the current capacity (number of EC2 instances) and some scaling settings for the requested fleet location. 
For a managed container fleet, this operation also returns counts for game server container groups. Use this operation to retrieve capacity information for a fleet's remote location or home Region (you can also retrieve home Region capacity by calling DescribeFleetCapacity). To retrieve capacity data, identify a fleet and location. If successful, a FleetCapacity object is returned for the requested fleet location. Learn more Setting up Amazon GameLift fleets Amazon GameLift service locations for managed hosting GameLift metrics for fleets", options: [ { name: "--fleet-id", @@ -2471,7 +2748,7 @@ const completionSpec: Fig.Spec = { { name: "describe-fleet-port-settings", description: - "Retrieves a fleet's inbound connection permissions. Connection permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range. To connect to game server processes on a container fleet, the port settings should include one or more of the fleet's connection ports. Use this operation in the following ways: To retrieve the port settings for a fleet, identify the fleet's unique identifier. To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations. If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets", + "Retrieves a fleet's inbound connection permissions. Inbound permissions specify IP addresses and port settings that incoming traffic can use to access server processes in the fleet. Game server processes that are running in the fleet must use a port that falls within this range. 
To connect to game server processes on a managed container fleet, the port settings should include one or more of the container fleet's connection ports. Use this operation in the following ways: To retrieve the port settings for a fleet, identify the fleet's unique identifier. To check the status of recent updates to a fleet remote location, specify the fleet ID and a location. Port setting updates can take time to propagate across all locations. If successful, a set of IpPermission objects is returned for the requested fleet ID. When specifying a location, this operation returns a pending status. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets", options: [ { name: "--fleet-id", @@ -3412,7 +3689,7 @@ const completionSpec: Fig.Spec = { { name: "describe-runtime-configuration", description: - "Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how, on computes in the fleet. For managed EC2 fleets, the runtime configuration describes server processes that run on each fleet instance. For container fleets, the runtime configuration describes server processes that run in each replica container group. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration. To get the current runtime configuration for a fleet, provide the fleet ID. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets Running multiple processes on a fleet", + "Retrieves a fleet's runtime configuration settings. The runtime configuration determines which server processes run, and how they run, and how many run concurrently on computes in managed EC2 and Anywhere fleets. You can update a fleet's runtime configuration at any time using UpdateRuntimeConfiguration. 
To get the current runtime configuration for a fleet, provide the fleet ID. If successful, a RuntimeConfiguration object is returned for the requested fleet. If the requested fleet has been deleted, the result set is empty. Learn more Setting up Amazon GameLift fleets Running multiple processes on a fleet", options: [ { name: "--fleet-id", @@ -3620,7 +3897,7 @@ const completionSpec: Fig.Spec = { { name: "get-compute-access", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Requests authorization to remotely connect to a hosting resource in a Amazon GameLift managed fleet. This operation is not used with Amazon GameLift Anywhere fleets To request access, specify the compute name and the fleet ID. If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token. EC2 fleets With an EC2 fleet (where compute type is EC2), use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide. Container fleets With a container fleet (where compute type is CONTAINER), use these credentials and the target value with SSM to connect to the fleet instance where the container is running. After you're connected to the instance, use Docker commands to interact with the container. Learn more Remotely connect to fleet instances Debug fleet issues", + "Requests authorization to remotely connect to a hosting resource in a Amazon GameLift managed fleet. This operation is not used with Amazon GameLift Anywhere fleets. Request options To request access to a compute, specify the compute name and the fleet ID. Results If successful, this operation returns a set of temporary Amazon Web Services credentials, including a two-part access key and a session token. 
With a managed EC2 fleet (where compute type is EC2), use these credentials with Amazon EC2 Systems Manager (SSM) to start a session with the compute. For more details, see Starting a session (CLI) in the Amazon EC2 Systems Manager User Guide", options: [ { name: "--fleet-id", @@ -3633,7 +3910,7 @@ const completionSpec: Fig.Spec = { { name: "--compute-name", description: - "A unique identifier for the compute resource that you want to connect to. For an EC2 fleet compute, use the instance ID. For a container fleet, use the compute name (for example, a123b456c789012d3e4567f8a901b23c/1a234b56-7cd8-9e0f-a1b2-c34d567ef8a9) or the compute ARN", + "A unique identifier for the compute resource that you want to connect to. For an EC2 fleet compute, use the instance ID. Use ListCompute to retrieve compute identifiers", args: { name: "string", }, @@ -3660,7 +3937,7 @@ const completionSpec: Fig.Spec = { { name: "get-compute-auth-token", description: - "Requests an authentication token from Amazon GameLift for a compute resource in an Amazon GameLift Anywhere fleet or container fleet. Game servers that are running on the compute use this token to communicate with the Amazon GameLift service, such as when calling the Amazon GameLift server SDK action InitSDK(). Authentication tokens are valid for a limited time span, so you need to request a fresh token before the current token expires. Use this operation based on the fleet compute type: For EC2 fleets, auth token retrieval and refresh is handled automatically. All game servers that are running on all fleet instances have access to a valid auth token. For ANYWHERE and CONTAINER fleets, if you're using the Amazon GameLift Agent, auth token retrieval and refresh is handled automatically for any container or Anywhere compute where the Agent is running. If you're not using the Agent, create a mechanism to retrieve and refresh auth tokens for computes that are running game server processes. 
Learn more Create an Anywhere fleet Test your integration Server SDK reference guides (for version 5.x)", + "Requests an authentication token from Amazon GameLift for a compute resource in an Amazon GameLift fleet. Game servers that are running on the compute use this token to communicate with the Amazon GameLift service, such as when calling the Amazon GameLift server SDK action InitSDK(). Authentication tokens are valid for a limited time span, so you need to request a fresh token before the current token expires. Request options For managed EC2 fleets (compute type EC2), auth token retrieval and refresh is handled automatically. All game servers that are running on all fleet instances have access to a valid auth token. For Anywhere fleets (compute type ANYWHERE), if you're using the Amazon GameLift Agent, auth token retrieval and refresh is handled automatically for any compute where the Agent is running. If you're not using the Agent, create a mechanism to retrieve and refresh auth tokens for computes that are running game server processes. Learn more Create an Anywhere fleet Test your integration Server SDK reference guides (for version 5.x)", options: [ { name: "--fleet-id", @@ -3673,7 +3950,7 @@ const completionSpec: Fig.Spec = { { name: "--compute-name", description: - "The name of the compute resource you are requesting the authentication token for. For an Anywhere fleet compute, use the registered compute name. For an EC2 fleet instance, use the instance ID. For a container fleet, use the compute name (for example, a123b456c789012d3e4567f8a901b23c/1a234b56-7cd8-9e0f-a1b2-c34d567ef8a9) or the compute ARN", + "The name of the compute resource you are requesting the authentication token for. For an Anywhere fleet compute, use the registered compute name. 
For an EC2 fleet instance, use the instance ID", args: { name: "string", }, @@ -3873,7 +4150,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, don't specify a value", + "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value", args: { name: "string", }, @@ -3924,7 +4201,7 @@ const completionSpec: Fig.Spec = { { name: "list-compute", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Retrieves information on the compute resources in an Amazon GameLift fleet. To request a list of computes, specify the fleet ID. Use the pagination parameters to retrieve results in a set of sequential pages. You can filter the result set by location. If successful, this operation returns information on all computes in the requested fleet. Depending on the fleet's compute type, the result includes the following information: For EC2 fleets, this operation returns information about the EC2 instance. Compute names are instance IDs. For ANYWHERE fleets, this operation returns the compute names and details provided when the compute was registered with RegisterCompute. The GameLiftServiceSdkEndpoint or GameLiftAgentEndpoint is included. For CONTAINER fleets, this operation returns information about containers that are registered as computes, and the instances they're running on. Compute names are container names", + "Retrieves information on the compute resources in an Amazon GameLift fleet. Use the pagination parameters to retrieve results in a set of sequential pages. Request options: Retrieve a list of all computes in a fleet. 
Specify a fleet ID. Retrieve a list of all computes in a specific fleet location. Specify a fleet ID and location. Results: If successful, this operation returns information on a set of computes. Depending on the type of fleet, the result includes the following information: For managed EC2 fleets (compute type EC2), this operation returns information about the EC2 instance. Compute names are EC2 instance IDs. For Anywhere fleets (compute type ANYWHERE), this operation returns compute names and details as provided when the compute was registered with RegisterCompute. This includes GameLiftServiceSdkEndpoint or GameLiftAgentEndpoint", options: [ { name: "--fleet-id", @@ -3937,29 +4214,189 @@ const completionSpec: Fig.Spec = { { name: "--location", description: - "The name of a location to retrieve compute resources for. For an Amazon GameLift Anywhere fleet, use a custom location. For a multi-location EC2 or container fleet, provide a Amazon Web Services Region or Local Zone code (for example: us-west-2 or us-west-2-lax-1)", + "The name of a location to retrieve compute resources for. For an Amazon GameLift Anywhere fleet, use a custom location. For a managed fleet, provide a Amazon Web Services Region or Local Zone code (for example: us-west-2 or us-west-2-lax-1)", args: { name: "string", }, }, { - name: "--limit", + name: "--container-group-definition-name", description: - "The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages", + "For computes in a managed container fleet, the name of the deployed container group definition", args: { - name: "integer", + name: "string", }, }, { - name: "--next-token", + name: "--compute-status", description: - "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. 
To start at the beginning of the result set, do not specify a value", + "The status of computes in a managed container fleet, based on the success of the latest update deployment. ACTIVE -- The compute is deployed with the correct container definitions. It is ready to process game servers and host game sessions. IMPAIRED -- An update deployment to the compute failed, and the compute is deployed with incorrect container definitions", args: { name: "string", }, }, { - name: "--cli-input-json", + name: "--limit", + description: + "The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. 
Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-container-fleets", + description: + "Retrieves a collection of container fleet resources in an Amazon Web Services Region. For fleets that have multiple locations, this operation retrieves fleets based on their home Region only. Request options Get a list of all fleets. Call this operation without specifying a container group definition. Get a list of fleets filtered by container group definition. Provide the container group definition name or ARN value. To get a list of all Realtime Servers fleets with a specific configuration script, provide the script ID. Use the pagination parameters to retrieve results as a set of sequential pages. 
If successful, this operation returns a collection of container fleets that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve. Fleet IDs are returned in no particular order", + options: [ + { + name: "--container-group-definition-name", + description: + "The container group definition to filter the list on. Use this parameter to retrieve only those fleets that use the specified container group definition. You can specify the container group definition's name to get fleets with the latest versions. Alternatively, provide an ARN value to get fleets with a specific version number", + args: { + name: "string", + }, + }, + { + name: "--limit", + description: + "The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. 
This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-container-group-definition-versions", + description: + "Retrieves all versions of a container group definition. Use the pagination parameters to retrieve results in a set of sequential pages. Request options: Get all versions of a specified container group definition. 
Specify the container group definition name or ARN value. (If the ARN value has a version number, it's ignored.) Results: If successful, this operation returns the complete properties of a set of container group definition versions that match the request. This operation returns the list of container group definitions in descending version order (latest first). Learn more Manage a container group definition", + options: [ + { + name: "--name", + description: + "The unique identifier for the container group definition to retrieve properties for. You can use either the Name or ARN value", + args: { + name: "string", + }, + }, + { + name: "--limit", + description: + "The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", description: "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", args: { @@ -4004,12 +4441,12 @@ const completionSpec: Fig.Spec = { { name: "list-container-group-definitions", description: - "This operation is used with the Amazon GameLift containers feature, which is currently in public preview. Retrieves all container group definitions for the Amazon Web Services account and Amazon Web Services Region that are currently in use. You can filter the result set by the container groups' scheduling strategy. 
Use the pagination parameters to retrieve results in a set of sequential pages. This operation returns the list of container group definitions in no particular order. Learn more Manage a container group definition", + "Retrieves container group definitions for the Amazon Web Services account and Amazon Web Services Region. Use the pagination parameters to retrieve results in a set of sequential pages. This operation returns only the latest version of each definition. To retrieve all versions of a container group definition, use ListContainerGroupDefinitionVersions. Request options: Retrieve the most recent versions of all container group definitions. Retrieve the most recent versions of all container group definitions, filtered by type. Specify the container group type to filter on. Results: If successful, this operation returns the complete properties of a set of container group definition versions that match the request. This operation returns the list of container group definitions in no particular order. Learn more Manage a container group definition", options: [ { - name: "--scheduling-strategy", + name: "--container-group-type", description: - "The type of container group definitions to retrieve. DAEMON -- Daemon container groups run background processes and are deployed once per fleet instance. REPLICA -- Replica container groups run your game server application and supporting software. Replica groups might be deployed multiple times per fleet instance", + "The type of container group to retrieve. Container group type determines how Amazon GameLift deploys the container group on each fleet instance", args: { name: "string", }, @@ -4074,30 +4511,94 @@ const completionSpec: Fig.Spec = { ], }, { - name: "list-fleets", + name: "list-fleet-deployments", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. 
Retrieves a collection of fleet resources in an Amazon Web Services Region. You can filter the result set to find only those fleets that are deployed with a specific build or script. For fleets that have multiple locations, this operation retrieves fleets based on their home Region only. You can use operation in the following ways: To get a list of all fleets in a Region, don't provide a build or script identifier. To get a list of all fleets where a specific game build is deployed, provide the build ID. To get a list of all Realtime Servers fleets with a specific configuration script, provide the script ID. To get a list of all fleets with a specific container group definition, provide the ContainerGroupDefinition ID. Use the pagination parameters to retrieve results as a set of sequential pages. If successful, this operation returns a list of fleet IDs that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve. Fleet IDs are returned in no particular order", + "Retrieves a collection of container fleet deployments in an Amazon Web Services Region. Request options Get a list of all deployments. Call this operation without specifying a fleet ID. Get a list of all deployments for a fleet. Specify the container fleet ID or ARN value. To get a list of all Realtime Servers fleets with a specific configuration script, provide the script ID. Use the pagination parameters to retrieve results as a set of sequential pages. Results If successful, this operation returns a list of deployments that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve. Fleet IDs are returned in no particular order", options: [ { - name: "--build-id", + name: "--fleet-id", description: - "A unique identifier for the build to request fleets for. Use this parameter to return only fleets using a specified build. 
Use either the build ID or ARN value", + "A unique identifier for the container fleet. You can use either the fleet ID or ARN value", args: { name: "string", }, }, { - name: "--script-id", + name: "--limit", description: - "A unique identifier for the Realtime script to request fleets for. Use this parameter to return only fleets using a specified script. Use either the script ID or ARN value", + "The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value", args: { name: "string", }, }, { - name: "--container-group-definition-name", + name: "--cli-input-json", description: - "The container group definition name to request fleets for. Use this parameter to return only fleets that are deployed with the specified container group definition", + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. 
Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "list-fleets", + description: + "Retrieves a collection of fleet resources in an Amazon Web Services Region. You can filter the result set to find only those fleets that are deployed with a specific build or script. For fleets that have multiple locations, this operation retrieves fleets based on their home Region only. You can use operation in the following ways: To get a list of all fleets in a Region, don't provide a build or script identifier. To get a list of all fleets where a specific game build is deployed, provide the build ID. To get a list of all Realtime Servers fleets with a specific configuration script, provide the script ID. 
Use the pagination parameters to retrieve results as a set of sequential pages. If successful, this operation returns a list of fleet IDs that match the request parameters. A NextToken value is also returned if there are more result pages to retrieve. Fleet IDs are returned in no particular order", + options: [ + { + name: "--build-id", + description: + "A unique identifier for the build to request fleets for. Use this parameter to return only fleets using a specified build. Use either the build ID or ARN value", + args: { + name: "string", + }, + }, + { + name: "--script-id", + description: + "A unique identifier for the Realtime script to request fleets for. Use this parameter to return only fleets using a specified script. Use either the script ID or ARN value", args: { name: "string", }, @@ -4389,7 +4890,7 @@ const completionSpec: Fig.Spec = { { name: "--next-token", description: - "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, don't specify a value", + "A token that indicates the start of the next sequential page of results. Use the token that is returned with a previous call to this operation. To start at the beginning of the result set, do not specify a value", args: { name: "string", }, @@ -4575,7 +5076,7 @@ const completionSpec: Fig.Spec = { { name: "register-compute", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Registers a compute resource in an Amazon GameLift fleet. Register computes with an Amazon GameLift Anywhere fleet or a container fleet. For an Anywhere fleet or a container fleet that's running the Amazon GameLift Agent, the Agent handles all compute registry tasks for you. For an Anywhere fleet that doesn't use the Agent, call this operation to register fleet computes. 
To register a compute, give the compute a name (must be unique within the fleet) and specify the compute resource's DNS name or IP address. Provide a fleet ID and a fleet location to associate with the compute being registered. You can optionally include the path to a TLS certificate on the compute resource. If successful, this operation returns compute details, including an Amazon GameLift SDK endpoint or Agent endpoint. Game server processes running on the compute can use this endpoint to communicate with the Amazon GameLift service. Each server process includes the SDK endpoint in its call to the Amazon GameLift server SDK action InitSDK(). To view compute details, call DescribeCompute with the compute name. Learn more Create an Anywhere fleet Test your integration Server SDK reference guides (for version 5.x)", + "Registers a compute resource in an Amazon GameLift Anywhere fleet. For an Anywhere fleet that's running the Amazon GameLift Agent, the Agent handles all compute registry tasks for you. For an Anywhere fleet that doesn't use the Agent, call this operation to register fleet computes. To register a compute, give the compute a name (must be unique within the fleet) and specify the compute resource's DNS name or IP address. Provide a fleet ID and a fleet location to associate with the compute being registered. You can optionally include the path to a TLS certificate on the compute resource. If successful, this operation returns compute details, including an Amazon GameLift SDK endpoint or Agent endpoint. Game server processes running on the compute can use this endpoint to communicate with the Amazon GameLift service. Each server process includes the SDK endpoint in its call to the Amazon GameLift server SDK action InitSDK(). To view compute details, call DescribeCompute with the compute name. 
Learn more Create an Anywhere fleet Test your integration Server SDK reference guides (for version 5.x)", options: [ { name: "--fleet-id", @@ -4964,7 +5465,7 @@ const completionSpec: Fig.Spec = { { name: "start-game-session-placement", description: - "Places a request for a new game session in a queue. When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out. A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request. When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order. Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players. To place a new game session request, specify the following: The queue name and a set of game session properties and settings A unique ID (such as a UUID) for the placement. 
You use this ID to track the status of the placement request (Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player) Latency data for all players (if you want to optimize game play for the players) If successful, a new game session placement is created. To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, you can resubmit the request or retry it with a different queue", + "Places a request for a new game session in a queue. When processing a placement request, Amazon GameLift searches for available resources on the queue's destinations, scanning each until it finds resources or the placement request times out. A game session placement request can also request player sessions. When a new game session is successfully created, Amazon GameLift creates a player session for each player included in the request. When placing a game session, by default Amazon GameLift tries each fleet in the order they are listed in the queue configuration. Ideally, a queue's destinations are listed in preference order. Alternatively, when requesting a game session with players, you can also provide latency data for each player in relevant Regions. Latency data indicates the performance lag a player experiences when connected to a fleet in the Region. Amazon GameLift uses latency data to reorder the list of destinations to place the game session in a Region with minimal lag. If latency data is provided for multiple players, Amazon GameLift calculates each Region's average lag for all players and reorders to get the best game play across all players. 
To place a new game session request, specify the following: The queue name and a set of game session properties and settings A unique ID (such as a UUID) for the placement. You use this ID to track the status of the placement request (Optional) A set of player data and a unique player ID for each player that you are joining to the new game session (player data is optional, but if you include it, you must also provide a unique ID for each player) Latency data for all players (if you want to optimize game play for the players) If successful, a new game session placement is created. To track the status of a placement request, call DescribeGameSessionPlacement and check the request's status. If the status is FULFILLED, a new game session has been created and a game session ARN and Region are referenced. If the placement request times out, submit a new request to the same queue or a different queue", options: [ { name: "--placement-id", @@ -5009,7 +5510,7 @@ const completionSpec: Fig.Spec = { { name: "--player-latencies", description: - "A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players", + "A set of values, expressed in milliseconds, that indicates the amount of latency that a player experiences when connected to Amazon Web Services Regions. This information is used to try to place the new game session where it can offer the best possible gameplay experience for the players", args: { name: "list", }, @@ -5025,7 +5526,7 @@ const completionSpec: Fig.Spec = { { name: "--game-session-data", description: - "A set of custom game session properties, formatted as a single string value. 
This data is passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session)", + "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session. For more information, see Start a game session", args: { name: "string", }, @@ -5453,7 +5954,7 @@ const completionSpec: Fig.Spec = { { name: "--name", description: - "A descriptive label associated with a build. Build names don't need to be unique", + "A descriptive label that is associated with a build. Build names do not need to be unique", args: { name: "string", }, @@ -5461,7 +5962,229 @@ const completionSpec: Fig.Spec = { { name: "--build-version", description: - "Version information associated with a build or script. Version strings don't need to be unique", + "Version information that is associated with a build or script. Version strings do not need to be unique", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "update-container-fleet", + description: + "Updates the properties of a managed container fleet. Depending on the properties being updated, this operation might initiate a fleet deployment. You can track deployments for a fleet using DescribeFleetDeployment. Request options As with CreateContainerFleet, many fleet properties use common defaults or are calculated based on the fleet's container group definitions. Update fleet properties that result in a fleet deployment. Include only those properties that you want to change. Specify deployment configuration settings. Update fleet properties that don't result in a fleet deployment. Include only those properties that you want to change. Changes to the following properties initiate a fleet deployment: GameServerContainerGroupDefinition PerInstanceContainerGroupDefinition GameServerContainerGroupsPerInstance InstanceInboundPermissions InstanceConnectionPortRange LogConfiguration Results If successful, this operation updates the container fleet resource, and might initiate a new deployment of fleet resources using the deployment configuration provided. A deployment replaces existing fleet instances with new instances that are deployed with the updated fleet properties. The fleet is placed in UPDATING status until the deployment is complete, then return to ACTIVE. You can have only one update deployment active at a time for a fleet. If a second update request initiates a deployment while another deployment is in progress, the first deployment is cancelled", + options: [ + { + name: "--fleet-id", + description: + "A unique identifier for the container fleet to update. 
You can use either the fleet ID or ARN value", + args: { + name: "string", + }, + }, + { + name: "--game-server-container-group-definition-name", + description: + "The name or ARN value of a new game server container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. You can't remove a fleet's game server container group definition, you can only update or replace it with another definition. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version", + args: { + name: "string", + }, + }, + { + name: "--per-instance-container-group-definition-name", + description: + "The name or ARN value of a new per-instance container group definition to deploy on the fleet. If you're updating the fleet to a specific version of a container group definition, use the ARN value and include the version number. If you're updating the fleet to the latest version of a container group definition, you can use the name value. Update a container group definition by calling UpdateContainerGroupDefinition. This operation creates a ContainerGroupDefinition resource with an incremented version. To remove a fleet's per-instance container group definition, leave this parameter empty and use the parameter RemoveAttributes", + args: { + name: "string", + }, + }, + { + name: "--game-server-container-groups-per-instance", + description: + "The number of times to replicate the game server container group on each fleet instance. By default, Amazon GameLift calculates the maximum number of game server container groups that can fit on each instance. You can remove this property value to use the calculated value, or set it manually. 
If you set this number manually, Amazon GameLift uses your value as long as it's less than the calculated maximum", + args: { + name: "integer", + }, + }, + { + name: "--instance-connection-port-range", + description: + "A revised set of port numbers to open on each fleet instance. By default, Amazon GameLift calculates an optimal port range based on your fleet configuration. If you previously set this parameter manually, you can't reset this to use the calculated settings", + args: { + name: "structure", + }, + }, + { + name: "--instance-inbound-permission-authorizations", + description: + "A set of ports to add to the container fleet's inbound permissions", + args: { + name: "list", + }, + }, + { + name: "--instance-inbound-permission-revocations", + description: + "A set of ports to remove from the container fleet's inbound permissions", + args: { + name: "list", + }, + }, + { + name: "--deployment-configuration", + description: + "Instructions for how to deploy updates to a container fleet, if the fleet update initiates a deployment. The deployment configuration lets you determine how to replace fleet instances and what actions to take if the deployment fails", + args: { + name: "structure", + }, + }, + { + name: "--description", + description: "A meaningful description of the container fleet", + args: { + name: "string", + }, + }, + { + name: "--metric-groups", + description: + "The name of an Amazon Web Services CloudWatch metric group to add this fleet to", + args: { + name: "list", + }, + }, + { + name: "--new-game-session-protection-policy", + description: + "The game session protection policy to apply to all new game sessions that are started in this fleet. Game sessions that already exist are not affected", + args: { + name: "string", + }, + }, + { + name: "--game-session-creation-limit-policy", + description: + "A policy that limits the number of game sessions that each individual player can create on instances in this fleet. 
The limit applies for a specified span of time", + args: { + name: "structure", + }, + }, + { + name: "--log-configuration", + description: "The method for collecting container logs for the fleet", + args: { + name: "structure", + }, + }, + { + name: "--remove-attributes", + description: + "If set, this update removes a fleet's per-instance container group definition. You can't remove a fleet's game server container group definition", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "update-container-group-definition", + description: + "Updates properties in an existing container group definition. This operation doesn't replace the definition. Instead, it creates a new version of the definition and saves it separately. You can access all versions that you choose to retain. The only property you can't update is the container group type. Request options: Update based on the latest version of the container group definition. Specify the container group definition name only, or use an ARN value without a version number. 
Provide updated values for the properties that you want to change only. All other values remain the same as the latest version. Update based on a specific version of the container group definition. Specify the container group definition name and a source version number, or use an ARN value with a version number. Provide updated values for the properties that you want to change only. All other values remain the same as the source version. Change a game server container definition. Provide the updated container definition. Add or change a support container definition. Provide a complete set of container definitions, including the updated definition. Remove a support container definition. Provide a complete set of container definitions, excluding the definition to remove. If the container group has only one support container definition, provide an empty set. Results: If successful, this operation returns the complete properties of the new container group definition version. If the container group definition version is used in an active fleets, the update automatically initiates a new fleet deployment of the new version. You can track a fleet's deployments using ListFleetDeployments", + options: [ + { + name: "--name", + description: + "A descriptive identifier for the container group definition. The name value must be unique in an Amazon Web Services Region", + args: { + name: "string", + }, + }, + { + name: "--game-server-container-definition", + description: + "An updated definition for the game server container in this group. Define a game server container only when the container group type is GAME_SERVER. You can pass in your container definitions as a JSON file", + args: { + name: "structure", + }, + }, + { + name: "--support-container-definitions", + description: + "One or more definitions for support containers in this group. You can define a support container in any type of container group. 
You can pass in your container definitions as a JSON file", + args: { + name: "list", + }, + }, + { + name: "--total-memory-limit-mebibytes", + description: + "The maximum amount of memory (in MiB) to allocate to the container group. All containers in the group share this memory. If you specify memory limits for an individual container, the total value must be greater than any individual container's memory limit", + args: { + name: "integer", + }, + }, + { + name: "--total-vcpu-limit", + description: + "The maximum amount of vCPU units to allocate to the container group (1 vCPU is equal to 1024 CPU units). All containers in the group share this memory. If you specify vCPU limits for individual containers, the total value must be equal to or greater than the sum of the CPU limits for all containers in the group", + args: { + name: "double", + }, + }, + { + name: "--version-description", + description: + "A description for this update to the container group definition", + args: { + name: "string", + }, + }, + { + name: "--source-version-number", + description: + "The container group definition version to update. The new version starts with values from the source version, and then updates values included in this request", + args: { + name: "integer", + }, + }, + { + name: "--operating-system", + description: + "The platform that all containers in the group use. Containers in a group must run on the same operating system. Amazon Linux 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x, first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. 
See Migrate to Amazon GameLift server SDK version 5", args: { name: "string", }, @@ -5516,7 +6239,7 @@ const completionSpec: Fig.Spec = { { name: "--new-game-session-protection-policy", description: - "The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession . NoProtection -- The game session can be terminated during a scale-down event. FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event", + "The game session protection policy to apply to all new game sessions created in this fleet. Game sessions that already exist are not affected. You can set protection for individual game sessions using UpdateGameSession. NoProtection -- The game session can be terminated during a scale-down event. FullProtection -- If the game session is in an ACTIVE status, it cannot be terminated during a scale-down event", args: { name: "string", }, @@ -5566,7 +6289,7 @@ const completionSpec: Fig.Spec = { { name: "update-fleet-capacity", description: - "This operation has been expanded to use with the Amazon GameLift containers feature, which is currently in public preview. Updates capacity settings for a managed EC2 fleet or container fleet. For these fleets, you adjust capacity by changing the number of instances in the fleet. Fleet capacity determines the number of game sessions and players that the fleet can host based on its configuration. For fleets with multiple locations, use this operation to manage capacity settings in each location individually. Use this operation to set these fleet capacity properties: Minimum/maximum size: Set hard limits on the number of Amazon EC2 instances allowed. If Amazon GameLift receives a request--either through manual update or automatic scaling--it won't change the capacity to a value outside of this range. 
Desired capacity: As an alternative to automatic scaling, manually set the number of Amazon EC2 instances to be maintained. Before changing a fleet's desired capacity, check the maximum capacity of the fleet's Amazon EC2 instance type by calling DescribeEC2InstanceLimits. To update capacity for a fleet's home Region, or if the fleet has no remote locations, omit the Location parameter. The fleet must be in ACTIVE status. To update capacity for a fleet's remote location, set the Location parameter to the location to update. The location must be in ACTIVE status. If successful, Amazon GameLift updates the capacity settings and returns the identifiers for the updated fleet and/or location. If a requested change to desired capacity exceeds the instance type's limit, the LimitExceeded exception occurs. Updates often prompt an immediate change in fleet capacity, such as when current capacity is different than the new desired capacity or outside the new limits. In this scenario, Amazon GameLift automatically initiates steps to add or remove instances in the fleet location. You can track a fleet's current capacity by calling DescribeFleetCapacity or DescribeFleetLocationCapacity. Learn more Scaling fleet capacity", + "Updates capacity settings for a managed EC2 fleet or managed container fleet. For these fleets, you adjust capacity by changing the number of instances in the fleet. Fleet capacity determines the number of game sessions and players that the fleet can host based on its configuration. For fleets with multiple locations, use this operation to manage capacity settings in each location individually. Use this operation to set these fleet capacity properties: Minimum/maximum size: Set hard limits on the number of Amazon EC2 instances allowed. If Amazon GameLift receives a request--either through manual update or automatic scaling--it won't change the capacity to a value outside of this range. 
Desired capacity: As an alternative to automatic scaling, manually set the number of Amazon EC2 instances to be maintained. Before changing a fleet's desired capacity, check the maximum capacity of the fleet's Amazon EC2 instance type by calling DescribeEC2InstanceLimits. To update capacity for a fleet's home Region, or if the fleet has no remote locations, omit the Location parameter. The fleet must be in ACTIVE status. To update capacity for a fleet's remote location, set the Location parameter to the location to update. The location must be in ACTIVE status. If successful, Amazon GameLift updates the capacity settings and returns the identifiers for the updated fleet and/or location. If a requested change to desired capacity exceeds the instance type's limit, the LimitExceeded exception occurs. Updates often prompt an immediate change in fleet capacity, such as when current capacity is different than the new desired capacity or outside the new limits. In this scenario, Amazon GameLift automatically initiates steps to add or remove instances in the fleet location. You can track a fleet's current capacity by calling DescribeFleetCapacity or DescribeFleetLocationCapacity. Learn more Scaling fleet capacity", options: [ { name: "--fleet-id", @@ -5890,7 +6613,7 @@ const completionSpec: Fig.Spec = { { name: "--timeout-in-seconds", description: - "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status. By default, this property is set to 600", + "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a TIMED_OUT status", args: { name: "integer", }, @@ -5898,7 +6621,7 @@ const completionSpec: Fig.Spec = { { name: "--player-latency-policies", description: - "A set of policies that act as a sliding cap on player latency. 
FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value. When updating policies, provide a complete collection of policies", + "A set of policies that enforce a sliding cap on player latency when processing game sessions placement requests. Use multiple policies to gradually relax the cap over time if Amazon GameLift can't make a placement. Policies are evaluated in order starting with the lowest maximum latency value. When updating policies, provide a complete collection of policies", args: { name: "list", }, @@ -6035,7 +6758,7 @@ const completionSpec: Fig.Spec = { { name: "--additional-player-count", description: - "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 10-person team, and the additional player count is set to 2, 10 players will be selected for the match and 2 more player slots will be open for future players. This parameter is not used if FlexMatchMode is set to STANDALONE", + "The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE", args: { name: "integer", }, @@ -6059,7 +6782,7 @@ const completionSpec: Fig.Spec = { { name: "--game-session-data", description: - "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session (see Start a Game Session). 
This information is added to the game session that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE", + "A set of custom game session properties, formatted as a single string value. This data is passed to a game server process with a request to start a new game session. For more information, see Start a game session. This information is added to the game session that is created for a successful match. This parameter is not used if FlexMatchMode is set to STANDALONE", args: { name: "string", }, @@ -6102,7 +6825,7 @@ const completionSpec: Fig.Spec = { { name: "update-runtime-configuration", description: - "Updates the runtime configuration for the specified fleet. The runtime configuration tells Amazon GameLift how to launch server processes on computes in the fleet. For managed EC2 fleets, it determines what server processes to run on each fleet instance. For container fleets, it describes what server processes to run in each replica container group. You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in ACTIVE status. To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration with an updated set of server process configurations. If successful, the fleet's runtime configuration settings are updated. Fleet computes that run game server processes regularly check for and receive updated runtime configurations. The computes immediately take action to comply with the new configuration by launching new server processes or by not replacing existing processes when they shut down. Updating a fleet's runtime configuration never affects existing server processes. Learn more Setting up Amazon GameLift fleets", + "Updates the runtime configuration for the specified fleet. The runtime configuration tells Amazon GameLift how to launch server processes on computes in managed EC2 and Anywhere fleets. 
You can update a fleet's runtime configuration at any time after the fleet is created; it does not need to be in ACTIVE status. To update runtime configuration, specify the fleet ID and provide a RuntimeConfiguration with an updated set of server process configurations. If successful, the fleet's runtime configuration settings are updated. Fleet computes that run game server processes regularly check for and receive updated runtime configurations. The computes immediately take action to comply with the new configuration by launching new server processes or by not replacing existing processes when they shut down. Updating a fleet's runtime configuration never affects existing server processes. Learn more Setting up Amazon GameLift fleets", options: [ { name: "--fleet-id", @@ -6155,7 +6878,7 @@ const completionSpec: Fig.Spec = { { name: "--name", description: - "A descriptive label that is associated with a script. Script names don't need to be unique", + "A descriptive label that is associated with a script. Script names do not need to be unique", args: { name: "string", }, @@ -6179,7 +6902,7 @@ const completionSpec: Fig.Spec = { { name: "--script-version", description: - "Version information associated with a build or script. Version strings don't need to be unique", + "Version information that is associated with a build or script. Version strings do not need to be unique", args: { name: "string", }, diff --git a/src/aws/geo-maps.ts b/src/aws/geo-maps.ts new file mode 100644 index 00000000000..a0bb5792319 --- /dev/null +++ b/src/aws/geo-maps.ts @@ -0,0 +1,303 @@ +const completionSpec: Fig.Spec = { + name: "geo-maps", + description: + "Integrate high-quality base map data into your applications using MapLibre. Capabilities include: Access to comprehensive base map data, allowing you to tailor the map display to your specific needs. Multiple pre-designed map styles suited for various application types, such as navigation, logistics, or data visualization. 
Generation of static map images for scenarios where interactive maps aren't suitable, such as: Embedding in emails or documents Displaying in low-bandwidth environments Creating printable maps Enhancing application performance by reducing client-side rendering", + subcommands: [ + { + name: "get-glyphs", + description: "Returns the map's glyphs", + options: [ + { + name: "--font-stack", + description: + "Name of the FontStack to retrieve. Example: Amazon Ember Bold,Noto Sans Bold. The supported font stacks are as follows: Amazon Ember Bold Amazon Ember Bold Italic Amazon Ember Bold,Noto Sans Bold Amazon Ember Bold,Noto Sans Bold,Noto Sans Arabic Bold Amazon Ember Condensed RC BdItalic Amazon Ember Condensed RC Bold Amazon Ember Condensed RC Bold Italic Amazon Ember Condensed RC Bold,Noto Sans Bold Amazon Ember Condensed RC Bold,Noto Sans Bold,Noto Sans Arabic Condensed Bold Amazon Ember Condensed RC Light Amazon Ember Condensed RC Light Italic Amazon Ember Condensed RC LtItalic Amazon Ember Condensed RC Regular Amazon Ember Condensed RC Regular Italic Amazon Ember Condensed RC Regular,Noto Sans Regular Amazon Ember Condensed RC Regular,Noto Sans Regular,Noto Sans Arabic Condensed Regular Amazon Ember Condensed RC RgItalic Amazon Ember Condensed RC ThItalic Amazon Ember Condensed RC Thin Amazon Ember Condensed RC Thin Italic Amazon Ember Heavy Amazon Ember Heavy Italic Amazon Ember Light Amazon Ember Light Italic Amazon Ember Medium Amazon Ember Medium Italic Amazon Ember Medium,Noto Sans Medium Amazon Ember Medium,Noto Sans Medium,Noto Sans Arabic Medium Amazon Ember Regular Amazon Ember Regular Italic Amazon Ember Regular Italic,Noto Sans Italic Amazon Ember Regular Italic,Noto Sans Italic,Noto Sans Arabic Regular Amazon Ember Regular,Noto Sans Regular Amazon Ember Regular,Noto Sans Regular,Noto Sans Arabic Regular Amazon Ember Thin Amazon Ember Thin Italic AmazonEmberCdRC_Bd AmazonEmberCdRC_BdIt AmazonEmberCdRC_Lt AmazonEmberCdRC_LtIt AmazonEmberCdRC_Rg 
AmazonEmberCdRC_RgIt AmazonEmberCdRC_Th AmazonEmberCdRC_ThIt AmazonEmber_Bd AmazonEmber_BdIt AmazonEmber_He AmazonEmber_HeIt AmazonEmber_Lt AmazonEmber_LtIt AmazonEmber_Md AmazonEmber_MdIt AmazonEmber_Rg AmazonEmber_RgIt AmazonEmber_Th AmazonEmber_ThIt Noto Sans Black Noto Sans Black Italic Noto Sans Bold Noto Sans Bold Italic Noto Sans Extra Bold Noto Sans Extra Bold Italic Noto Sans Extra Light Noto Sans Extra Light Italic Noto Sans Italic Noto Sans Light Noto Sans Light Italic Noto Sans Medium Noto Sans Medium Italic Noto Sans Regular Noto Sans Semi Bold Noto Sans Semi Bold Italic Noto Sans Thin Noto Sans Thin Italic NotoSans-Bold NotoSans-Italic NotoSans-Medium NotoSans-Regular Open Sans Regular,Arial Unicode MS Regular", + args: { + name: "string", + }, + }, + { + name: "--font-unicode-range", + description: + "A Unicode range of characters to download glyphs for. This must be aligned to multiples of 256. Example: 0-255.pbf", + args: { + name: "string", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + { + name: "get-sprites", + description: "Returns the map's sprites", + options: [ + { + name: "--file-name", + description: + "Sprites API: The name of the sprite file to retrieve, following pattern sprites(@2x)?\\.(png|json). Example: sprites.png", + args: { + name: "string", + }, + }, + { + name: "--style", + description: + "Style specifies the desired map style for the Sprites APIs", + args: { + name: "string", + }, + }, + { + name: "--color-scheme", + description: + "Sets color tone for map such as dark and light for specific map styles. It applies to only vector map styles such as Standard and Monochrome. Example: Light Default value: Light Valid values for ColorScheme are case sensitive", + args: { + name: "string", + }, + }, + { + name: "--variant", + description: + "Optimizes map styles for specific use case or industry. 
You can choose allowed variant only with Standard map style. Example: Default Valid values for Variant are case sensitive", + args: { + name: "string", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + { + name: "get-static-map", + description: + "Provides high-quality static map images with customizable options. You can modify the map's appearance and overlay additional information. It's an ideal solution for applications requiring tailored static map snapshots", + options: [ + { + name: "--bounding-box", + description: + "Takes in two pairs of coordinates, [Lon, Lat], denoting south-westerly and north-easterly edges of the image. The underlying area becomes the view of the image. Example: -123.17075,49.26959,-123.08125,49.31429", + args: { + name: "string", + }, + }, + { + name: "--bounded-positions", + description: + "Takes in two or more pair of coordinates, [Lon, Lat], with each coordinate separated by a comma. The API will generate an image to encompass all of the provided coordinates. Cannot be used with Zoom and or Radius Example: 97.170451,78.039098,99.045536,27.176178", + args: { + name: "string", + }, + }, + { + name: "--center", + description: + "Takes in a pair of coordinates, [Lon, Lat], which becomes the center point of the image. This parameter requires that either zoom or radius is set. Cannot be used with Zoom and or Radius Example: 49.295,-123.108", + args: { + name: "string", + }, + }, + { + name: "--compact-overlay", + description: + "Takes in a string to draw geometries on the image. The input is a comma separated format as follows format: [Lon, Lat] Example: line:-122.407653,37.798557,-122.413291,37.802443;color=%23DD0000;width=7;outline-color=#00DD00;outline-width=5yd|point:-122.40572,37.80004;label=Fog Hill Market;size=large;text-color=%23DD0000;color=#EE4B2B Currently it supports the following geometry types: point, line and polygon. 
It does not support multiPoint, multiLine and multiPolygon", + args: { + name: "string", + }, + }, + { + name: "--geo-json-overlay", + description: + 'Takes in a string to draw geometries on the image. The input is a valid GeoJSON collection object. Example: {"type":"FeatureCollection","features": [{"type":"Feature","geometry":{"type":"MultiPoint","coordinates": [[-90.076345,51.504107],[-0.074451,51.506892]]},"properties": {"color":"#00DD00"}}]}', + args: { + name: "string", + }, + }, + { + name: "--height", + description: "Specifies the height of the map image", + args: { + name: "integer", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--padding", + description: + "Applies additional space (in pixels) around overlay feature to prevent them from being cut or obscured. Value for max and min is determined by: Min: 1 Max: min(height, width)/4 Example: 100", + args: { + name: "integer", + }, + }, + { + name: "--radius", + description: + "Used with center parameter, it specifies the zoom of the image where you can control it on a granular level. Takes in any value >= 1. Example: 1500 Cannot be used with Zoom. Unit: Meters", + args: { + name: "long", + }, + }, + { + name: "--file-name", + description: + "The map scaling parameter to size the image, icons, and labels. It follows the pattern of ^map(@2x)?$. Example: map, map@2x", + args: { + name: "string", + }, + }, + { + name: "--scale-bar-unit", + description: + "Displays a scale on the bottom right of the map image with the unit specified in the input. 
Example: KilometersMiles, Miles, Kilometers, MilesKilometers", + args: { + name: "string", + }, + }, + { + name: "--style", + description: + "Style specifies the desired map style for the Style APIs", + args: { + name: "string", + }, + }, + { + name: "--width", + description: "Specifies the width of the map image", + args: { + name: "integer", + }, + }, + { + name: "--zoom", + description: + "Specifies the zoom level of the map image. Cannot be used with Radius", + args: { + name: "float", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + { + name: "get-style-descriptor", + description: "Returns information about the style", + options: [ + { + name: "--style", + description: "Style specifies the desired map style", + args: { + name: "string", + }, + }, + { + name: "--color-scheme", + description: + "Sets color tone for map such as dark and light for specific map styles. It applies to only vector map styles such as Standard and Monochrome. Example: Light Default value: Light Valid values for ColorScheme are case sensitive", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "Specifies the political view using ISO 3166-2 or ISO 3166-3 country code format. 
The following political views are currently supported: ARG: Argentina's view on the Southern Patagonian Ice Field and Tierra Del Fuego, including the Falkland Islands, South Georgia, and South Sandwich Islands EGY: Egypt's view on Bir Tawil IND: India's view on Gilgit-Baltistan KEN: Kenya's view on the Ilemi Triangle MAR: Morocco's view on Western Sahara PAK: Pakistan's view on Jammu and Kashmir and the Junagadh Area RUS: Russia's view on Crimea SDN: Sudan's view on the Halaib Triangle SRB: Serbia's view on Kosovo, Vukovar, and Sarengrad Islands SUR: Suriname's view on the Courantyne Headwaters and Lawa Headwaters SYR: Syria's view on the Golan Heights TUR: Turkey's view on Cyprus and Northern Cyprus TZA: Tanzania's view on Lake Malawi URY: Uruguay's view on Rincon de Artigas VNM: Vietnam's view on the Paracel Islands and Spratly Islands", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + { + name: "get-tile", + description: + "Returns a tile. Map tiles are used by clients to render a map. they're addressed using a grid arrangement with an X coordinate, Y coordinate, and Z (zoom) level", + options: [ + { + name: "--tileset", + description: + "Specifies the desired tile set. Valid Values: raster.satellite | vector.basemap", + args: { + name: "string", + }, + }, + { + name: "--z", + description: "The zoom value for the map tile", + args: { + name: "string", + }, + }, + { + name: "--x", + description: + "The X axis value for the map tile. 
Must be between 0 and 19", + args: { + name: "string", + }, + }, + { + name: "--y", + description: "The Y axis value for the map tile", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "outfile", + description: "Filename where the content will be saved", + args: { + name: "string", + }, + }, + ], + }, + ], +}; + +export default completionSpec; diff --git a/src/aws/geo-places.ts b/src/aws/geo-places.ts new file mode 100644 index 00000000000..00b7ebdcc78 --- /dev/null +++ b/src/aws/geo-places.ts @@ -0,0 +1,703 @@ +const completionSpec: Fig.Spec = { + name: "geo-places", + description: + "The Places API enables powerful location search and geocoding capabilities for your applications, offering global coverage with rich, detailed information. Key features include: Forward and reverse geocoding for addresses and coordinates Comprehensive place searches with detailed information, including: Business names and addresses Contact information Hours of operation POI (Points of Interest) categories Food types for restaurants Chain affiliation for relevant businesses Global data coverage with a wide range of POI categories Regular data updates to ensure accuracy and relevance", + subcommands: [ + { + name: "autocomplete", + description: + "The autocomplete operation speeds up and increases the accuracy of entering addresses by providing a list of address candidates matching a partially entered address. Results are sorted from most to least matching. Filtering and biasing can be used to increase the relevance of the results if additional search context is known", + options: [ + { + name: "--query-text", + description: + "The free-form text query to match addresses against. 
This is usually a partially typed address from an end user in an address box or form", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--bias-position", + description: + "The position in longitude and latitude that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WGS84 format. The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive", + args: { + name: "list", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--postal-code-mode", + description: + "The PostalCodeMode affects how postal code results are returned. If a postal code spans multiple localities and this value is empty, partial district or locality information may be returned under a single postal code result entry. If it's populated with the value cityLookup, all cities in that postal code are returned", + args: { + name: "string", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. 
The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "geocode", + description: + "The Geocode action allows you to obtain coordinates, addresses, and other information about places", + options: [ + { + name: "--query-text", + description: + "The free-form text query to match addresses against. 
This is usually a partially typed address from an end user in an address box or form", + args: { + name: "string", + }, + }, + { + name: "--query-components", + description: + "A structured free text query allows you to search for places by the name or text representation of specific properties of the place", + args: { + name: "structure", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--bias-position", + description: + "The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WGS84 format. The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive", + args: { + name: "list", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone, that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. 
Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-place", + description: + "Finds a place by its unique ID. A PlaceId is returned by other place operations", + options: [ + { + name: "--place-id", + description: + "The PlaceId of the place you wish to receive the information for", + args: { + name: "string", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters such as time zone that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. 
If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "reverse-geocode", + description: + "The ReverseGeocode operation allows you to retrieve addresses and place information from coordinates", + options: [ + { + name: "--query-position", + description: + "The position, in [lng, lat] for which you are querying nearby results. Results closer to the position will be ranked higher than results further away from the position", + args: { + name: "list", + }, + }, + { + name: "--query-radius", + description: + "The maximum distance in meters from the QueryPosition from which a result will be returned", + args: { + name: "long", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must possess in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. 
The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "search-nearby", + description: "Search nearby a specified location", + options: [ + { + name: "--query-position", + description: + "The position, in [lng, lat] for which you are querying nearby resultsfor. 
Results closer to the position will be ranked higher then results further away from the position", + args: { + name: "list", + }, + }, + { + name: "--query-radius", + description: + "The maximum distance in meters from the QueryPosition from which a result will be returned", + args: { + name: "long", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone, that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. 
Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "search-text", + description: + "Use the SearchText operation to search for geocode and place information. You can then complete a follow-up query suggested from the Suggest API via a query id", + options: [ + { + name: "--query-text", + description: + "The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form", + args: { + name: "string", + }, + }, + { + name: "--query-id", + description: "The query Id", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--bias-position", + description: + "The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. 
Stored in [lng, lat] and in the WSG84 format. The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive", + args: { + name: "list", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone, that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "suggest", + description: + "The Suggest operation finds addresses or place candidates based on incomplete or misspelled queries. You then select the best query to submit based on the returned results", + options: [ + { + name: "--query-text", + description: + "The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "An optional limit for the number of results returned in a single call", + args: { + name: "integer", + }, + }, + { + name: "--max-query-refinements", + description: + "Maximum number of query terms to be returned for use with a search text query", + args: { + name: "integer", + }, + }, + { + name: "--bias-position", + description: + "The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WSG84 format. 
The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive", + args: { + name: "list", + }, + }, + { + name: "--filter", + description: + "A structure which contains a set of inclusion/exclusion properties that results must posses in order to be returned as a result", + args: { + name: "structure", + }, + }, + { + name: "--additional-features", + description: + "A list of optional additional parameters, such as time zone, that can be requested for each result", + args: { + name: "list", + }, + }, + { + name: "--language", + description: + "A list of BCP 47 compliant language codes for the results to be rendered in. If there is no data for the result in the requested language, data will be returned in the default language for the entry", + args: { + name: "string", + }, + }, + { + name: "--political-view", + description: + "The alpha-2 or alpha-3 character code for the political view of a country. The political view applies to the results of the request to represent unresolved territorial claims through the point of view of the specified country", + args: { + name: "string", + }, + }, + { + name: "--intended-use", + description: + "Indicates if the results will be stored. Defaults to SingleUse, if left empty", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + ], +}; + +export default completionSpec; diff --git a/src/aws/geo-routes.ts b/src/aws/geo-routes.ts new file mode 100644 index 00000000000..f77f590634a --- /dev/null +++ b/src/aws/geo-routes.ts @@ -0,0 +1,713 @@ +const completionSpec: Fig.Spec = { + name: "geo-routes", + description: + "With the Amazon Location Routes API you can calculate routes and estimate travel time based on up-to-date road network and live traffic information. Calculate optimal travel routes and estimate travel times using up-to-date road network and traffic data. Key features include: Point-to-point routing with estimated travel time, distance, and turn-by-turn directions Multi-point route optimization to minimize travel time or distance Route matrices for efficient multi-destination planning Isoline calculations to determine reachable areas within specified time or distance thresholds Map-matching to align GPS traces with the road network", + subcommands: [ + { + name: "calculate-isolines", + description: + "Use the CalculateIsolines action to find service areas that can be reached in a given threshold of time, distance", + options: [ + { + name: "--allow", + description: "Features that are allowed while calculating. a route", + args: { + name: "structure", + }, + }, + { + name: "--arrival-time", + description: + "Time of arrival at the destination. 
Time format: YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--avoid", + description: + "Features that are avoided while calculating a route. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, it violates the avoidance and the returned response produces a notice for the violation", + args: { + name: "structure", + }, + }, + { + name: "--depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--no-depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--departure-time", + description: + "Time of departure from thr origin. Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--destination", + description: + "The final position for the route. In the World Geodetic System (WGS 84) format: [longitude, latitude]", + args: { + name: "list", + }, + }, + { + name: "--destination-options", + description: "Destination related options", + args: { + name: "structure", + }, + }, + { + name: "--isoline-geometry-format", + description: + "The format of the returned IsolineGeometry. Default Value:FlexiblePolyline", + args: { + name: "string", + }, + }, + { + name: "--isoline-granularity", + description: "Defines the granularity of the returned Isoline", + args: { + name: "structure", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--optimize-isoline-for", + description: + "Specifies the optimization criteria for when calculating an isoline. AccurateCalculation generates an isoline of higher granularity that is more precise. 
FastCalculation generates an isoline faster by reducing the granularity, and in turn the quality of the isoline. BalancedCalculation generates an isoline by balancing between quality and performance. Default Value: BalancedCalculation", + args: { + name: "string", + }, + }, + { + name: "--optimize-routing-for", + description: + "Specifies the optimization criteria for calculating a route. Default Value: FastestRoute", + args: { + name: "string", + }, + }, + { + name: "--origin", + description: "The start position for the route", + args: { + name: "list", + }, + }, + { + name: "--origin-options", + description: "Origin related options", + args: { + name: "structure", + }, + }, + { + name: "--thresholds", + description: + "Threshold to be used for the isoline calculation. Up to 3 thresholds per provided type can be requested", + args: { + name: "structure", + }, + }, + { + name: "--traffic", + description: "Traffic related options", + args: { + name: "structure", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. The mode Scooter also applies to motorcycles, set to Scooter when wanted to calculate options for motorcycles. Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "calculate-route-matrix", + description: + "Calculates route matrix containing the results for all pairs of Origins to Destinations. Each row corresponds to one entry in Origins. Each entry in the row corresponds to the route from that entry in Origins to an entry in Destinations positions", + options: [ + { + name: "--allow", + description: "Features that are allowed while calculating. a route", + args: { + name: "structure", + }, + }, + { + name: "--avoid", + description: + "Features that are avoided while calculating a route. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, it violates the avoidance and the returned response produces a notice for the violation", + args: { + name: "structure", + }, + }, + { + name: "--depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--no-depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--departure-time", + description: + "Time of departure from thr origin. 
Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--destinations", + description: "List of destinations for the route", + args: { + name: "list", + }, + }, + { + name: "--exclude", + description: + "Features to be strictly excluded while calculating the route", + args: { + name: "structure", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--optimize-routing-for", + description: + "Specifies the optimization criteria for calculating a route. Default Value: FastestRoute", + args: { + name: "string", + }, + }, + { + name: "--origins", + description: "The position in longitude and latitude for the origin", + args: { + name: "list", + }, + }, + { + name: "--routing-boundary", + description: + "Boundary within which the matrix is to be calculated. All data, origins and destinations outside the boundary are considered invalid. When request routing boundary was set as AutoCircle, the response routing boundary will return Circle derived from the AutoCircle settings", + args: { + name: "structure", + }, + }, + { + name: "--traffic", + description: "Traffic related options", + args: { + name: "structure", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "calculate-routes", + description: + "Calculates a route given the following required parameters: Origin and Destination", + options: [ + { + name: "--allow", + description: "Features that are allowed while calculating. a route", + args: { + name: "structure", + }, + }, + { + name: "--arrival-time", + description: + "Time of arrival at the destination. Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--avoid", + description: + "Features that are avoided while calculating a route. Avoidance is on a best-case basis. If an avoidance can't be satisfied for a particular case, it violates the avoidance and the returned response produces a notice for the violation", + args: { + name: "structure", + }, + }, + { + name: "--depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--no-depart-now", + description: "Uses the current time as the time of departure", + }, + { + name: "--departure-time", + description: + "Time of departure from thr origin. 
Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--destination", + description: + "The final position for the route. In the World Geodetic System (WGS 84) format: [longitude, latitude]", + args: { + name: "list", + }, + }, + { + name: "--destination-options", + description: "Destination related options", + args: { + name: "structure", + }, + }, + { + name: "--driver", + description: "Driver related options", + args: { + name: "structure", + }, + }, + { + name: "--exclude", + description: + "Features to be strictly excluded while calculating the route", + args: { + name: "structure", + }, + }, + { + name: "--instructions-measurement-system", + description: + "Measurement system to be used for instructions within steps in the response", + args: { + name: "string", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--languages", + description: + "List of languages for instructions within steps in the response. Instructions in the requested language are returned only if they are available", + args: { + name: "list", + }, + }, + { + name: "--leg-additional-features", + description: + "A list of optional additional parameters such as timezone that can be requested for each result. Elevation: Retrieves the elevation information for each location. Incidents: Provides information on traffic incidents along the route. PassThroughWaypoints: Indicates waypoints that are passed through without stopping. Summary: Returns a summary of the route, including distance and duration. Tolls: Supplies toll cost information along the route. TravelStepInstructions: Provides step-by-step instructions for travel along the route. 
TruckRoadTypes: Returns information about road types suitable for trucks. TypicalDuration: Gives typical travel duration based on historical data. Zones: Specifies the time zone information for each waypoint", + args: { + name: "list", + }, + }, + { + name: "--leg-geometry-format", + description: + "Specifies the format of the geometry returned for each leg of the route. You can choose between two different geometry encoding formats. FlexiblePolyline: A compact and precise encoding format for the leg geometry. For more information on the format, see the GitHub repository for FlexiblePolyline . Simple: A less compact encoding, which is easier to decode but may be less precise and result in larger payloads", + args: { + name: "string", + }, + }, + { + name: "--max-alternatives", + description: + "Maximum number of alternative routes to be provided in the response, if available", + args: { + name: "integer", + }, + }, + { + name: "--optimize-routing-for", + description: + "Specifies the optimization criteria for calculating a route. Default Value: FastestRoute", + args: { + name: "string", + }, + }, + { + name: "--origin", + description: "The start position for the route", + args: { + name: "list", + }, + }, + { + name: "--origin-options", + description: "Origin related options", + args: { + name: "structure", + }, + }, + { + name: "--span-additional-features", + description: + "A list of optional features such as SpeedLimit that can be requested for a Span. A span is a section of a Leg for which the requested features have the same values", + args: { + name: "list", + }, + }, + { + name: "--tolls", + description: "Toll related options", + args: { + name: "structure", + }, + }, + { + name: "--traffic", + description: "Traffic related options", + args: { + name: "structure", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. 
Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--travel-step-type", + description: + "Type of step returned by the response. Default provides basic steps intended for web based applications. TurnByTurn provides detailed instructions with more granularity intended for a turn based naviagtion system", + args: { + name: "string", + }, + }, + { + name: "--waypoints", + description: "List of waypoints between the Origin and Destination", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "optimize-waypoints", + description: + "Calculates the optimal order to travel between a set of waypoints to minimize either the travel time or the distance travelled during the journey, based on road network restrictions and the traffic pattern data", + options: [ + { + name: "--avoid", + description: + "Features that are avoided while calculating a route. Avoidance is on a best-case basis. 
If an avoidance can't be satisfied for a particular case, this setting is ignored", + args: { + name: "structure", + }, + }, + { + name: "--departure-time", + description: + "Departure time from the waypoint. Time format:YYYY-MM-DDThh:mm:ss.sssZ | YYYY-MM-DDThh:mm:ss.sss+hh:mm Examples: 2020-04-22T17:57:24Z 2020-04-22T17:57:24+02:00", + args: { + name: "string", + }, + }, + { + name: "--destination", + description: + "The final position for the route in the World Geodetic System (WGS 84) format: [longitude, latitude]", + args: { + name: "list", + }, + }, + { + name: "--destination-options", + description: "Destination related options", + args: { + name: "structure", + }, + }, + { + name: "--driver", + description: "Driver related options", + args: { + name: "structure", + }, + }, + { + name: "--exclude", + description: + "Features to be strictly excluded while calculating the route", + args: { + name: "structure", + }, + }, + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--optimize-sequencing-for", + description: + "Specifies the optimization criteria for the calculated sequence. Default Value: FastestRoute", + args: { + name: "string", + }, + }, + { + name: "--origin", + description: "The start position for the route", + args: { + name: "list", + }, + }, + { + name: "--origin-options", + description: "Origin related options", + args: { + name: "structure", + }, + }, + { + name: "--traffic", + description: "Traffic-related options", + args: { + name: "structure", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. 
Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--waypoints", + description: "List of waypoints between the Origin and Destination", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "snap-to-roads", + description: + "The SnapToRoads action matches GPS trace to roads most likely traveled on", + options: [ + { + name: "--key", + description: + "Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request", + args: { + name: "string", + }, + }, + { + name: "--snapped-geometry-format", + description: + "Chooses what the returned SnappedGeometry format should be. Default Value: FlexiblePolyline", + args: { + name: "string", + }, + }, + { + name: "--snap-radius", + description: + "The radius around the provided tracepoint that is considered for snapping. 
Unit: meters Default value: 300", + args: { + name: "long", + }, + }, + { + name: "--trace-points", + description: + "List of trace points to be snapped onto the road network", + args: { + name: "list", + }, + }, + { + name: "--travel-mode", + description: + "Specifies the mode of transport when calculating a route. Used in estimating the speed of travel and road compatibility. Default Value: Car", + args: { + name: "string", + }, + }, + { + name: "--travel-mode-options", + description: + "Travel mode related options for the provided travel mode", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + ], +}; + +export default completionSpec; diff --git a/src/aws/glue.ts b/src/aws/glue.ts index 18588bd079c..70ec3c4c090 100644 --- a/src/aws/glue.ts +++ b/src/aws/glue.ts @@ -1055,6 +1055,96 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-column-statistics-task-settings", + description: "Creates settings for a column statistics task", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to generate column statistics", + args: { + name: "string", + }, + }, + { + name: "--role", + description: "The role used for running the column statistics", + args: { + name: "string", + }, + }, + { + name: "--schedule", + description: + "A schedule for running the column statistics, specified in CRON syntax", + args: { + name: "string", + }, + }, + { + name: "--column-name-list", + description: "A list of column names for which to run statistics", + args: { + name: "list", + }, + }, + { + name: "--sample-size", + description: "The percentage of data to sample", + args: { + name: "double", + }, + }, + { + name: "--catalog-id", + description: + "The ID of the Data Catalog in which the database resides", + args: { + name: "string", + }, + }, + { + name: "--security-configuration", + description: + "Name of the security configuration that is used to encrypt CloudWatch logs", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: "A map of tags", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-connection", description: @@ -2931,6 +3021,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-column-statistics-task-settings", + description: "Deletes settings for a column statistics task", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to delete column statistics", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-connection", description: "Deletes a connection from the Data Catalog", @@ -4264,6 +4392,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-column-statistics-task-settings", + description: "Gets settings for a column statistics task", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to retrieve column statistics", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-connection", description: "Retrieves a connection definition from the Data Catalog", @@ -9629,6 +9795,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "start-column-statistics-task-run-schedule", + description: "Starts a column statistics task run schedule", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to start a column statistic task run schedule", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "start-crawler", description: @@ -10236,6 +10440,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "stop-column-statistics-task-run-schedule", + description: "Stops a column statistics task run schedule", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to stop a column statistic task run schedule", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "stop-crawler", description: "If the specified crawler is running, stops the crawl", @@ -10730,6 +10972,89 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-column-statistics-task-settings", + description: "Updates settings for a column statistics task", + options: [ + { + name: "--database-name", + description: "The name of the database where the table resides", + args: { + name: "string", + }, + }, + { + name: "--table-name", + description: + "The name of the table for which to generate column statistics", + args: { + name: "string", + }, + }, + { + name: "--role", + description: "The role used for running the column statistics", + args: { + name: "string", + }, + }, + { + name: "--schedule", + description: + "A schedule for running the column statistics, specified in CRON syntax", + args: { + name: "string", + }, + }, + { + name: "--column-name-list", + description: "A list of column names for which to run statistics", + args: { + name: "list", + }, + }, + { + name: "--sample-size", + description: "The percentage of data to sample", + args: { + name: "double", + }, + }, + { + name: "--catalog-id", + description: + "The ID of the Data Catalog in which the database resides", + args: { + name: "string", + }, + }, + { + name: "--security-configuration", + description: + "Name of the security configuration that is used to encrypt CloudWatch logs", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-connection", description: "Updates a connection definition in the Data Catalog", diff --git a/src/aws/guardduty.ts b/src/aws/guardduty.ts index a0fca2a1d09..77daa4b6dd3 100644 --- a/src/aws/guardduty.ts +++ b/src/aws/guardduty.ts @@ -59,7 +59,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty member account", + "The unique ID of the detector of the GuardDuty member account. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -107,7 +107,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector that specifies the GuardDuty service whose findings you want to archive", + "The ID of the detector that specifies the GuardDuty service whose findings you want to archive. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -218,7 +218,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The detector ID associated with the GuardDuty account for which you want to create a filter", + "The detector ID associated with the GuardDuty account for which you want to create a filter. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -304,7 +304,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account for which you want to create an IPSet", + "The unique ID of the detector of the GuardDuty account for which you want to create an IPSet. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -444,7 +444,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account for which you want to associate member accounts", + "The unique ID of the detector of the GuardDuty account for which you want to associate member accounts. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -484,7 +484,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the GuardDuty detector associated with the publishing destination", + "The ID of the GuardDuty detector associated with the publishing destination. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -539,7 +539,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector for which you need to create sample findings", + "The ID of the detector for which you need to create sample findings. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -578,7 +578,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account for which you want to create a ThreatIntelSet", + "The unique ID of the detector of the GuardDuty account for which you want to create a ThreatIntelSet. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -688,7 +688,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--detector-id", - description: "The unique ID of the detector that you want to delete", + description: + "The unique ID of the detector that you want to delete. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -719,7 +720,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with the filter", + "The unique ID of the detector that is associated with the filter. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -758,7 +759,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector associated with the IPSet", + "The unique ID of the detector associated with the IPSet. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -861,7 +862,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account whose members you want to delete", + "The unique ID of the detector of the GuardDuty account whose members you want to delete. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -901,7 +902,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector associated with the publishing destination to delete", + "The unique ID of the detector associated with the publishing destination to delete. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -940,7 +941,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with the threatIntelSet", + "The unique ID of the detector that is associated with the threatIntelSet. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -980,7 +981,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that the request is associated with", + "The unique ID of the detector that the request is associated with. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1068,7 +1069,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The detector ID of the delegated administrator for which you need to retrieve the information", + "The detector ID of the delegated administrator for which you need to retrieve the information. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1116,7 +1117,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector associated with the publishing destination to retrieve", + "The unique ID of the detector associated with the publishing destination to retrieve. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1354,7 +1355,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--detector-id", - description: "The unique ID of the GuardDuty detector", + description: + "The unique ID of the GuardDuty detector. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1401,7 +1403,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--detector-id", - description: "The unique ID of the detector that you want to get", + description: + "The unique ID of the detector that you want to get. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1433,7 +1436,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with this filter", + "The unique ID of the detector that is associated with this filter. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1472,7 +1475,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector that specifies the GuardDuty service whose findings you want to retrieve", + "The ID of the detector that specifies the GuardDuty service whose findings you want to retrieve. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1518,7 +1521,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector whose findings statistics you want to retrieve", + "The ID of the detector whose findings statistics you want to retrieve. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1588,7 +1591,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with the IPSet", + "The unique ID of the detector that is associated with the IPSet. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1683,7 +1686,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with this scan", + "The unique ID of the detector that is associated with this scan. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1715,7 +1718,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty member account", + "The unique ID of the detector of the GuardDuty member account. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1746,7 +1749,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--detector-id", - description: "The detector ID for the administrator account", + description: + "The detector ID for the administrator account. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1785,7 +1789,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account whose members you want to retrieve", + "The unique ID of the detector of the GuardDuty account whose members you want to retrieve. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1849,7 +1853,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty member account", + "The unique ID of the detector of the GuardDuty member account. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1889,7 +1893,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with the threatIntelSet", + "The unique ID of the detector that is associated with the threatIntelSet. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1929,7 +1933,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector that specifies the GuardDuty service whose usage statistics you want to retrieve", + "The ID of the detector that specifies the GuardDuty service whose usage statistics you want to retrieve. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -1999,7 +2003,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty account with which you want to invite members", + "The unique ID of the detector of the GuardDuty account with which you want to invite members. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2057,7 +2061,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector whose coverage details you want to retrieve", + "The unique ID of the detector whose coverage details you want to retrieve. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2208,7 +2212,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with the filter", + "The unique ID of the detector that is associated with the filter. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2280,7 +2284,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector that specifies the GuardDuty service whose findings you want to list", + "The ID of the detector that specifies the GuardDuty service whose findings you want to list. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2367,7 +2371,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with IPSet", + "The unique ID of the detector that is associated with IPSet. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2535,7 +2539,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with the member", + "The unique ID of the detector that is associated with the member. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2679,7 +2683,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The detector ID for which you want to retrieve the publishing destination", + "The detector ID for which you want to retrieve the publishing destination. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2759,7 +2763,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that is associated with the threatIntelSet", + "The unique ID of the detector that is associated with the threatIntelSet. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2863,7 +2867,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector of the GuardDuty administrator account associated with the member accounts to monitor", + "The unique ID of the detector of the GuardDuty administrator account associated with the member accounts to monitor. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2903,7 +2907,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector associated with the GuardDuty administrator account that is monitoring member accounts", + "The unique ID of the detector associated with the GuardDuty administrator account that is monitoring member accounts. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -2980,7 +2984,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector associated with the findings to unarchive", + "The ID of the detector associated with the findings to unarchive. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3056,7 +3060,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--detector-id", - description: "The unique ID of the detector to update", + description: + "The unique ID of the detector to update. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3121,7 +3126,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that specifies the GuardDuty service where you want to update a filter", + "The unique ID of the detector that specifies the GuardDuty service where you want to update a filter. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3192,7 +3197,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector that is associated with the findings for which you want to update the feedback", + "The ID of the detector that is associated with the findings for which you want to update the feedback. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3245,7 +3250,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The detectorID that specifies the GuardDuty service whose IPSet you want to update", + "The detectorID that specifies the GuardDuty service whose IPSet you want to update. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3365,7 +3370,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The unique ID of the detector that specifies the GuardDuty service where you want to update scan settings", + "The unique ID of the detector that specifies the GuardDuty service where you want to update scan settings. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3412,7 +3417,8 @@ const completionSpec: Fig.Spec = { options: [ { name: "--detector-id", - description: "The detector ID of the administrator account", + description: + "The detector ID of the administrator account. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3466,7 +3472,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector that configures the delegated administrator", + "The ID of the detector that configures the delegated administrator. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3531,7 +3537,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The ID of the detector associated with the publishing destinations to update", + "The ID of the detector associated with the publishing destinations to update. 
To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, @@ -3578,7 +3584,7 @@ const completionSpec: Fig.Spec = { { name: "--detector-id", description: - "The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to update", + "The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to update. To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API", args: { name: "string", }, diff --git a/src/aws/inspector2.ts b/src/aws/inspector2.ts index 90e6d52f419..13a968393bd 100644 --- a/src/aws/inspector2.ts +++ b/src/aws/inspector2.ts @@ -1556,7 +1556,7 @@ const completionSpec: Fig.Spec = { }, { name: "list-coverage", - description: "Lists coverage details for you environment", + description: "Lists coverage details for your environment", options: [ { name: "--filter-criteria", @@ -2390,7 +2390,7 @@ const completionSpec: Fig.Spec = { { name: "stop-cis-session", description: - "Stops a CIS session. This API is used by the Amazon Inspector SSM plugin to communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to start a CIS scan session for the scan ID supplied by the service", + "Stops a CIS session. This API is used by the Amazon Inspector SSM plugin to communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to stop a CIS scan session for the scan ID supplied by the service", options: [ { name: "--message", diff --git a/src/aws/keyspaces.ts b/src/aws/keyspaces.ts index 95463710132..69074d9bfba 100644 --- a/src/aws/keyspaces.ts +++ b/src/aws/keyspaces.ts @@ -6,7 +6,7 @@ const completionSpec: Fig.Spec = { { name: "create-keyspace", description: - "The CreateKeyspace operation adds a new keyspace to your account. 
In an Amazon Web Services account, keyspace names must be unique within each Region. CreateKeyspace is an asynchronous operation. You can monitor the creation status of the new keyspace by using the GetKeyspace operation. For more information, see Creating keyspaces in the Amazon Keyspaces Developer Guide", + "The CreateKeyspace operation adds a new keyspace to your account. In an Amazon Web Services account, keyspace names must be unique within each Region. CreateKeyspace is an asynchronous operation. You can monitor the creation status of the new keyspace by using the GetKeyspace operation. For more information, see Create a keyspace in the Amazon Keyspaces Developer Guide", options: [ { name: "--keyspace-name", @@ -53,7 +53,7 @@ const completionSpec: Fig.Spec = { { name: "create-table", description: - "The CreateTable operation adds a new table to the specified keyspace. Within a keyspace, table names must be unique. CreateTable is an asynchronous operation. When the request is received, the status of the table is set to CREATING. You can monitor the creation status of the new table by using the GetTable operation, which returns the current status of the table. You can start using a table when the status is ACTIVE. For more information, see Creating tables in the Amazon Keyspaces Developer Guide", + "The CreateTable operation adds a new table to the specified keyspace. Within a keyspace, table names must be unique. CreateTable is an asynchronous operation. When the request is received, the status of the table is set to CREATING. You can monitor the creation status of the new table by using the GetTable operation, which returns the current status of the table. You can start using a table when the status is ACTIVE. 
For more information, see Create a table in the Amazon Keyspaces Developer Guide", options: [ { name: "--keyspace-name", @@ -177,6 +177,53 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-type", + description: + "The CreateType operation creates a new user-defined type in the specified keyspace. For more information, see User-defined types (UDTs) in the Amazon Keyspaces Developer Guide", + options: [ + { + name: "--keyspace-name", + description: "The name of the keyspace", + args: { + name: "string", + }, + }, + { + name: "--type-name", + description: + "The name of the user-defined type. UDT names must contain 48 characters or less, must begin with an alphabetic character, and can only contain alpha-numeric characters and underscores. Amazon Keyspaces converts upper case characters automatically into lower case characters. Alternatively, you can declare a UDT name in double quotes. When declaring a UDT name inside double quotes, Amazon Keyspaces preserves upper casing and allows special characters. You can also use double quotes as part of the name when you create the UDT, but you must escape each double quote character with an additional double quote character", + args: { + name: "string", + }, + }, + { + name: "--field-definitions", + description: + "The field definitions, consisting of names and types, that define this type", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-keyspace", description: @@ -246,6 +293,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-type", + description: + "The DeleteType operation deletes a user-defined type (UDT). You can only delete a type that is not used in a table or another UDT", + options: [ + { + name: "--keyspace-name", + description: "The name of the keyspace of the to be deleted type", + args: { + name: "string", + }, + }, + { + name: "--type-name", + description: "The name of the type to be deleted", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-keyspace", description: @@ -280,7 +365,7 @@ const completionSpec: Fig.Spec = { { name: "get-table", description: - "Returns information about the table, including the table's name and current status, the keyspace name, configuration settings, and metadata. To read table metadata using GetTable, Select action permissions for the table and system tables are required to complete the operation", + "Returns information about the table, including the table's name and current status, the keyspace name, configuration settings, and metadata. To read table metadata using GetTable, the IAM principal needs Select action permissions for the table and the system keyspace", options: [ { name: "--keyspace-name", @@ -353,9 +438,48 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-type", + description: + "The GetType operation returns information about the type, for example the field definitions, the timestamp when the type was last modified, the level of nesting, the status, and details about if the type is used in other types and tables. To read keyspace metadata using GetType, the IAM principal needs Select action permissions for the system keyspace", + options: [ + { + name: "--keyspace-name", + description: "The name of the keyspace that contains this type", + args: { + name: "string", + }, + }, + { + name: "--type-name", + description: + "The formatted name of the type. For example, if the name of the type was created without double quotes, Amazon Keyspaces saved the name in lower-case characters. If the name was created in double quotes, you must use double quotes to specify the type name", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-keyspaces", - description: "Returns a list of keyspaces", + description: "The ListKeyspaces operation returns a list of keyspaces", options: [ { name: "--next-token", @@ -418,7 +542,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-tables", - description: "Returns a list of tables for a specified keyspace", + description: + "The ListTables operation returns a list of tables for a specified keyspace. To read keyspace metadata using ListTables, the IAM principal needs Select action permissions for the system keyspace", options: [ { name: "--next-token", @@ -489,7 +614,7 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-resource", description: - "Returns a list of all tags associated with the specified Amazon Keyspaces resource", + "Returns a list of all tags associated with the specified Amazon Keyspaces resource. 
To read keyspace metadata using ListTagsForResource, the IAM principal needs Select action permissions for the specified resource and the system keyspace", options: [ { name: "--resource-arn", @@ -558,6 +683,78 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-types", + description: + "The ListTypes operation returns a list of types for a specified keyspace. To read keyspace metadata using ListTypes, the IAM principal needs Select action permissions for the system keyspace", + options: [ + { + name: "--next-token", + description: + "The pagination token. To resume pagination, provide the NextToken value as an argument of a subsequent API invocation", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The total number of types to return in the output. If the total number of types available is more than the value specified, a NextToken is provided in the output. To resume pagination, provide the NextToken value as an argument of a subsequent API invocation", + args: { + name: "integer", + }, + }, + { + name: "--keyspace-name", + description: + "The name of the keyspace that contains the listed types", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. 
This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "restore-table", description: diff --git a/src/aws/lakeformation.ts b/src/aws/lakeformation.ts index bc876e41a97..b729f2bc7ec 100644 --- a/src/aws/lakeformation.ts +++ b/src/aws/lakeformation.ts @@ -322,6 +322,60 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-lf-tag-expression", + description: + "Creates a new LF-Tag expression with the provided name, description, catalog ID, and expression body. This call fails if a LF-Tag expression with the same name already exists in the caller\u2019s account or if the underlying LF-Tags don't exist. To call this API operation, caller needs the following Lake Formation permissions: CREATE_LF_TAG_EXPRESSION on the root catalog resource. GRANT_WITH_LF_TAG_EXPRESSION on all underlying LF-Tag key:value pairs included in the expression", + options: [ + { + name: "--name", + description: "A name for the expression", + args: { + name: "string", + }, + }, + { + name: "--description", + description: + "A description with information about the LF-Tag expression", + args: { + name: "string", + }, + }, + { + name: "--catalog-id", + description: + "The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment", + args: { + name: "string", + }, + }, + { + name: "--expression", + description: "A list of LF-Tag conditions (key-value pairs)", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-lake-formation-identity-center-configuration", description: @@ -507,6 +561,45 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-lf-tag-expression", + description: + "Deletes the LF-Tag expression. The caller must be a data lake admin or have DROP permissions on the LF-Tag expression. Deleting a LF-Tag expression will also delete all LFTagPolicy permissions referencing the LF-Tag expression", + options: [ + { + name: "--name", + description: "The name for the LF-Tag expression", + args: { + name: "string", + }, + }, + { + name: "--catalog-id", + description: + "The identifier for the Data Catalog. By default, the account ID in which the LF-Tag expression is saved", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-lake-formation-identity-center-configuration", description: @@ -994,6 +1087,45 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-lf-tag-expression", + description: + "Returns the details about the LF-Tag expression. The caller must be a data lake admin or must have DESCRIBE permission on the LF-Tag expression resource", + options: [ + { + name: "--name", + description: "The name for the LF-Tag expression", + args: { + name: "string", + }, + }, + { + name: "--catalog-id", + description: + "The identifier for the Data Catalog. By default, the account ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-query-state", description: @@ -1262,7 +1394,7 @@ const completionSpec: Fig.Spec = { { name: "get-temporary-glue-table-credentials", description: - "Allows a caller in a secure environment to assume a role with permission to access Amazon S3. In order to vend such credentials, Lake Formation assumes the role associated with a registered location, for example an Amazon S3 bucket, with a scope down policy which restricts the access to a single prefix", + "Allows a caller in a secure environment to assume a role with permission to access Amazon S3. In order to vend such credentials, Lake Formation assumes the role associated with a registered location, for example an Amazon S3 bucket, with a scope down policy which restricts the access to a single prefix. To call this API, the role that the service assumes must have lakeformation:GetDataAccess permission on the resource", options: [ { name: "--table-arn", @@ -1570,6 +1702,77 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-lf-tag-expressions", + description: + "Returns the LF-Tag expressions in caller\u2019s account filtered based on caller's permissions. Data Lake and read only admins implicitly can see all tag expressions in their account, else caller needs DESCRIBE permissions on tag expression", + options: [ + { + name: "--catalog-id", + description: + "The identifier for the Data Catalog. 
By default, the account ID", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "The maximum number of results to return", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "A continuation token, if this is not the first call to retrieve this list", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-lf-tags", description: "Lists LF-tags that the requester has permission to view", @@ -1995,7 +2198,7 @@ const completionSpec: Fig.Spec = { { name: "register-resource", description: - "Registers the resource as managed by the Data Catalog. To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy. The following request registers a new location and gives Lake Formation permission to use the service-linked role to access that location. ResourceArn = arn:aws:s3:::my-bucket UseServiceLinkedRole = true If UseServiceLinkedRole is not set to true, you must provide or set the RoleArn: arn:aws:iam::12345:role/my-data-access-role", + "Registers the resource as managed by the Data Catalog. To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. 
Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy. The following request registers a new location and gives Lake Formation permission to use the service-linked role to access that location. ResourceArn = arn:aws:s3:::my-bucket/ UseServiceLinkedRole = true If UseServiceLinkedRole is not set to true, you must provide or set the RoleArn: arn:aws:iam::12345:role/my-data-access-role", options: [ { name: "--resource-arn", @@ -2485,6 +2688,61 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-lf-tag-expression", + description: + "Updates the name of the LF-Tag expression to the new description and expression body provided. Updating a LF-Tag expression immediately changes the permission boundaries of all existing LFTagPolicy permission grants that reference the given LF-Tag expression", + options: [ + { + name: "--name", + description: "The name for the LF-Tag expression", + args: { + name: "string", + }, + }, + { + name: "--description", + description: + "The description with information about the saved LF-Tag expression", + args: { + name: "string", + }, + }, + { + name: "--catalog-id", + description: + "The identifier for the Data Catalog. By default, the account ID", + args: { + name: "string", + }, + }, + { + name: "--expression", + description: + "The LF-Tag expression body composed of one more LF-Tag key-value pairs", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-lake-formation-identity-center-configuration", description: "Updates the IAM Identity Center connection parameters", @@ -2687,8 +2945,7 @@ const completionSpec: Fig.Spec = { }, { name: "--storage-optimizer-config", - description: - "Name of the table for which to enable the storage optimizer", + description: "Name of the configuration for the storage optimizer", args: { name: "map", }, diff --git a/src/aws/lambda.ts b/src/aws/lambda.ts index aa77ec3eebb..0a155a88504 100644 --- a/src/aws/lambda.ts +++ b/src/aws/lambda.ts @@ -1603,7 +1603,7 @@ const completionSpec: Fig.Spec = { { name: "--kms-key-arn", description: - "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). 
If you don't provide a customer managed key, Lambda uses a default service key", + "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources: The function's environment variables. The function's Lambda SnapStart snapshots. When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda. The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key", args: { name: "string", generators: generators.listKmsKeys, @@ -4741,6 +4741,14 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--source-kms-key-arn", + description: + "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services managed key", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -4851,7 +4859,7 @@ const completionSpec: Fig.Spec = { { name: "--kms-key-arn", description: - "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's environment variables. When Lambda SnapStart is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). 
If you don't provide a customer managed key, Lambda uses a default service key", + "The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources: The function's environment variables. The function's Lambda SnapStart snapshots. When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda. The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key", args: { name: "string", generators: generators.listKmsKeys, diff --git a/src/aws/logs.ts b/src/aws/logs.ts index 6492cc2d2e4..a3d6458bd8e 100644 --- a/src/aws/logs.ts +++ b/src/aws/logs.ts @@ -85,7 +85,7 @@ const completionSpec: Fig.Spec = { { name: "create-delivery", description: - "Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. 
Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries", + "Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. 
Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. To update an existing delivery configuration, use UpdateDeliveryConfiguration", options: [ { name: "--delivery-source-name", @@ -106,7 +106,7 @@ const completionSpec: Fig.Spec = { { name: "--record-fields", description: - "The list of record fields to be delivered to the destination, in order. If the delivery\u2019s log source has mandatory fields, they must be included in this list", + "The list of record fields to be delivered to the destination, in order. If the delivery's log source has mandatory fields, they must be included in this list", args: { name: "list", }, @@ -122,7 +122,7 @@ const completionSpec: Fig.Spec = { { name: "--s3-delivery-configuration", description: - "This structure contains parameters that are valid only when the delivery\u2019s delivery destination is an S3 bucket", + "This structure contains parameters that are valid only when the delivery's delivery destination is an S3 bucket", args: { name: "structure", }, @@ -208,7 +208,7 @@ const completionSpec: Fig.Spec = { { name: "--destination-prefix", description: - "The prefix used as the start of the key for every object exported. If you don't specify a value, the default is exportedlogs", + "The prefix used as the start of the key for every object exported. If you don't specify a value, the default is exportedlogs. The length of this parameter must comply with the S3 object key name length limits. 
The object key name is a sequence of Unicode characters with UTF-8 encoding, and can be up to 1,024 bytes", args: { name: "string", }, @@ -1393,7 +1393,7 @@ const completionSpec: Fig.Spec = { { name: "describe-log-groups", description: - "Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name. CloudWatch Logs doesn\u2019t support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability", + "Lists the specified log groups. You can list all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name. CloudWatch Logs doesn't support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability", options: [ { name: "--account-identifiers", @@ -3349,7 +3349,7 @@ const completionSpec: Fig.Spec = { { name: "put-retention-policy", description: - "Sets the retention of the specified log group. 
With a retention policy, you can configure the number of days for which to retain log events in the specified log group. CloudWatch Logs doesn\u2019t immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer. To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven\u2019t been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted. When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the storedBytes value to see how many bytes a log group is storing", + "Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group. CloudWatch Logs doesn't immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer. To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven't been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. 
To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted. When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the storedBytes value to see how many bytes a log group is storing", options: [ { name: "--log-group-name", @@ -3554,7 +3554,7 @@ const completionSpec: Fig.Spec = { { name: "--limit", description: - "The maximum number of log events to return in the query. If the query string uses the fields command, only the specified fields and their values are returned. The default is 1000", + "The maximum number of log events to return in the query. If the query string uses the fields command, only the specified fields and their values are returned. The default is 10,000", args: { name: "integer", }, @@ -3613,7 +3613,7 @@ const completionSpec: Fig.Spec = { { name: "tag-log-group", description: - "The TagLogGroup operation is on the path to deprecation. We recommend that you use TagResource instead. Adds or updates the specified tags for the specified log group. To list the tags for a log group, use ListTagsForResource. To remove tags, use UntagResource. For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs in the Amazon CloudWatch Logs User Guide. CloudWatch Logs doesn\u2019t support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys. 
For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags", + "The TagLogGroup operation is on the path to deprecation. We recommend that you use TagResource instead. Adds or updates the specified tags for the specified log group. To list the tags for a log group, use ListTagsForResource. To remove tags, use UntagResource. For more information about tags, see Tag Log Groups in Amazon CloudWatch Logs in the Amazon CloudWatch Logs User Guide. CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags", options: [ { name: "--log-group-name", @@ -3730,7 +3730,7 @@ const completionSpec: Fig.Spec = { { name: "untag-log-group", description: - "The UntagLogGroup operation is on the path to deprecation. We recommend that you use UntagResource instead. Removes the specified tags from the specified log group. To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource. CloudWatch Logs doesn\u2019t support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys", + "The UntagLogGroup operation is on the path to deprecation. We recommend that you use UntagResource instead. Removes the specified tags from the specified log group. To list the tags for a log group, use ListTagsForResource. To add tags, use TagResource. 
CloudWatch Logs doesn't support IAM policies that prevent users from assigning specified tags to log groups using the aws:Resource/key-name or aws:TagKeys condition keys", options: [ { name: "--log-group-name", @@ -3807,7 +3807,7 @@ const completionSpec: Fig.Spec = { { name: "update-anomaly", description: - "Use this operation to suppress anomaly detection for a specified anomaly or pattern. If you suppress an anomaly, CloudWatch Logs won\u2019t report new occurrences of that anomaly and won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won\u2019t report any anomalies related to that pattern. You must specify either anomalyId or patternId, but you can't specify both parameters in the same operation. If you have previously used this operation to suppress detection of a pattern or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. To do this, use this operation and specify the anomaly or pattern to stop suppressing, and omit the suppressionType and suppressionPeriod parameters", + "Use this operation to suppress anomaly detection for a specified anomaly or pattern. If you suppress an anomaly, CloudWatch Logs won't report new occurrences of that anomaly and won't update that anomaly with new data. If you suppress a pattern, CloudWatch Logs won't report any anomalies related to that pattern. You must specify either anomalyId or patternId, but you can't specify both parameters in the same operation. If you have previously used this operation to suppress detection of a pattern or anomaly, you can use it again to cause CloudWatch Logs to end the suppression. 
To do this, use this operation and specify the anomaly or pattern to stop suppressing, and omit the suppressionType and suppressionPeriod parameters", options: [ { name: "--anomaly-id", @@ -3849,6 +3849,16 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--baseline", + description: + "Set this to true to prevent CloudWatch Logs from displaying this behavior as an anomaly in the future. The behavior is then treated as baseline behavior. However, if similar but more severe occurrences of this behavior occur in the future, those will still be reported as anomalies. The default is false", + }, + { + name: "--no-baseline", + description: + "Set this to true to prevent CloudWatch Logs from displaying this behavior as an anomaly in the future. The behavior is then treated as baseline behavior. However, if similar but more severe occurrences of this behavior occur in the future, those will still be reported as anomalies. The default is false", + }, { name: "--cli-input-json", description: @@ -3883,7 +3893,7 @@ const completionSpec: Fig.Spec = { { name: "--record-fields", description: - "The list of record fields to be delivered to the destination, in order. If the delivery\u2019s log source has mandatory fields, they must be included in this list", + "The list of record fields to be delivered to the destination, in order. 
If the delivery's log source has mandatory fields, they must be included in this list", args: { name: "list", }, @@ -3899,7 +3909,7 @@ const completionSpec: Fig.Spec = { { name: "--s3-delivery-configuration", description: - "This structure contains parameters that are valid only when the delivery\u2019s delivery destination is an S3 bucket", + "This structure contains parameters that are valid only when the delivery's delivery destination is an S3 bucket", args: { name: "structure", }, diff --git a/src/aws/mediapackagev2.ts b/src/aws/mediapackagev2.ts index f978607053a..c7211ae22af 100644 --- a/src/aws/mediapackagev2.ts +++ b/src/aws/mediapackagev2.ts @@ -3,6 +3,69 @@ const completionSpec: Fig.Spec = { description: "This guide is intended for creating AWS Elemental MediaPackage resources in MediaPackage Version 2 (v2) starting from May 2023. To get started with MediaPackage v2, create your MediaPackage resources. There isn't an automated process to migrate your resources from MediaPackage v1 to MediaPackage v2. The names of the entities that you use to access this API, like URLs and ARNs, all have the versioning information added, like \"v2\", to distinguish from the prior version. If you used MediaPackage prior to this release, you can't use the MediaPackage v2 CLI or the MediaPackage v2 API to access any MediaPackage v1 resources. If you created resources in MediaPackage v1, use video on demand (VOD) workflows, and aren't looking to migrate to MediaPackage v2 yet, see the MediaPackage v1 Live API Reference. This is the AWS Elemental MediaPackage v2 Live REST API Reference. It describes all the MediaPackage API operations for live content in detail, and provides sample requests, responses, and errors for the supported web services protocols. We assume that you have the IAM permissions that you need to use MediaPackage via the REST API. 
We also assume that you are familiar with the features and operations of MediaPackage, as described in the AWS Elemental MediaPackage User Guide", subcommands: [ + { + name: "cancel-harvest-job", + description: "Cancels an in-progress harvest job", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group containing the channel from which the harvest job is running", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: + "The name of the channel from which the harvest job is running", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint that the harvest job is harvesting from. This cannot be changed after the harvest job is submitted", + args: { + name: "string", + }, + }, + { + name: "--harvest-job-name", + description: + "The name of the harvest job to cancel. This name must be unique within the channel and cannot be changed after the harvest job is submitted", + args: { + name: "string", + }, + }, + { + name: "--e-tag", + description: + "The current Entity Tag (ETag) associated with the harvest job. Used for concurrency control", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-channel", description: @@ -131,6 +194,106 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-harvest-job", + description: + "Creates a new harvest job to export content from a MediaPackage v2 channel to an S3 bucket", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group containing the channel from which to harvest content", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: "The name of the channel from which to harvest content", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint from which to harvest content", + args: { + name: "string", + }, + }, + { + name: "--description", + description: "An optional description for the harvest job", + args: { + name: "string", + }, + }, + { + name: "--harvested-manifests", + description: "A list of manifests to be harvested", + args: { + name: "structure", + }, + }, + { + name: "--schedule-configuration", + description: + "The configuration for when the harvest job should run, including start and end times", + args: { + name: "structure", + }, + }, + { + name: "--destination", + description: + "The S3 destination where the harvested content will be placed", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "A unique, case-sensitive identifier that you provide to ensure the idempotency of the request", + args: { + name: "string", + }, + }, + { + name: "--harvest-job-name", + description: + "A name for the harvest job. 
This name must be unique within the channel", + args: { + name: "string", + }, + }, + { + name: "--tags", + description: "A collection of tags associated with the harvest job", + args: { + name: "map", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-origin-endpoint", description: @@ -573,6 +736,60 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-harvest-job", + description: "Retrieves the details of a specific harvest job", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group containing the channel associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: + "The name of the channel associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--harvest-job-name", + description: "The name of the harvest job to retrieve", + args: { + name: "string", + }, + }, + { + name: 
"--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-origin-endpoint", description: @@ -805,6 +1022,102 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-harvest-jobs", + description: + "Retrieves a list of harvest jobs that match the specified criteria", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group to filter the harvest jobs by. If specified, only harvest jobs associated with channels in this group will be returned", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: + "The name of the channel to filter the harvest jobs by. If specified, only harvest jobs associated with this channel will be returned", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint to filter the harvest jobs by. If specified, only harvest jobs associated with this origin endpoint will be returned", + args: { + name: "string", + }, + }, + { + name: "--status", + description: + "The status to filter the harvest jobs by. 
If specified, only harvest jobs with this status will be returned", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum number of harvest jobs to return in a single request. If not specified, a default value will be used", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "A token used for pagination. Provide this value in subsequent requests to retrieve the next set of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. 
To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-origin-endpoints", description: @@ -1316,6 +1629,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "wait", + description: + "Wait until a particular condition is satisfied. Each subcommand polls an API until the listed requirement is met", + subcommands: [ + { + name: "harvest-job-finished", + description: + "Wait until JMESPath query Status returns COMPLETED when polling with ``get-harvest-job``. It will poll every 2 seconds until a successful state has been reached. 
This will exit with a return code of 255 after 60 failed checks", + options: [ + { + name: "--channel-group-name", + description: + "The name of the channel group containing the channel associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--channel-name", + description: + "The name of the channel associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--origin-endpoint-name", + description: + "The name of the origin endpoint associated with the harvest job", + args: { + name: "string", + }, + }, + { + name: "--harvest-job-name", + description: "The name of the harvest job to retrieve", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + ], + }, ], }; export default completionSpec; diff --git a/src/aws/mwaa.ts b/src/aws/mwaa.ts index dccbb56456b..9d340db41b8 100644 --- a/src/aws/mwaa.ts +++ b/src/aws/mwaa.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "mwaa", description: - "Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?. Endpoints api.airflow.{region}.amazonaws.com - This endpoint is used for environment management. CreateEnvironment DeleteEnvironment GetEnvironment ListEnvironments ListTagsForResource TagResource UntagResource UpdateEnvironment env.airflow.{region}.amazonaws.com - This endpoint is used to operate the Airflow environment. CreateCliToken CreateWebLoginToken Regions For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference", + "Amazon Managed Workflows for Apache Airflow This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What is Amazon MWAA?. Endpoints api.airflow.{region}.amazonaws.com - This endpoint is used for environment management. CreateEnvironment DeleteEnvironment GetEnvironment ListEnvironments ListTagsForResource TagResource UntagResource UpdateEnvironment env.airflow.{region}.amazonaws.com - This endpoint is used to operate the Airflow environment. 
CreateCliToken CreateWebLoginToken InvokeRestApi Regions For a list of supported regions, see Amazon MWAA endpoints and quotas in the Amazon Web Services General Reference", subcommands: [ { name: "create-cli-token", @@ -38,7 +38,7 @@ const completionSpec: Fig.Spec = { { name: "create-environment", description: - "Creates an Amazon Managed Workflows for Apache Airflow (MWAA) environment", + "Creates an Amazon Managed Workflows for Apache Airflow (Amazon MWAA) environment", options: [ { name: "--name", @@ -163,7 +163,7 @@ const completionSpec: Fig.Spec = { { name: "--airflow-version", description: - "The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2 2.8.1", + "The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA). Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1", args: { name: "string", }, @@ -294,7 +294,7 @@ const completionSpec: Fig.Spec = { { name: "delete-environment", description: - "Deletes an Amazon Managed Workflows for Apache Airflow (MWAA) environment", + "Deletes an Amazon Managed Workflows for Apache Airflow (Amazon MWAA) environment", options: [ { name: "--name", @@ -355,6 +355,70 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "invoke-rest-api", + description: + "Invokes the Apache Airflow REST API on the webserver with the specified inputs. To learn more, see Using the Apache Airflow REST API", + options: [ + { + name: "--name", + description: + "The name of the Amazon MWAA environment. 
For example, MyMWAAEnvironment", + args: { + name: "string", + }, + }, + { + name: "--path", + description: + "The Apache Airflow REST API endpoint path to be called. For example, /dags/123456/clearTaskInstances. For more information, see Apache Airflow API", + args: { + name: "string", + }, + }, + { + name: "--method", + description: + "The HTTP method used for making Airflow REST API calls. For example, POST", + args: { + name: "string", + }, + }, + { + name: "--query-parameters", + description: + "Query parameters to be included in the Apache Airflow REST API call, provided as a JSON object", + args: { + name: "structure", + }, + }, + { + name: "--body", + description: + "The request body for the Apache Airflow REST API call, provided as a JSON object", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-environments", description: @@ -593,7 +657,7 @@ const completionSpec: Fig.Spec = { { name: "--airflow-version", description: - "The Apache Airflow version for your environment. 
To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1", + "The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA. Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment. Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1", args: { name: "string", }, diff --git a/src/aws/network-firewall.ts b/src/aws/network-firewall.ts index 0e8ee88af9d..d754867f4dd 100644 --- a/src/aws/network-firewall.ts +++ b/src/aws/network-firewall.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "network-firewall", description: - "This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. 
For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. Network Firewall supports Suricata version 6.0.9. For information about Suricata, see the Suricata website. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. 
In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints", + "This is the API Reference for Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors. The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the Amazon Web Services REST APIs, see Amazon Web Services APIs. To access Network Firewall using the REST API endpoint: https://network-firewall..amazonaws.com Alternatively, you can use one of the Amazon Web Services SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see Amazon Web Services SDKs. For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide. Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source network analysis and threat detection engine. You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. 
The following are just a few examples: Allow domains or IP addresses for known Amazon Web Services service endpoints, such as Amazon S3, and block all other forms of traffic. Use custom lists of known bad domains to limit the types of domain names that your applications can access. Perform deep packet inspection on traffic entering or leaving your VPC. Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used. To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide. To start using Network Firewall, do the following: (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy. 
In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints", subcommands: [ { name: "associate-firewall-policy", diff --git a/src/aws/opensearch.ts b/src/aws/opensearch.ts index 401aabf4cbd..bc9b7e8b105 100644 --- a/src/aws/opensearch.ts +++ b/src/aws/opensearch.ts @@ -145,6 +145,62 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--prerequisite-package-id-list", + description: + "A list of package IDs that must be associated with the domain before the package specified in the request can be associated", + args: { + name: "list", + }, + }, + { + name: "--association-configuration", + description: + "The configuration for associating a package with an Amazon OpenSearch Service domain", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "associate-packages", + description: + "Operation in the Amazon OpenSearch Service API for associating multiple packages with a domain simultaneously", + options: [ + { + name: "--package-list", + description: + "A list of packages and their prerequisites to be associated with a domain", + args: { + name: "list", + }, + }, + { + name: "--domain-name", + description: + "The name of an OpenSearch Service domain. Domain names are unique across the domains owned by an account within an Amazon Web Services Region", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -184,6 +240,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--service", + description: "The Amazon Web Services service SP to grant access to", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -277,6 +340,76 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-application", + description: "Creates an OpenSearch Application", + options: [ + { + name: "--client-token", + description: + "A unique client idempotency token. It will be auto generated if not provided", + args: { + name: "string", + }, + }, + { + name: "--name", + description: + "Name of the OpenSearch Appication to create. 
Application names are unique across the applications owned by an account within an Amazon Web Services Region", + args: { + name: "string", + }, + }, + { + name: "--data-sources", + description: + "Data sources to be associated with the OpenSearch Application", + args: { + name: "list", + }, + }, + { + name: "--iam-identity-center-options", + description: + "Settings of IAM Identity Center for the OpenSearch Application", + args: { + name: "structure", + }, + }, + { + name: "--app-configs", + description: + "Configurations of the OpenSearch Application, inlcuding admin configuration", + args: { + name: "list", + }, + }, + { + name: "--tag-list", + description: "A list of tags attached to a domain", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-domain", description: @@ -397,6 +530,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--identity-center-options", + description: + "Options for IAM Identity Center Option control for the domain", + args: { + name: "structure", + }, + }, { name: "--tag-list", description: "List of tags to add to the domain upon creation", @@ -546,6 +687,38 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--package-configuration", + description: + "The configuration parameters for the package being created", + args: { + name: "structure", + }, + }, + { + name: "--engine-version", + description: + "The version of the Amazon OpenSearch Service engine for which is compatible with the package. This can only be specified for package type ZIP-PLUGIN", + args: { + name: "string", + }, + }, + { + name: "--package-vending-options", + description: + "The vending options for the package being created. They determine if the package can be vended to other users", + args: { + name: "structure", + }, + }, + { + name: "--package-encryption-options", + description: + "The encryption parameters for the package being created", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -612,6 +785,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-application", + description: "Deletes an existing OpenSearch Application", + options: [ + { + name: "--id", + description: + "Unique identifier for the OpenSearch Application that you want to delete", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-data-source", description: @@ -1458,6 +1662,76 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "dissociate-packages", + description: "Dissociates multiple packages from a domain simulatneously", + options: [ + { + name: "--package-list", + description: "A list of package IDs to be dissociated from a domain", + args: { + name: "list", + }, + }, + { + name: "--domain-name", + description: + "The name of an OpenSearch Service domain. Domain names are unique across the domains owned by an account within an Amazon Web Services Region", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-application", + description: + "Check the configuration and status of an existing OpenSearch Application", + options: [ + { + name: "--id", + description: + "Unique identifier of the checked OpenSearch Application", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-compatible-versions", description: @@ -1690,6 +1964,77 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-applications", + description: "List all OpenSearch Applications under your account", + options: [ + { + name: "--next-token", + description: + "When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. 
Send the request again using the returned token to retrieve the next page", + args: { + name: "string", + }, + }, + { + name: "--statuses", + description: + "OpenSearch Application Status can be used as filters for the listing request. Possible values are CREATING, UPDATING, DELETING, FAILED, ACTIVE, and DELETED", + args: { + name: "list", + }, + }, + { + name: "--max-results", + description: + "An optional parameter that specifies the maximum number of results to return for a given request", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. 
To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-data-sources", description: @@ -2348,6 +2693,13 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--service", + description: "The service SP to revoke access from", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -2460,6 +2812,53 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "update-application", + description: "Update the OpenSearch Application", + options: [ + { + name: "--id", + description: + "Unique identifier of the OpenSearch Application to be updated", + args: { + name: "string", + }, + }, + { + name: "--data-sources", + description: + "Data sources to be associated with the OpenSearch Application", + args: { + name: "list", + }, + }, + { + name: "--app-configs", + description: + "Configurations to be changed for the OpenSearch Application", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "update-data-source", description: @@ -2632,6 +3031,13 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--identity-center-options", + description: "Container for IAM Identity Center Options settings", + args: { + name: "structure", + }, + }, { name: "--auto-tune-options", description: "Options for Auto-Tune", @@ -2732,6 +3138,67 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--package-configuration", + description: "The updated configuration details for a package", + args: { + name: "structure", + }, + }, + { + name: "--package-encryption-options", + description: "Encryption options for a package", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "update-package-scope", + description: + "Updates the scope of a package. Scope of the package defines users who can view and associate a package", + options: [ + { + name: "--package-id", + description: "ID of the package whose scope is being updated", + args: { + name: "string", + }, + }, + { + name: "--operation", + description: + "The operation to perform on the package scope (e.g., add/remove/override users)", + args: { + name: "string", + }, + }, + { + name: "--package-user-list", + description: + "List of users to be added or removed from the package scope", + args: { + name: "list", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/opensearchserverless.ts b/src/aws/opensearchserverless.ts index 4e9c364b93d..e2aa1ca884b 100644 --- a/src/aws/opensearchserverless.ts +++ b/src/aws/opensearchserverless.ts @@ -350,6 +350,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--iam-identity-center-options", + description: + "Describes IAM Identity Center options in the form of a key-value map. 
This field is required if you specify iamidentitycenter for the type parameter", + args: { + name: "structure", + }, + }, { name: "--name", description: "The name of the security configuration", @@ -1590,6 +1598,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--iam-identity-center-options-updates", + description: + "Describes IAM Identity Center options in the form of a key-value map", + args: { + name: "structure", + }, + }, { name: "--id", description: diff --git a/src/aws/outposts.ts b/src/aws/outposts.ts index 4099eb4c6d4..c858a26e8a9 100644 --- a/src/aws/outposts.ts +++ b/src/aws/outposts.ts @@ -702,6 +702,103 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-asset-instances", + description: + "A list of Amazon EC2 instances, belonging to all accounts, running on the specified Outpost. Does not include Amazon EBS or Amazon S3 instances", + options: [ + { + name: "--outpost-identifier", + description: "The ID of the Outpost", + args: { + name: "string", + }, + }, + { + name: "--asset-id-filter", + description: "Filters the results by asset ID", + args: { + name: "list", + }, + }, + { + name: "--instance-type-filter", + description: "Filters the results by instance ID", + args: { + name: "list", + }, + }, + { + name: "--account-id-filter", + description: "Filters the results by account ID", + args: { + name: "list", + }, + }, + { + name: "--aws-service-filter", + description: "Filters the results by Amazon Web Services service", + args: { + name: "list", + }, + }, + { + name: "--max-results", + description: "The maximum page size", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: "The pagination token", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-assets", description: @@ -786,6 +883,83 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-blocking-instances-for-capacity-task", + description: + "A list of Amazon EC2 instances running on the Outpost and belonging to the account that initiated the capacity task. Use this list to specify the instances you cannot stop to free up capacity to run the capacity task", + options: [ + { + name: "--outpost-identifier", + description: + "The ID or ARN of the Outpost associated with the specified capacity task", + args: { + name: "string", + }, + }, + { + name: "--capacity-task-id", + description: "The ID of the capacity task", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: "The maximum page size", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: "The pagination token", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. 
This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-capacity-tasks", description: @@ -1216,7 +1390,7 @@ const completionSpec: Fig.Spec = { { name: "start-capacity-task", description: - "Starts the specified capacity task. You can have one active capacity task for an order", + "Starts the specified capacity task. 
You can have one active capacity task per order or Outpost", options: [ { name: "--outpost-identifier", @@ -1241,6 +1415,14 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--instances-to-exclude", + description: + "List of user-specified running instances that must not be stopped in order to free up the capacity needed to run the capacity task", + args: { + name: "structure", + }, + }, { name: "--dry-run", description: @@ -1251,6 +1433,14 @@ const completionSpec: Fig.Spec = { description: "You can request a dry run to determine if the instance type and instance size changes is above or below available instance capacity. Requesting a dry run does not make any changes to your plan", }, + { + name: "--task-action-on-blocking-instances", + description: + "Specify one of the following options in case an instance is blocking the capacity task from running. WAIT_FOR_EVACUATION - Checks every 10 minutes over 48 hours to determine if instances have stopped and capacity is available to complete the task. FAIL_TASK - The capacity task fails", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/payment-cryptography-data.ts b/src/aws/payment-cryptography-data.ts index 23c3d92db1a..611799a142f 100644 --- a/src/aws/payment-cryptography-data.ts +++ b/src/aws/payment-cryptography-data.ts @@ -307,7 +307,7 @@ const completionSpec: Fig.Spec = { { name: "generate-pin-data", description: - "Generates pin-related data such as PIN, PIN Verification Value (PVV), PIN Block, and PIN Offset during new card issuance or reissuance. For more information, see Generate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN data is never transmitted in clear to or from Amazon Web Services Payment Cryptography. This operation generates PIN, PVV, or PIN Offset and then encrypts it using Pin Encryption Key (PEK) to create an EncryptedPinBlock for transmission from Amazon Web Services Payment Cryptography. 
This operation uses a separate Pin Verification Key (PVK) for VISA PVV generation. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: GenerateCardValidationData TranslatePinData VerifyPinData", + "Generates pin-related data such as PIN, PIN Verification Value (PVV), PIN Block, and PIN Offset during new card issuance or reissuance. For more information, see Generate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN data is never transmitted in clear to or from Amazon Web Services Payment Cryptography. This operation generates PIN, PVV, or PIN Offset and then encrypts it using Pin Encryption Key (PEK) to create an EncryptedPinBlock for transmission from Amazon Web Services Payment Cryptography. This operation uses a separate Pin Verification Key (PVK) for VISA PVV generation. Using ECDH key exchange, you can receive cardholder selectable PINs into Amazon Web Services Payment Cryptography. The ECDH derived key protects the incoming PIN block. You can also use it for reveal PIN, wherein the generated PIN block is protected by the ECDH derived key before transmission from Amazon Web Services Payment Cryptography. For more information on establishing ECDH derived keys, see the Generating keys in the Amazon Web Services Payment Cryptography User Guide. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Cross-account use: This operation can't be used across different Amazon Web Services accounts. 
Related operations: GenerateCardValidationData TranslatePinData VerifyPinData", options: [ { name: "--generation-key-identifier", @@ -320,7 +320,7 @@ const completionSpec: Fig.Spec = { { name: "--encryption-key-identifier", description: - "The keyARN of the PEK that Amazon Web Services Payment Cryptography uses to encrypt the PIN Block", + "The keyARN of the PEK that Amazon Web Services Payment Cryptography uses to encrypt the PIN Block. For ECDH, it is the keyARN of the asymmetric ECC key", args: { name: "string", }, @@ -356,6 +356,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--encryption-wrapped-key", + description: + "Parameter information of a WrappedKeyBlock for encryption key exchange", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -457,12 +465,12 @@ const completionSpec: Fig.Spec = { { name: "translate-pin-data", description: - "Translates encrypted PIN block from and to ISO 9564 formats 0,1,3,4. For more information, see Translate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN block translation involves changing the encrytion of PIN block from one encryption key to another encryption key and changing PIN block format from one to another without PIN block data leaving Amazon Web Services Payment Cryptography. The encryption key transformation can be from PEK (Pin Encryption Key) to BDK (Base Derivation Key) for DUKPT or from BDK for DUKPT to PEK. Amazon Web Services Payment Cryptography supports TDES and AES key derivation type for DUKPT translations. This operation also supports dynamic keys, allowing you to pass a dynamic PEK as a TR-31 WrappedKeyBlock. This can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To translate PIN block using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped PEK. 
The incoming wrapped key shall have a key purpose of P0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide. The allowed combinations of PIN block format translations are guided by PCI. It is important to note that not all encrypted PIN block formats (example, format 1) require PAN (Primary Account Number) as input. And as such, PIN block format that requires PAN (example, formats 0,3,4) cannot be translated to a format (format 1) that does not require a PAN for generation. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Amazon Web Services Payment Cryptography currently supports ISO PIN block 4 translation for PIN block built using legacy PAN length. That is, PAN is the right most 12 digits excluding the check digits. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: GeneratePinData VerifyPinData", + "Translates encrypted PIN block from and to ISO 9564 formats 0,1,3,4. For more information, see Translate PIN data in the Amazon Web Services Payment Cryptography User Guide. PIN block translation involves changing a PIN block from one encryption key to another and optionally change its format. PIN block translation occurs entirely within the HSM boundary and PIN data never enters or leaves Amazon Web Services Payment Cryptography in clear text. The encryption key transformation can be from PEK (Pin Encryption Key) to BDK (Base Derivation Key) for DUKPT or from BDK for DUKPT to PEK. Amazon Web Services Payment Cryptography also supports use of dynamic keys and ECDH (Elliptic Curve Diffie-Hellman) based key exchange for this operation. Dynamic keys allow you to pass a PEK as a TR-31 WrappedKeyBlock. 
They can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To translate PIN block using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped PEK. The incoming wrapped key shall have a key purpose of P0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide. Using ECDH key exchange, you can receive cardholder selectable PINs into Amazon Web Services Payment Cryptography. The ECDH derived key protects the incoming PIN block, which is translated to a PEK encrypted PIN block for use within the service. You can also use ECDH for reveal PIN, wherein the service translates the PIN block from PEK to a ECDH derived encryption key. For more information on establishing ECDH derived keys, see the Generating keys in the Amazon Web Services Payment Cryptography User Guide. The allowed combinations of PIN block format translations are guided by PCI. It is important to note that not all encrypted PIN block formats (example, format 1) require PAN (Primary Account Number) as input. And as such, PIN block format that requires PAN (example, formats 0,3,4) cannot be translated to a format (format 1) that does not require a PAN for generation. For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide. Amazon Web Services Payment Cryptography currently supports ISO PIN block 4 translation for PIN block built using legacy PAN length. That is, PAN is the right most 12 digits excluding the check digits. Cross-account use: This operation can't be used across different Amazon Web Services accounts. 
Related operations: GeneratePinData VerifyPinData", options: [ { name: "--incoming-key-identifier", description: - "The keyARN of the encryption key under which incoming PIN block data is encrypted. This key type can be PEK or BDK. When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping key for PIN block. Otherwise, it is the key identifier used to perform the operation", + "The keyARN of the encryption key under which incoming PIN block data is encrypted. This key type can be PEK or BDK. For dynamic keys, it is the keyARN of KEK of the TR-31 wrapped PEK. For ECDH, it is the keyARN of the asymmetric ECC key", args: { name: "string", }, @@ -470,7 +478,7 @@ const completionSpec: Fig.Spec = { { name: "--outgoing-key-identifier", description: - "The keyARN of the encryption key for encrypting outgoing PIN block data. This key type can be PEK or BDK", + "The keyARN of the encryption key for encrypting outgoing PIN block data. This key type can be PEK or BDK. For ECDH, it is the keyARN of the asymmetric ECC key", args: { name: "string", }, @@ -806,6 +814,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--encryption-wrapped-key", + description: + "Parameter information of a WrappedKeyBlock for encryption key exchange", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/payment-cryptography.ts b/src/aws/payment-cryptography.ts index e8d50df81eb..32a4734f924 100644 --- a/src/aws/payment-cryptography.ts +++ b/src/aws/payment-cryptography.ts @@ -462,8 +462,15 @@ const completionSpec: Fig.Spec = { { name: "list-aliases", description: - "Lists the aliases for all keys in the caller's Amazon Web Services account and Amazon Web Services Region. You can filter the list of aliases. For more information, see Using aliases in the Amazon Web Services Payment Cryptography User Guide. 
This is a paginated operation, which means that each response might contain only a subset of all the aliases. When the response contains only a subset of aliases, it includes a NextToken value. Use this value in a subsequent ListAliases request to get more aliases. When you receive a response with no NextToken (or an empty or null value), that means there are no more aliases to get. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: CreateAlias DeleteAlias GetAlias UpdateAlias", + "Lists the aliases for all keys in the caller's Amazon Web Services account and Amazon Web Services Region. You can filter the aliases by keyARN. For more information, see Using aliases in the Amazon Web Services Payment Cryptography User Guide. This is a paginated operation, which means that each response might contain only a subset of all the aliases. When the response contains only a subset of aliases, it includes a NextToken value. Use this value in a subsequent ListAliases request to get more aliases. When you receive a response with no NextToken (or an empty or null value), that means there are no more aliases to get. Cross-account use: This operation can't be used across different Amazon Web Services accounts. Related operations: CreateAlias DeleteAlias GetAlias UpdateAlias", options: [ + { + name: "--key-arn", + description: "The keyARN for which you want to list all aliases", + args: { + name: "string", + }, + }, { name: "--next-token", description: diff --git a/src/aws/qapps.ts b/src/aws/qapps.ts index 422a2248c22..e5b12366bb7 100644 --- a/src/aws/qapps.ts +++ b/src/aws/qapps.ts @@ -82,6 +82,124 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "batch-create-category", + description: + "Creates Categories for the Amazon Q Business application environment instance. Web experience users use Categories to tag and filter library items. 
For more information, see Custom labels for Amazon Q Apps", + options: [ + { + name: "--instance-id", + description: + "The unique identifier of the Amazon Q Business application environment instance", + args: { + name: "string", + }, + }, + { + name: "--categories", + description: "The list of category objects to be created", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "batch-delete-category", + description: + "Deletes Categories for the Amazon Q Business application environment instance. Web experience users use Categories to tag and filter library items. For more information, see Custom labels for Amazon Q Apps", + options: [ + { + name: "--instance-id", + description: + "The unique identifier of the Amazon Q Business application environment instance", + args: { + name: "string", + }, + }, + { + name: "--categories", + description: "The list of IDs of the categories to be deleted", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. 
The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "batch-update-category", + description: + "Updates Categories for the Amazon Q Business application environment instance. Web experience users use Categories to tag and filter library items. For more information, see Custom labels for Amazon Q Apps", + options: [ + { + name: "--instance-id", + description: + "The unique identifier of the Amazon Q Business application environment instance", + args: { + name: "string", + }, + }, + { + name: "--categories", + description: + "The list of categories to be updated with their new values", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. 
If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-library-item", description: @@ -560,6 +678,38 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-categories", + description: + "Lists the categories of a Amazon Q Business application environment instance. For more information, see Custom labels for Amazon Q Apps", + options: [ + { + name: "--instance-id", + description: + "The unique identifier of the Amazon Q Business application environment instance", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-library-items", description: diff --git a/src/aws/qbusiness.ts b/src/aws/qbusiness.ts index f50eb90bcf7..3c219cc3e14 100644 --- a/src/aws/qbusiness.ts +++ b/src/aws/qbusiness.ts @@ -246,7 +246,7 @@ const completionSpec: Fig.Spec = { { name: "create-application", description: - "Creates an Amazon Q Business application. There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users", + "Creates an Amazon Q Business application. There are new tiers for Amazon Q Business. Not all features in Amazon Q Business Pro are also available in Amazon Q Business Lite. For information on what's included in Amazon Q Business Lite and what's included in Amazon Q Business Pro, see Amazon Q Business tiers. You must use the Amazon Q Business console to assign subscription tiers to users. A Amazon Q Apps service linked role will be created if it's absent in the Amazon Web Services account when the QAppsConfiguration is enabled in the request. 
For more information, see Using service-linked roles for Q Apps", options: [ { name: "--display-name", @@ -2703,6 +2703,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--role-arn", + description: + "The Amazon Resource Name (ARN) of an IAM role that has access to the S3 file that contains your list of users that belong to a group.The Amazon Resource Name (ARN) of an IAM role that has access to the S3 file that contains your list of users that belong to a group", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -2898,7 +2906,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-application", - description: "Updates an existing Amazon Q Business application", + description: + "Updates an existing Amazon Q Business application. A Amazon Q Apps service-linked role will be created if it's absent in the Amazon Web Services account when the QAppsConfiguration is enabled in the request. For more information, see Using service-linked roles for Q Apps", options: [ { name: "--application-id", diff --git a/src/aws/rds.ts b/src/aws/rds.ts index 22b9a1f6b7e..01bacad05f6 100644 --- a/src/aws/rds.ts +++ b/src/aws/rds.ts @@ -194,7 +194,7 @@ const completionSpec: Fig.Spec = { { name: "--apply-action", description: - "The pending maintenance action to apply to this resource. Valid Values: system-update, db-upgrade, hardware-maintenance, ca-certificate-rotation", + "The pending maintenance action to apply to this resource. Valid Values: ca-certificate-rotation db-upgrade hardware-maintenance os-upgrade system-update For more information about these actions, see Maintenance actions for Amazon Aurora or Maintenance actions for Amazon RDS", args: { name: "string", }, @@ -1322,12 +1322,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-limitless-database", description: - "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. 
Valid for: Aurora DB clusters only", + "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting", }, { name: "--no-enable-limitless-database", description: - "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only", + "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting", }, { name: "--serverless-v2-scaling-configuration", @@ -1399,7 +1399,7 @@ const completionSpec: Fig.Spec = { { name: "--engine-lifecycle-support", description: - "The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. 
For more information, see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support", + "The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support", args: { name: "string", }, @@ -8934,12 +8934,12 @@ const completionSpec: Fig.Spec = { { name: "--enable-limitless-database", description: - "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only", + "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. 
Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting when you create your Aurora Limitless Database DB cluster", }, { name: "--no-enable-limitless-database", description: - "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only", + "Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB clusters only This setting is no longer used. Instead use the ClusterScalabilityType setting when you create your Aurora Limitless Database DB cluster", }, { name: "--ca-certificate-identifier", @@ -11291,7 +11291,7 @@ const completionSpec: Fig.Spec = { { name: "--engine-lifecycle-support", description: - "The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. 
For more information, see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support", + "The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support", args: { name: "string", }, @@ -11559,10 +11559,52 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--monitoring-interval", + description: + "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. 
Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0", + args: { + name: "integer", + }, + }, + { + name: "--monitoring-role-arn", + description: + "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value", + args: { + name: "string", + }, + }, + { + name: "--enable-performance-insights", + description: + "Specifies whether to turn on Performance Insights for the DB cluster", + }, + { + name: "--no-enable-performance-insights", + description: + "Specifies whether to turn on Performance Insights for the DB cluster", + }, + { + name: "--performance-insights-kms-key-id", + description: + "The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region", + args: { + name: "string", + }, + }, + { + name: "--performance-insights-retention-period", + description: + "The number of days to retain Performance Insights data. Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error", + args: { + name: "integer", + }, + }, { name: "--engine-lifecycle-support", description: - "The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. 
At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support", + "The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. 
For more information, see the following sections: Amazon Aurora - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support", args: { name: "string", }, @@ -11832,10 +11874,52 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--monitoring-interval", + description: + "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify 0. If MonitoringRoleArn is specified, also set MonitoringInterval to a value other than 0. Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 Default: 0", + args: { + name: "integer", + }, + }, + { + name: "--monitoring-role-arn", + description: + "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is arn:aws:iam:123456789012:role/emaccess. If MonitoringInterval is set to a value other than 0, supply a MonitoringRoleArn value", + args: { + name: "string", + }, + }, + { + name: "--enable-performance-insights", + description: + "Specifies whether to turn on Performance Insights for the DB cluster", + }, + { + name: "--no-enable-performance-insights", + description: + "Specifies whether to turn on Performance Insights for the DB cluster", + }, + { + name: "--performance-insights-kms-key-id", + description: + "The Amazon Web Services KMS key identifier for encryption of Performance Insights data. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. If you don't specify a value for PerformanceInsightsKMSKeyId, then Amazon RDS uses your default KMS key. 
There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region", + args: { + name: "string", + }, + }, + { + name: "--performance-insights-retention-period", + description: + "The number of days to retain Performance Insights data. Valid Values: 7 month * 31, where month is a number of months from 1-23. Examples: 93 (3 months * 31), 341 (11 months * 31), 589 (19 months * 31) 731 Default: 7 days If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error", + args: { + name: "integer", + }, + }, { name: "--engine-lifecycle-support", description: - "The life cycle type for this DB cluster. By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora (PostgreSQL only) - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support", + "The life cycle type for this DB cluster. 
By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date. You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections: Amazon Aurora - Using Amazon RDS Extended Support in the Amazon Aurora User Guide Amazon RDS - Using Amazon RDS Extended Support in the Amazon RDS User Guide Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled Default: open-source-rds-extended-support", args: { name: "string", }, diff --git a/src/aws/redshift-data.ts b/src/aws/redshift-data.ts index 67bed85724b..25ed6d1dc3d 100644 --- a/src/aws/redshift-data.ts +++ b/src/aws/redshift-data.ts @@ -40,6 +40,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--result-format", + description: + "The data format of the result of the SQL statement. If no format is specified, the default is JSON", + args: { + name: "string", + }, + }, { name: "--secret-arn", description: @@ -352,6 +360,14 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--result-format", + description: + "The data format of the result of the SQL statement. 
If no format is specified, the default is JSON", + args: { + name: "string", + }, + }, { name: "--secret-arn", description: @@ -430,7 +446,63 @@ const completionSpec: Fig.Spec = { { name: "get-statement-result", description: - "Fetches the temporarily cached result of an SQL statement. A token is returned to page through the statement results. For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide", + "Fetches the temporarily cached result of an SQL statement in JSON format. The ExecuteStatement or BatchExecuteStatement operation that ran the SQL statement must have specified ResultFormat as JSON , or let the format default to JSON. A token is returned to page through the statement results. For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide", + options: [ + { + name: "--id", + description: + "The identifier of the SQL statement whose results are to be fetched. This value is a universally unique identifier (UUID) generated by Amazon Redshift Data API. A suffix indicates then number of the SQL statement. For example, d9b6c0c9-0747-4bf4-b142-e8883122f766:2 has a suffix of :2 that indicates the second SQL statement of a batch query. This identifier is returned by BatchExecuteStatment, ExecuteStatment, and ListStatements", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned NextToken value in the next NextToken parameter and retrying the command. 
If the NextToken field is empty, all response records have been retrieved for the request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "get-statement-result-v2", + description: + "Fetches the temporarily cached result of an SQL statement in CSV format. The ExecuteStatement or BatchExecuteStatement operation that ran the SQL statement must have specified ResultFormat as CSV. A token is returned to page through the statement results. For more information about the Amazon Redshift Data API and CLI usage examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide", options: [ { name: "--id", diff --git a/src/aws/redshift-serverless.ts b/src/aws/redshift-serverless.ts index ee4c0599768..182667d5419 100644 --- a/src/aws/redshift-serverless.ts +++ b/src/aws/redshift-serverless.ts @@ -612,6 +612,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--price-performance-target", + description: + "An object that represents the price performance target settings for the workgroup", + args: { + name: "structure", + }, + }, { name: "--publicly-accessible", description: @@ -3060,6 +3068,14 @@ const completionSpec: Fig.Spec = { name: "integer", }, }, + { + name: "--price-performance-target", + description: + "An object that represents the price performance target settings for the workgroup", + args: { + name: "structure", + }, + }, { name: "--publicly-accessible", description: diff --git a/src/aws/redshift.ts b/src/aws/redshift.ts index 5c498e4de77..5a7c56dea8c 100644 --- a/src/aws/redshift.ts +++ b/src/aws/redshift.ts @@ -1452,7 +1452,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-integration", - description: "Creates a zero-ETL integration with Amazon Redshift", + description: + "Creates a zero-ETL integration or S3 event integration with Amazon Redshift", options: [ { name: "--source-arn", @@ -2354,7 +2355,8 @@ const completionSpec: 
Fig.Spec = { }, { name: "delete-integration", - description: "Deletes a zero-ETL integration with Amazon Redshift", + description: + "Deletes a zero-ETL integration or S3 event integration with Amazon Redshift", options: [ { name: "--integration-arn", @@ -4570,7 +4572,7 @@ const completionSpec: Fig.Spec = { { name: "describe-integrations", description: - "Describes one or more zero-ETL integrations with Amazon Redshift", + "Describes one or more zero-ETL or S3 event integrations with Amazon Redshift", options: [ { name: "--integration-arn", @@ -5659,7 +5661,7 @@ const completionSpec: Fig.Spec = { { name: "--resource-type", description: - "The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant Integration (zero-ETL integration) To describe the tags associated with an integration, don't specify ResourceType, instead specify the ResourceName of the integration. For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide", + "The type of resource with which you want to view tags. Valid resource types are: Cluster CIDR/IP EC2 security group Snapshot Cluster security group Subnet group HSM connection HSM certificate Parameter group Snapshot copy grant Integration (zero-ETL integration or S3 event integration) To describe the tags associated with an integration, don't specify ResourceType, instead specify the ResourceName of the integration. 
For more information about Amazon Redshift resource types and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, Resources, and Principals in the Amazon Redshift Cluster Management Guide", args: { name: "string", }, @@ -7401,7 +7403,8 @@ const completionSpec: Fig.Spec = { }, { name: "modify-integration", - description: "Modifies a zero-ETL integration with Amazon Redshift", + description: + "Modifies a zero-ETL integration or S3 event integration with Amazon Redshift", options: [ { name: "--integration-arn", diff --git a/src/aws/resource-explorer-2.ts b/src/aws/resource-explorer-2.ts index a110a8e3091..69b4368d9d9 100644 --- a/src/aws/resource-explorer-2.ts +++ b/src/aws/resource-explorer-2.ts @@ -337,6 +337,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-managed-view", + description: + "Retrieves details of the specified Amazon Web Services-managed view", + options: [ + { + name: "--managed-view-arn", + description: "The Amazon resource name (ARN) of the managed view", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "get-view", description: "Retrieves details of the specified view", @@ -520,6 +551,78 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-managed-views", + description: + "Lists the Amazon resource names (ARNs) of the Amazon Web Services-managed views available in the Amazon Web Services Region in which you call this operation", + options: [ + { + name: "--max-results", + description: + "The maximum number of results that you want included on each page of the response. If you do not include this parameter, it defaults to a value appropriate to the operation. If additional items exist beyond those included in the current response, the NextToken response element is present and has a value (is not null). Include that value as the NextToken request parameter in the next call to the operation to get the next part of the results. An API operation can return fewer results than the maximum even when there are more results available. You should check NextToken after every operation to ensure that you receive all of the results", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: + "The parameter for receiving additional results if you receive a NextToken response in a previous request. A NextToken response indicates that more output is available. Set this parameter to the value of the previous call's NextToken response to indicate where the output should continue from. The pagination tokens expire after 24 hours", + args: { + name: "string", + }, + }, + { + name: "--service-principal", + description: + "Specifies a service principal name. 
If specified, then the operation only returns the managed views that are managed by the input service", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. 
Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-resources", description: diff --git a/src/aws/route53.ts b/src/aws/route53.ts index e201b99068d..65f43fcbebf 100644 --- a/src/aws/route53.ts +++ b/src/aws/route53.ts @@ -440,7 +440,7 @@ const completionSpec: Fig.Spec = { { name: "create-query-logging-config", description: - "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group. DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following: Route 53 edge location that responded to the DNS query Domain or subdomain that was requested DNS record type, such as A or AAAA DNS response code, such as NoError or ServFail Log Group and Resource Policy Before you create a query logging configuration, perform the following operations. If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following: You must create the log group in the us-east-1 region. 
You must use the same Amazon Web Services account to create the log group and the hosted zone that you want to configure query logging for. When you create log groups for query logging, we recommend that you use a consistent prefix, for example: /aws/route53/hosted zone name In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated Amazon Web Services resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging. Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of Resource, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *, for example: arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/* To avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values: For aws:SourceArn, supply the hosted zone ARN used in creating the query logging configuration. For example, aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID. For aws:SourceAccount, supply the account ID for the account that creates the query logging configuration. For example, aws:SourceAccount:111111111111. For more information, see The confused deputy problem in the Amazon Web Services IAM User Guide. You can't use the CloudWatch console to create or edit a resource policy. 
You must use the CloudWatch API, one of the Amazon Web Services SDKs, or the CLI. Log Streams and Edge Locations When Route 53 finishes creating the configuration for DNS query logging, it does the following: Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location. Begins to send query logs to the applicable log stream. The name of each log stream is in the following format: hosted zone ID/edge location code The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page. Queries That Are Logged Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide. Log File Format For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide. 
Pricing For information about charges for query logs, see Amazon CloudWatch Pricing. How to Stop Logging If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig", + "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group. DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following: Route 53 edge location that responded to the DNS query Domain or subdomain that was requested DNS record type, such as A or AAAA DNS response code, such as NoError or ServFail Log Group and Resource Policy Before you create a query logging configuration, perform the following operations. If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following: You must create the log group in the us-east-1 region. You must use the same Amazon Web Services account to create the log group and the hosted zone that you want to configure query logging for. When you create log groups for query logging, we recommend that you use a consistent prefix, for example: /aws/route53/hosted zone name In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated Amazon Web Services resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging. 
Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. You must create the CloudWatch Logs resource policy in the us-east-1 region. For the value of Resource, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *, for example: arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/* To avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values: For aws:SourceArn, supply the hosted zone ARN used in creating the query logging configuration. For example, aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID. For aws:SourceAccount, supply the account ID for the account that creates the query logging configuration. For example, aws:SourceAccount:111111111111. For more information, see The confused deputy problem in the Amazon Web Services IAM User Guide. You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the Amazon Web Services SDKs, or the CLI. Log Streams and Edge Locations When Route 53 finishes creating the configuration for DNS query logging, it does the following: Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location. Begins to send query logs to the applicable log stream. 
The name of each log stream is in the following format: hosted zone ID/edge location code The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page. Queries That Are Logged Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide. Log File Format For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide. Pricing For information about charges for query logs, see Amazon CloudWatch Pricing. How to Stop Logging If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. 
For more information, see DeleteQueryLoggingConfig", options: [ { name: "--hosted-zone-id", @@ -1456,7 +1456,7 @@ const completionSpec: Fig.Spec = { { name: "get-hosted-zone", description: - "Gets information about a specified hosted zone including the four name servers assigned to the hosted zone", + "Gets information about a specified hosted zone including the four name servers assigned to the hosted zone. returns the VPCs associated with the specified hosted zone and does not reflect the VPC associations by Route 53 Profiles. To get the associations to a Profile, call the ListProfileAssociations API", options: [ { name: "--id", @@ -2195,7 +2195,7 @@ const completionSpec: Fig.Spec = { { name: "list-hosted-zones-by-vpc", description: - "Lists all the private hosted zones that a specified VPC is associated with, regardless of which Amazon Web Services account or Amazon Web Services service owns the hosted zones. The HostedZoneOwner structure in the response contains one of the following values: An OwningAccount element, which contains the account number of either the current Amazon Web Services account or another Amazon Web Services account. Some services, such as Cloud Map, create hosted zones using the current account. An OwningService element, which identifies the Amazon Web Services service that created and owns the hosted zone. For example, if a hosted zone was created by Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com. When listing private hosted zones, the hosted zone and the Amazon VPC must belong to the same partition where the hosted zones were created. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition. 
The following are the supported partitions: aws - Amazon Web Services Regions aws-cn - China Regions aws-us-gov - Amazon Web Services GovCloud (US) Region For more information, see Access Management in the Amazon Web Services General Reference", + "Lists all the private hosted zones that a specified VPC is associated with, regardless of which Amazon Web Services account or Amazon Web Services service owns the hosted zones. The HostedZoneOwner structure in the response contains one of the following values: An OwningAccount element, which contains the account number of either the current Amazon Web Services account or another Amazon Web Services account. Some services, such as Cloud Map, create hosted zones using the current account. An OwningService element, which identifies the Amazon Web Services service that created and owns the hosted zone. For example, if a hosted zone was created by Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com. ListHostedZonesByVPC returns the hosted zones associated with the specified VPC and does not reflect the hosted zone associations to VPCs via Route 53 Profiles. To get the associations to a Profile, call the ListProfileResourceAssociations API. When listing private hosted zones, the hosted zone and the Amazon VPC must belong to the same partition where the hosted zones were created. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition. 
The following are the supported partitions: aws - Amazon Web Services Regions aws-cn - China Regions aws-us-gov - Amazon Web Services GovCloud (US) Region For more information, see Access Management in the Amazon Web Services General Reference", options: [ { name: "--vpc-id", diff --git a/src/aws/sagemaker.ts b/src/aws/sagemaker.ts index 0e14b3cd7f6..cbbc9908636 100644 --- a/src/aws/sagemaker.ts +++ b/src/aws/sagemaker.ts @@ -127,6 +127,46 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "batch-delete-cluster-nodes", + description: + "Deletes specific nodes within a SageMaker HyperPod cluster. BatchDeleteClusterNodes accepts a cluster name and a list of node IDs. To safeguard your work, back up your data to Amazon S3 or an FSx for Lustre file system before invoking the API on a worker node group. This will help prevent any potential data loss from the instance root volume. For more information about backup, see Use the backup script provided by SageMaker HyperPod. If you want to invoke this API on an existing cluster, you'll first need to patch the cluster by running the UpdateClusterSoftware API. For more information about patching a cluster, see Update the SageMaker HyperPod platform software of a cluster", + options: [ + { + name: "--cluster-name", + description: + "The name of the SageMaker HyperPod cluster from which to delete the specified nodes", + args: { + name: "string", + }, + }, + { + name: "--node-ids", + description: + "A list of node IDs to be deleted from the specified cluster. For SageMaker HyperPod clusters using the Slurm workload manager, you cannot remove instances that are configured as Slurm controller nodes", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "batch-describe-model-package", description: @@ -1289,7 +1329,8 @@ const completionSpec: Fig.Spec = { }, { name: "--default-space-settings", - description: "The default settings used to create a space", + description: + "The default settings for shared spaces that users create in the domain", args: { name: "structure", }, @@ -3367,6 +3408,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--model-life-cycle", + description: + "A structure describing the current state of the model in its life cycle", + args: { + name: "structure", + }, + }, { name: "--cli-input-json", description: @@ -3668,7 +3717,7 @@ const completionSpec: Fig.Spec = { { name: "--accelerator-types", description: - "A list of Elastic Inference (EI) instance types to associate with this notebook instance. Currently, only one instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker", + "This parameter is no longer supported. Elastic Inference (EI) is no longer available. 
This parameter was used to specify a list of EI instance types to associate with this notebook instance", args: { name: "list", }, @@ -3975,7 +4024,7 @@ const completionSpec: Fig.Spec = { { name: "create-presigned-domain-url", description: - "Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page", + "Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM. The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. 
IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app. You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint . The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page. The JupyterLab session default expiration time is 12 hours. You can configure this value using SessionExpirationDurationInSeconds", options: [ { name: "--domain-id", @@ -19242,7 +19291,7 @@ const completionSpec: Fig.Spec = { { name: "update-cluster-software", description: - "Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster", + "Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster. The UpgradeClusterSoftware API call may impact your SageMaker HyperPod cluster uptime and availability. 
Plan accordingly to mitigate potential disruptions to your workloads", options: [ { name: "--cluster-name", @@ -19499,7 +19548,7 @@ const completionSpec: Fig.Spec = { { name: "--default-space-settings", description: - "The default settings used to create a space within the domain", + "The default settings for shared spaces that users create in the domain", args: { name: "structure", }, @@ -20387,6 +20436,22 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--model-life-cycle", + description: + "A structure describing the current state of the model in its life cycle", + args: { + name: "structure", + }, + }, + { + name: "--client-token", + description: + "A unique token that guarantees that the call to this API is idempotent", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -20570,7 +20635,7 @@ const completionSpec: Fig.Spec = { { name: "--accelerator-types", description: - "A list of the Elastic Inference (EI) instance types to associate with this notebook instance. Currently only one EI instance type can be associated with a notebook instance. For more information, see Using Elastic Inference in Amazon SageMaker", + "This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to associate with this notebook instance", args: { name: "list", }, @@ -20578,12 +20643,12 @@ const completionSpec: Fig.Spec = { { name: "--disassociate-accelerator-types", description: - "A list of the Elastic Inference (EI) instance types to remove from this notebook instance. This operation is idempotent. If you specify an accelerator type that is not associated with the notebook instance when you call this method, it does not throw an error", + "This parameter is no longer supported. Elastic Inference (EI) is no longer available. 
This parameter was used to specify a list of the EI instance types to remove from this notebook instance", }, { name: "--no-disassociate-accelerator-types", description: - "A list of the Elastic Inference (EI) instance types to remove from this notebook instance. This operation is idempotent. If you specify an accelerator type that is not associated with the notebook instance when you call this method, it does not throw an error", + "This parameter is no longer supported. Elastic Inference (EI) is no longer available. This parameter was used to specify a list of the EI instance types to remove from this notebook instance", }, { name: "--disassociate-default-code-repository", diff --git a/src/aws/socialmessaging.ts b/src/aws/socialmessaging.ts index c63676b1dc8..063318d5496 100644 --- a/src/aws/socialmessaging.ts +++ b/src/aws/socialmessaging.ts @@ -544,5 +544,4 @@ const completionSpec: Fig.Spec = { }, ], }; - export default completionSpec; diff --git a/src/aws/storagegateway.ts b/src/aws/storagegateway.ts index e44ec66aacc..f50e6b9d66f 100644 --- a/src/aws/storagegateway.ts +++ b/src/aws/storagegateway.ts @@ -1,7 +1,7 @@ const completionSpec: Fig.Spec = { name: "storagegateway", description: - "Storage Gateway Service Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. Use the following links to get started using the Storage Gateway Service API Reference: Storage Gateway required request headers: Describes the required headers that you must send with every POST request to Storage Gateway. Signing requests: Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. 
Error responses: Provides reference information about Storage Gateway errors. Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. Storage Gateway endpoints and quotas: Provides a list of each Amazon Web Services Region and the endpoints available for use with Storage Gateway. Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. For more information, see Announcement: Heads-up \u2013 Longer Storage Gateway volume and snapshot IDs coming in 2016", + "Storage Gateway Service Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post. 
Storage Gateway is the service that connects an on-premises software appliance with cloud-based storage to provide seamless and secure integration between an organization's on-premises IT environment and the Amazon Web Services storage infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery. Use the following links to get started using the Storage Gateway Service API Reference: Storage Gateway required request headers: Describes the required headers that you must send with every POST request to Storage Gateway. Signing requests: Storage Gateway requires that you authenticate every request you send; this topic describes how sign such a request. Error responses: Provides reference information about Storage Gateway errors. Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and examples of requests and responses. Storage Gateway endpoints and quotas: Provides a list of each Amazon Web Services Region and the endpoints available for use with Storage Gateway. Storage Gateway resource IDs are in uppercase. When you use these resource IDs with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change your resource ID to lowercase to use it with the EC2 API. For example, in Storage Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use this ID with the EC2 API, you must change it to vol-aa22bb012345daf670. Otherwise, the EC2 API might not behave as expected. IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway volumes are changing to a longer format. Starting in December 2016, all new volumes and snapshots will be created with a 17-character string. Starting in April 2016, you will be able to use these longer IDs so you can test your systems with the new format. 
For more information, see Longer EC2 and EBS resource IDs. For example, a volume Amazon Resource Name (ARN) with the longer volume ID format looks like the following: arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG. A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. For more information, see Announcement: Heads-up \u2013 Longer Storage Gateway volume and snapshot IDs coming in 2016", subcommands: [ { name: "activate-gateway", @@ -42,7 +42,7 @@ const completionSpec: Fig.Spec = { { name: "--gateway-type", description: - "A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED. Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB", + "A value that defines the type of gateway to activate. The type specified is critical to all later functions of the gateway and cannot be changed after activation. The default value is CACHED. Amazon FSx File Gateway is no longer available to new customers. Existing customers of FSx File Gateway can continue to use the service normally. For capabilities similar to FSx File Gateway, visit this blog post. Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB", args: { name: "string", }, @@ -709,7 +709,7 @@ const completionSpec: Fig.Spec = { { name: "--location-arn", description: - "A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/). You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples. 
Bucket ARN: arn:aws:s3:::my-bucket/prefix/ Access point ARN: arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/ If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide. Access point alias: test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias", + "A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/). You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples. Bucket ARN: arn:aws:s3:::amzn-s3-demo-bucket/prefix/ Access point ARN: arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/ If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide. Access point alias: test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias", args: { name: "string", }, @@ -787,7 +787,7 @@ const completionSpec: Fig.Spec = { { name: "--file-share-name", description: - "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used", + "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used. A valid NFS file share name can only contain the following characters: a-z, A-Z, 0-9, -, ., and _", args: { name: "string", }, @@ -908,7 +908,7 @@ const completionSpec: Fig.Spec = { { name: "--location-arn", description: - "A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. 
The prefix must end with a forward slash (/). You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples. Bucket ARN: arn:aws:s3:::my-bucket/prefix/ Access point ARN: arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/ If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide. Access point alias: test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias", + "A custom ARN for the backend storage used for storing data for file shares. It includes a resource ARN with an optional prefix concatenation. The prefix must end with a forward slash (/). You can specify LocationARN as a bucket ARN, access point ARN or access point alias, as shown in the following examples. Bucket ARN: arn:aws:s3:::amzn-s3-demo-bucket/prefix/ Access point ARN: arn:aws:s3:region:account-id:accesspoint/access-point-name/prefix/ If you specify an access point, the bucket policy must be configured to delegate access control to the access point. For information, see Delegating access control to access points in the Amazon S3 User Guide. Access point alias: test-ap-ab123cdef4gehijklmn5opqrstuvuse1a-s3alias", args: { name: "string", }, @@ -1038,7 +1038,7 @@ const completionSpec: Fig.Spec = { { name: "--file-share-name", description: - "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used", + 'The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used. 
A valid SMB file share name cannot contain the following characters: [,],#,;,<,>,:,",\\,/,|,?,*,+, or ASCII control characters 1-31', args: { name: "string", }, @@ -4539,7 +4539,7 @@ const completionSpec: Fig.Spec = { { name: "--file-share-name", description: - "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used", + "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used. A valid NFS file share name can only contain the following characters: a-z, A-Z, 0-9, -, ., and _", args: { name: "string", }, @@ -4734,7 +4734,7 @@ const completionSpec: Fig.Spec = { { name: "--file-share-name", description: - "The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used", + 'The name of the file share. Optional. FileShareName must be set if an S3 prefix name is set in LocationARN, or if an access point or access point alias is used. 
A valid SMB file share name cannot contain the following characters: [,],#,;,<,>,:,",\\,/,|,?,*,+, or ASCII control characters 1-31', args: { name: "string", }, diff --git a/src/aws/supplychain.ts b/src/aws/supplychain.ts index 49468cde395..3216c867c6b 100644 --- a/src/aws/supplychain.ts +++ b/src/aws/supplychain.ts @@ -53,7 +53,7 @@ const completionSpec: Fig.Spec = { { name: "create-data-integration-flow", description: - "Create DataIntegrationFlow to map one or more different sources to one target using the SQL transformation query", + "Enables you to programmatically create a data pipeline to ingest data from source systems such as Amazon S3 buckets, to a predefined Amazon Web Services Supply Chain dataset (product, inbound_order) or a temporary dataset along with the data transformation query provided with the API", options: [ { name: "--instance-id", @@ -120,7 +120,8 @@ const completionSpec: Fig.Spec = { }, { name: "create-data-lake-dataset", - description: "Create a data lake dataset", + description: + "Enables you to programmatically create an Amazon Web Services Supply Chain data lake dataset. Developers can create the datasets using their pre-defined or custom schema for a given instance ID, namespace, and dataset name", options: [ { name: "--instance-id", @@ -190,7 +191,7 @@ const completionSpec: Fig.Spec = { { name: "create-instance", description: - "Create a new instance for AWS Supply Chain. This is an asynchronous operation. Upon receiving a CreateInstance request, AWS Supply Chain immediately returns the instance resource, with instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for an instance creation. You can use GetInstance to check the status of the instance", + "Enables you to programmatically create an Amazon Web Services Supply Chain instance by applying KMS keys and relevant information associated with the API without using the Amazon Web Services console. 
This is an asynchronous operation. Upon receiving a CreateInstance request, Amazon Web Services Supply Chain immediately returns the instance resource, instance ID, and the initializing state while simultaneously creating all required Amazon Web Services resources for an instance creation. You can use GetInstance to check the status of the instance. If the instance results in an unhealthy state, you need to check the error message, delete the current instance, and recreate a new one based on the mitigation from the error message", options: [ { name: "--instance-name", @@ -250,7 +251,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-data-integration-flow", - description: "Delete the DataIntegrationFlow", + description: + "Enable you to programmatically delete an existing data pipeline for the provided Amazon Web Services Supply Chain instance and DataIntegrationFlow name", options: [ { name: "--instance-id", @@ -288,7 +290,8 @@ const completionSpec: Fig.Spec = { }, { name: "delete-data-lake-dataset", - description: "Delete a data lake dataset", + description: + "Enables you to programmatically delete an Amazon Web Services Supply Chain data lake dataset. Developers can delete the existing datasets for a given instance ID, namespace, and instance name", options: [ { name: "--instance-id", @@ -335,7 +338,7 @@ const completionSpec: Fig.Spec = { { name: "delete-instance", description: - "Delete the instance. This is an asynchronous operation. Upon receiving a DeleteInstance request, AWS Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status", + "Enables you to programmatically delete an Amazon Web Services Supply Chain instance by deleting the KMS keys and relevant information associated with the API without using the Amazon Web Services console. 
This is an asynchronous operation. Upon receiving a DeleteInstance request, Amazon Web Services Supply Chain immediately returns a response with the instance resource, delete state while cleaning up all Amazon Web Services resources created during the instance creation process. You can use the GetInstance action to check the instance status", options: [ { name: "--instance-id", @@ -402,7 +405,8 @@ const completionSpec: Fig.Spec = { }, { name: "get-data-integration-flow", - description: "View the DataIntegrationFlow details", + description: + "Enables you to programmatically view a specific data pipeline for the provided Amazon Web Services Supply Chain instance and DataIntegrationFlow name", options: [ { name: "--instance-id", @@ -440,7 +444,8 @@ const completionSpec: Fig.Spec = { }, { name: "get-data-lake-dataset", - description: "Get a data lake dataset", + description: + "Enables you to programmatically view an Amazon Web Services Supply Chain data lake dataset. Developers can view the data lake dataset information such as namespace, schema, and so on for a given instance ID, namespace, and dataset name", options: [ { name: "--instance-id", @@ -487,7 +492,8 @@ const completionSpec: Fig.Spec = { }, { name: "get-instance", - description: "Get the AWS Supply Chain instance details", + description: + "Enables you to programmatically retrieve the information related to an Amazon Web Services Supply Chain instance ID", options: [ { name: "--instance-id", @@ -517,7 +523,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-data-integration-flows", - description: "Lists all the DataIntegrationFlows in a paginated way", + description: + "Enables you to programmatically list all data pipelines for the provided Amazon Web Services Supply Chain instance", options: [ { name: "--instance-id", @@ -589,7 +596,7 @@ const completionSpec: Fig.Spec = { { name: "list-data-lake-datasets", description: - "List the data lake datasets for a specific instance and name space", + 
"Enables you to programmatically view the list of Amazon Web Services Supply Chain data lake datasets. Developers can view the datasets and the corresponding information such as namespace, schema, and so on for a given instance ID and namespace", options: [ { name: "--instance-id", @@ -667,7 +674,8 @@ const completionSpec: Fig.Spec = { }, { name: "list-instances", - description: "List all the AWS Supply Chain instances in a paginated way", + description: + "List all Amazon Web Services Supply Chain instances for a specific account. Enables you to programmatically list all Amazon Web Services Supply Chain instances based on their account ID, instance name, and state of the instance (active or delete)", options: [ { name: "--next-token", @@ -745,7 +753,7 @@ const completionSpec: Fig.Spec = { { name: "list-tags-for-resource", description: - "List all the tags for an Amazon Web ServicesSupply Chain resource", + "List all the tags for an Amazon Web ServicesSupply Chain resource. You can list all the tags added to a resource. By listing the tags, developers can view the tag level information on a resource and perform actions such as, deleting a resource associated with a particular tag", options: [ { name: "--resource-arn", @@ -845,7 +853,7 @@ const completionSpec: Fig.Spec = { { name: "tag-resource", description: - "Create tags for an Amazon Web Services Supply chain resource", + "You can create tags during or after creating a resource such as instance, data flow, or dataset in AWS Supply chain. During the data ingestion process, you can add tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets. 
You can use these tags to identify a group of resources or a single resource used by the developer", options: [ { name: "--resource-arn", @@ -885,7 +893,7 @@ const completionSpec: Fig.Spec = { { name: "untag-resource", description: - "Delete tags for an Amazon Web Services Supply chain resource", + "You can delete tags for an Amazon Web Services Supply chain resource such as instance, data flow, or dataset in AWS Supply Chain. During the data ingestion process, you can delete tags such as dev, test, or prod to data flows created during the data ingestion process in the AWS Supply Chain datasets", options: [ { name: "--resource-arn", @@ -924,7 +932,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-data-integration-flow", - description: "Update the DataIntegrationFlow", + description: + "Enables you to programmatically update an existing data pipeline to ingest data from the source systems such as, Amazon S3 buckets, to a predefined Amazon Web Services Supply Chain dataset (product, inbound_order) or a temporary dataset along with the data transformation query provided with the API", options: [ { name: "--instance-id", @@ -986,7 +995,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-data-lake-dataset", - description: "Update a data lake dataset", + description: + "Enables you to programmatically update an Amazon Web Services Supply Chain data lake dataset. 
Developers can update the description of a data lake dataset for a given instance ID, namespace, and dataset name", options: [ { name: "--instance-id", @@ -1039,7 +1049,8 @@ const completionSpec: Fig.Spec = { }, { name: "update-instance", - description: "Update the instance", + description: + "Enables you to programmatically update an Amazon Web Services Supply Chain instance description by providing all the relevant information such as account ID, instance ID and so on without using the AWS console", options: [ { name: "--instance-id", diff --git a/src/aws/synthetics.ts b/src/aws/synthetics.ts index 39ef6c39c93..9f42b6b34cb 100644 --- a/src/aws/synthetics.ts +++ b/src/aws/synthetics.ts @@ -136,6 +136,14 @@ const completionSpec: Fig.Spec = { name: "list", }, }, + { + name: "--provisioned-resource-cleanup", + description: + "Specifies whether to also delete the Lambda functions and layers used by this canary when the canary is deleted. If you omit this parameter, the default of AUTOMATIC is used, which means that the Lambda functions and layers will be deleted when the canary is deleted. If the value of this parameter is OFF, then the value of the DeleteLambda parameter of the DeleteCanary operation determines whether the Lambda functions and layers will be deleted", + args: { + name: "string", + }, + }, { name: "--tags", description: @@ -214,7 +222,7 @@ const completionSpec: Fig.Spec = { { name: "delete-canary", description: - "Permanently deletes the specified canary. If you specify DeleteLambda to true, CloudWatch Synthetics also deletes the Lambda functions and layers that are used by the canary. Other resources used and created by the canary are not automatically deleted. After you delete a canary that you do not intend to use again, you should also delete the following: The CloudWatch alarms created for this canary. 
These alarms have a name of Synthetics-Alarm-first-198-characters-of-canary-name-canaryId-alarm number Amazon S3 objects and buckets, such as the canary's artifact location. IAM roles created for the canary. If they were created in the console, these roles have the name role/service-role/CloudWatchSyntheticsRole-First-21-Characters-of-CanaryName CloudWatch Logs log groups created for the canary. These logs groups have the name /aws/lambda/cwsyn-First-21-Characters-of-CanaryName Before you delete a canary, you might want to use GetCanary to display the information about this canary. Make note of the information returned by this operation so that you can delete these resources after you delete the canary", + "Permanently deletes the specified canary. If the canary's ProvisionedResourceCleanup field is set to AUTOMATIC or you specify DeleteLambda in this operation as true, CloudWatch Synthetics also deletes the Lambda functions and layers that are used by the canary. Other resources used and created by the canary are not automatically deleted. After you delete a canary, you should also delete the following: The CloudWatch alarms created for this canary. These alarms have a name of Synthetics-Alarm-first-198-characters-of-canary-name-canaryId-alarm number Amazon S3 objects and buckets, such as the canary's artifact location. IAM roles created for the canary. If they were created in the console, these roles have the name role/service-role/CloudWatchSyntheticsRole-First-21-Characters-of-CanaryName CloudWatch Logs log groups created for the canary. These logs groups have the name /aws/lambda/cwsyn-First-21-Characters-of-CanaryName Before you delete a canary, you might want to use GetCanary to display the information about this canary. 
Make note of the information returned by this operation so that you can delete these resources after you delete the canary", options: [ { name: "--name", @@ -227,12 +235,12 @@ const completionSpec: Fig.Spec = { { name: "--delete-lambda", description: - "Specifies whether to also delete the Lambda functions and layers used by this canary. The default is false. Type: Boolean", + "Specifies whether to also delete the Lambda functions and layers used by this canary. The default is false. Your setting for this parameter is used only if the canary doesn't have AUTOMATIC for its ProvisionedResourceCleanup field. If that field is set to AUTOMATIC, then the Lambda functions and layers will be deleted when this canary is deleted. Type: Boolean", }, { name: "--no-delete-lambda", description: - "Specifies whether to also delete the Lambda functions and layers used by this canary. The default is false. Type: Boolean", + "Specifies whether to also delete the Lambda functions and layers used by this canary. The default is false. Your setting for this parameter is used only if the canary doesn't have AUTOMATIC for its ProvisionedResourceCleanup field. If that field is set to AUTOMATIC, then the Lambda functions and layers will be deleted when this canary is deleted. Type: Boolean", }, { name: "--cli-input-json", @@ -979,6 +987,14 @@ const completionSpec: Fig.Spec = { name: "structure", }, }, + { + name: "--provisioned-resource-cleanup", + description: + "Specifies whether to also delete the Lambda functions and layers used by this canary when the canary is deleted. 
If the value of this parameter is OFF, then the value of the DeleteLambda parameter of the DeleteCanary operation determines whether the Lambda functions and layers will be deleted", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: diff --git a/src/aws/taxsettings.ts b/src/aws/taxsettings.ts index 64c9427bca2..d75a18cca35 100644 --- a/src/aws/taxsettings.ts +++ b/src/aws/taxsettings.ts @@ -37,7 +37,7 @@ const completionSpec: Fig.Spec = { { name: "batch-put-tax-registration", description: - "Adds or updates tax registration for multiple accounts in batch. This can be used to add or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information. 
By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia. Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. 
If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual", + "Adds or updates tax registration for multiple accounts in batch. This can be used to add or update tax registrations for up to five accounts in one batch. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. 
Malaysia The sector valid values are Business and Individual. RegistrationType valid values are NRIC for individual, and TIN and sales and service tax (SST) for Business. For individual, you can specify the taxInformationNumber in MalaysiaAdditionalInfo with NRIC type, and a valid MyKad or NRIC number. For business, you must specify a businessRegistrationNumber in MalaysiaAdditionalInfo with a TIN type and tax identification number. For business resellers, you must specify a businessRegistrationNumber and taxInformationNumber in MalaysiaAdditionalInfo with a sales and service tax (SST) type and a valid SST number. For business resellers with service codes, you must specify businessRegistrationNumber, taxInformationNumber, and distinct serviceTaxCodes in MalaysiaAdditionalInfo with a SST type and valid sales and service tax (SST) number. By using this API operation, Amazon Web Services registers your self-declaration that you\u2019re an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD), and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. 
You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual", options: [ { name: "--account-ids", @@ -73,6 +73,38 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-supplemental-tax-registration", + description: + "Deletes a supplemental tax registration for a single account", + options: [ + { + name: "--authority-id", + description: + "The unique authority Id for the supplemental TRN information that needs to be deleted", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-tax-registration", description: @@ -174,6 +206,69 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-supplemental-tax-registrations", + description: + "Retrieves supplemental tax registrations for a single account", + options: [ + { + name: "--max-results", + description: + "The number of taxRegistrations results you want in one response", + args: { + name: "integer", + }, + }, + { + name: "--next-token", + description: "The token to retrieve the next set of results", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. 
This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-tax-registrations", description: @@ -237,10 +332,41 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "put-supplemental-tax-registration", + description: "Stores supplemental tax registration for a single account", + options: [ + { + name: "--tax-registration-entry", + description: + "The supplemental TRN information that will be stored for the caller account ID", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "put-tax-registration", description: - "Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. To call this API operation for specific countries, see the following country-specific requirements. 
Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia If you use this operation to set a tax registration number (TRN) in Malaysia, only resellers with a valid sales and service tax (SST) number are required to provide tax registration information. By using this API operation to set a TRN in Malaysia, Amazon Web Services will regard you as self-declaring that you're an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD) and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. If you're not a reseller of Amazon Web Services, we don't recommend that you use this operation to set the TRN in Malaysia. Only use this API operation to upload the TRNs for accounts through which you're reselling Amazon Web Services. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. 
Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. (Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual", + "Adds or updates tax registration for a single account. You can't set a TRN if there's a pending TRN. You'll need to delete the pending TRN first. 
To call this API operation for specific countries, see the following country-specific requirements. Bangladesh You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Brazil You must complete the tax registration process in the Payment preferences page in the Billing and Cost Management console. After your TRN and billing address are verified, you can call this API operation. For Amazon Web Services accounts created through Organizations, you can call this API operation when you don't have a billing address. Georgia The valid personType values are Physical Person and Business. Kenya You must specify the personType in the kenyaAdditionalInfo field of the additionalTaxInformation object. If the personType is Physical Person, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Malaysia The sector valid values are Business and Individual. RegistrationType valid values are NRIC for individual, and TIN and sales and service tax (SST) for Business. For individual, you can specify the taxInformationNumber in MalaysiaAdditionalInfo with NRIC type, and a valid MyKad or NRIC number. For business, you must specify a businessRegistrationNumber in MalaysiaAdditionalInfo with a TIN type and tax identification number. For business resellers, you must specify a businessRegistrationNumber and taxInformationNumber in MalaysiaAdditionalInfo with a sales and service tax (SST) type and a valid SST number. For business resellers with service codes, you must specify businessRegistrationNumber, taxInformationNumber, and distinct serviceTaxCodes in MalaysiaAdditionalInfo with a SST type and valid sales and service tax (SST) number. 
By using this API operation, Amazon Web Services registers your self-declaration that you\u2019re an authorized business reseller registered with the Royal Malaysia Customs Department (RMCD), and have a valid SST number. Amazon Web Services reserves the right to seek additional information and/or take other actions to support your self-declaration as appropriate. Amazon Web Services is currently registered under the following service tax codes. You must include at least one of the service tax codes in the service tax code strings to declare yourself as an authorized registered business reseller. Taxable service and service tax codes: Consultancy - 9907061674 Training or coaching service - 9907071685 IT service - 9907101676 Digital services and electronic medium - 9907121690 Nepal The sector valid values are Business and Individual. Saudi Arabia For address, you must specify addressLine3. South Korea You must specify the certifiedEmailId and legalName in the TaxRegistrationEntry object. Use Korean characters for legalName. You must specify the businessRepresentativeName, itemOfBusiness, and lineOfBusiness in the southKoreaAdditionalInfo field of the additionalTaxInformation object. Use Korean characters for these fields. You must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. For the address object, use Korean characters for addressLine1, addressLine2 city, postalCode, and stateOrRegion. Spain You must specify the registrationType in the spainAdditionalInfo field of the additionalTaxInformation object. If the registrationType is Local, you must specify the tax registration certificate document in the taxRegistrationDocuments field of the VerificationDetails object. Turkey You must specify the sector in the taxRegistrationEntry object. If your sector is Business, Individual, or Government: Specify the taxOffice. If your sector is Individual, don't enter this value. 
(Optional) Specify the kepEmailId. If your sector is Individual, don't enter this value. Note: In the Tax Settings page of the Billing console, Government appears as Public institutions If your sector is Business and you're subject to KDV tax, you must specify your industry in the industries field. For address, you must specify districtOrCounty. Ukraine The sector valid values are Business and Individual", options: [ { name: "--account-id", diff --git a/src/aws/verifiedpermissions.ts b/src/aws/verifiedpermissions.ts index e9e381d9e75..49dd1fe85d3 100644 --- a/src/aws/verifiedpermissions.ts +++ b/src/aws/verifiedpermissions.ts @@ -3,6 +3,38 @@ const completionSpec: Fig.Spec = { description: 'Amazon Verified Permissions is a permissions management service from Amazon Web Services. You can use Verified Permissions to manage permissions for your application, and authorize user access based on those permissions. Using Verified Permissions, application developers can grant access based on information about the users, resources, and requested actions. You can also evaluate additional information like group membership, attributes of the resources, and session context, such as time of request and IP addresses. Verified Permissions manages these permissions by letting you create and store authorization policies for your applications, such as consumer-facing web sites and enterprise business systems. Verified Permissions uses Cedar as the policy language to express your permission requirements. Cedar supports both role-based access control (RBAC) and attribute-based access control (ABAC) authorization models. For more information about configuring, administering, and using Amazon Verified Permissions in your applications, see the Amazon Verified Permissions User Guide. For more information about the Cedar policy language, see the Cedar Policy Language Guide. 
When you write Cedar policies that reference principals, resources and actions, you can define the unique identifiers used for each of those elements. We strongly recommend that you follow these best practices: Use values like universally unique identifiers (UUIDs) for all principal and resource identifiers. For example, if user jane leaves the company, and you later let someone else use the name jane, then that new user automatically gets access to everything granted by policies that still reference User::"jane". Cedar can\u2019t distinguish between the new user and the old. This applies to both principal and resource identifiers. Always use identifiers that are guaranteed unique and never reused to ensure that you don\u2019t unintentionally grant access because of the presence of an old identifier in a policy. Where you use a UUID for an entity, we recommend that you follow it with the // comment specifier and the \u2018friendly\u2019 name of your entity. This helps to make your policies easier to understand. For example: principal == User::"a1b2c3d4-e5f6-a1b2-c3d4-EXAMPLE11111", // alice Do not include personally identifying, confidential, or sensitive information as part of the unique identifier for your principals or resources. These identifiers are included in log entries shared in CloudTrail trails. Several operations return structures that appear similar, but have different purposes. As new functionality is added to the product, the structure used in a parameter of one operation might need to change in a way that wouldn\'t make sense for the same parameter in a different operation. To help you understand the purpose of each, the following naming convention is used for the structures: Parameter type structures that end in Detail are used in Get operations. Parameter type structures that end in Item are used in List operations. 
Parameter type structures that use neither suffix are used in the mutating (create and update) operations', subcommands: [ + { + name: "batch-get-policy", + description: + "Retrieves information about a group (batch) of policies. The BatchGetPolicy operation doesn't have its own IAM permission. To authorize this operation for Amazon Web Services principals, include the permission verifiedpermissions:GetPolicy in their IAM policies", + options: [ + { + name: "--requests", + description: + "An array of up to 100 policies you want information about", + args: { + name: "list", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "batch-is-authorized", description: diff --git a/src/aws/workmail.ts b/src/aws/workmail.ts index d2076ff960c..7422d7151b1 100644 --- a/src/aws/workmail.ts +++ b/src/aws/workmail.ts @@ -339,6 +339,51 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "create-identity-center-application", + description: + "Creates the WorkMail application in IAM Identity Center that can be used later in the WorkMail - IdC integration. 
For more information, see PutIdentityProviderConfiguration. This action does not affect the authentication settings for any WorkMail organizations", + options: [ + { + name: "--name", + description: "The name of the IAM Identity Center application", + args: { + name: "string", + }, + }, + { + name: "--instance-arn", + description: "The Amazon Resource Name (ARN) of the instance", + args: { + name: "string", + }, + }, + { + name: "--client-token", + description: "The idempotency token associated with the request", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "create-impersonation-role", description: @@ -728,6 +773,14 @@ const completionSpec: Fig.Spec = { description: "If this parameter is enabled, the user will be hidden from the address book", }, + { + name: "--identity-provider-user-id", + description: + "User ID from the IAM Identity Center. 
If this parameter is empty it will be updated automatically when the user logs in for the first time to the mailbox associated with WorkMail", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -943,6 +996,68 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "delete-identity-center-application", + description: + "Deletes the IAM Identity Center application from WorkMail. This action does not affect the authentication settings for any WorkMail organizations", + options: [ + { + name: "--application-arn", + description: "The Amazon Resource Name (ARN) of the application", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-identity-provider-configuration", + description: + "Disables the integration between IdC and WorkMail. Authentication will continue with the directory as it was before the IdC integration. 
You might have to reset your directory passwords and reconfigure your desktop and mobile email clients", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "delete-impersonation-role", description: @@ -1155,6 +1270,54 @@ const completionSpec: Fig.Spec = { description: "Deletes a WorkMail organization even if the organization has enabled users", }, + { + name: "--delete-identity-center-application", + description: + "Deletes IAM Identity Center application for WorkMail. This action does not affect authentication settings for any organization", + }, + { + name: "--no-delete-identity-center-application", + description: + "Deletes IAM Identity Center application for WorkMail. This action does not affect authentication settings for any organization", + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. 
If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, + { + name: "delete-personal-access-token", + description: + "Deletes the Personal Access Token from the provided WorkMail Organization", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--personal-access-token-id", + description: "The Personal Access Token ID", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: @@ -1478,6 +1641,37 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "describe-identity-provider-configuration", + description: + "Returns detailed information on the current IdC setup for the WorkMail organization", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. 
It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "describe-inbound-dmarc-settings", description: @@ -2113,6 +2307,44 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "get-personal-access-token-metadata", + description: + "Requests details of a specific Personal Access Token within the WorkMail organization", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--personal-access-token-id", + description: "The Personal Access Token ID", + args: { + name: "string", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-access-control-rules", description: @@ -2894,6 +3126,83 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "list-personal-access-tokens", + description: "Returns a summary of your Personal Access Tokens", + options: [ + { + name: "--organization-id", + description: "The Organization ID", + args: { + name: "string", + }, + }, + { + name: "--user-id", + description: "The WorkMail User ID", + args: { + name: "string", + }, + }, + { + name: "--next-token", + description: + "The token from the previous response to query the next page", + args: { + name: "string", + }, + }, + { + name: "--max-results", + description: + "The maximum amount of items that should be returned in a response", + args: { + name: "integer", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--starting-token", + description: + "A token to specify where to start paginating. This is the\nNextToken from a previously truncated response.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "string", + }, + }, + { + name: "--page-size", + description: + "The size of each page to get in the AWS service call. This\ndoes not affect the number of items returned in the command's\noutput. Setting a smaller page size results in more calls to\nthe AWS service, retrieving fewer items in each call. 
This can\nhelp prevent the AWS service calls from timing out.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--max-items", + description: + "The total number of items to return in the command's output.\nIf the total number of items available is more than the value\nspecified, a NextToken is provided in the command's\noutput. To resume pagination, provide the\nNextToken value in the starting-token\nargument of a subsequent command. Do not use the\nNextToken response element directly outside of the\nAWS CLI.\nFor usage examples, see Pagination in the AWS Command Line Interface User\nGuide", + args: { + name: "integer", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "list-resource-delegates", description: @@ -3320,6 +3629,58 @@ const completionSpec: Fig.Spec = { }, ], }, + { + name: "put-identity-provider-configuration", + description: + "Enables integration between IAM Identity Center (IdC) and WorkMail to proxy authentication requests for mailbox users. You can connect your IdC directory or your external directory to WorkMail through IdC and manage access to WorkMail mailboxes in a single place. 
For enhanced protection, you could enable Multifactor Authentication (MFA) and Personal Access Tokens", + options: [ + { + name: "--organization-id", + description: "The ID of the WorkMail Organization", + args: { + name: "string", + }, + }, + { + name: "--authentication-mode", + description: "The authentication mode used in WorkMail", + args: { + name: "string", + }, + }, + { + name: "--identity-center-configuration", + description: "The details of the IAM Identity Center configuration", + args: { + name: "structure", + }, + }, + { + name: "--personal-access-token-configuration", + description: "The details of the Personal Access Token configuration", + args: { + name: "structure", + }, + }, + { + name: "--cli-input-json", + description: + "Performs service operation based on the JSON string provided. The JSON string follows the format provided by ``--generate-cli-skeleton``. If other arguments are provided on the command line, the CLI values will override the JSON-provided values. It is not possible to pass arbitrary binary values using a JSON-provided value as the string will be taken literally", + args: { + name: "string", + }, + }, + { + name: "--generate-cli-skeleton", + description: + "Prints a JSON skeleton to standard output without sending an API request. If provided with no value or the value ``input``, prints a sample input JSON that can be used as an argument for ``--cli-input-json``. 
If provided with the value ``output``, it validates the command inputs and returns a sample output JSON for that command", + args: { + name: "string", + suggestions: ["input", "output"], + }, + }, + ], + }, { name: "put-inbound-dmarc-settings", description: @@ -3987,7 +4348,7 @@ const completionSpec: Fig.Spec = { }, { name: "update-group", - description: "Updates attibutes in a group", + description: "Updates attributes in a group", options: [ { name: "--organization-id", @@ -4500,7 +4861,7 @@ const completionSpec: Fig.Spec = { }, { name: "--zip-code", - description: "Updates the user's zipcode", + description: "Updates the user's zip code", args: { name: "string", }, @@ -4526,6 +4887,14 @@ const completionSpec: Fig.Spec = { name: "string", }, }, + { + name: "--identity-provider-user-id", + description: + "User ID from the IAM Identity Center. If this parameter is empty it will be updated automatically when the user logs in for the first time to the mailbox associated with WorkMail", + args: { + name: "string", + }, + }, { name: "--cli-input-json", description: