diff --git a/.changes/1.36.7.json b/.changes/1.36.7.json new file mode 100644 index 0000000000..e3b00f53b0 --- /dev/null +++ b/.changes/1.36.7.json @@ -0,0 +1,27 @@ +[ + { + "category": "``bedrock-agent``", + "description": "Add support for the prompt caching feature for Bedrock Prompt Management", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "Raised the documentParameters size limit to 30 KB for AWS IoT Device Management - Jobs.", + "type": "api-change" + }, + { + "category": "``mediaconvert``", + "description": "This release adds support for dynamic audio configuration and the ability to disable the deblocking filter for h265 encodes.", + "type": "api-change" + }, + { + "category": "``s3control``", + "description": "Minor fix to ARN validation for Lambda functions passed to S3 Batch Operations", + "type": "api-change" + }, + { + "category": "Signing", + "description": "No longer sign transfer-encoding header for SigV4", + "type": "bugfix" + } +] \ No newline at end of file diff --git a/.changes/next-release/bugfix-Signing-82847.json b/.changes/next-release/bugfix-Signing-82847.json deleted file mode 100644 index 11d085e489..0000000000 --- a/.changes/next-release/bugfix-Signing-82847.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "bugfix", - "category": "Signing", - "description": "No longer sign transfer-encoding header for SigV4" -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a1d2dd4c69..daa6fc724d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,16 @@ CHANGELOG ========= +1.36.7 +====== + +* api-change:``bedrock-agent``: Add support for the prompt caching feature for Bedrock Prompt Management +* api-change:``iot``: Raised the documentParameters size limit to 30 KB for AWS IoT Device Management - Jobs. +* api-change:``mediaconvert``: This release adds support for dynamic audio configuration and the ability to disable the deblocking filter for h265 encodes. +* api-change:``s3control``: Minor fix to ARN validation for Lambda functions passed to S3 Batch Operations +* bugfix:Signing: No longer sign transfer-encoding header for SigV4 + + 1.36.6 ====== diff --git a/botocore/__init__.py b/botocore/__init__.py index ff573cb325..9d2588519c 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.36.6' +__version__ = '1.36.7' class NullHandler(logging.Handler): diff --git a/botocore/data/bedrock-agent/2023-06-05/service-2.json b/botocore/data/bedrock-agent/2023-06-05/service-2.json index fa10799769..32cec388d4 100644 --- a/botocore/data/bedrock-agent/2023-06-05/service-2.json +++ b/botocore/data/bedrock-agent/2023-06-05/service-2.json @@ -1708,7 +1708,7 @@ }, "agentAliasStatus":{ "shape":"AgentAliasStatus", - "documentation":"
The status of the alias of the agent and whether it is ready for use. The following statuses are possible:
CREATING – The agent alias is being created.
PREPARED – The agent alias is finished being created or updated and is ready to be invoked.
FAILED – The agent alias API operation failed.
UPDATING – The agent alias is being updated.
DELETING – The agent alias is being deleted.
The status of the alias of the agent and whether it is ready for use. The following statuses are possible:
CREATING – The agent alias is being created.
PREPARED – The agent alias is finished being created or updated and is ready to be invoked.
FAILED – The agent alias API operation failed.
UPDATING – The agent alias is being updated.
DELETING – The agent alias is being deleted.
DISSOCIATED – The agent alias has no version associated with it.
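The alias status values listed above can be checked through the existing GetAgentAlias operation. A minimal polling sketch with boto3, assuming placeholder agent and alias IDs:

```python
import time

import boto3

client = boto3.client("bedrock-agent")

# Hypothetical identifiers; substitute your own agent and alias IDs.
AGENT_ID = "AGENT123456"
ALIAS_ID = "ALIAS123456"

# Poll until the alias leaves its transitional states (CREATING, UPDATING, DELETING).
while True:
    alias = client.get_agent_alias(agentId=AGENT_ID, agentAliasId=ALIAS_ID)["agentAlias"]
    status = alias["agentAliasStatus"]
    if status in ("PREPARED", "FAILED", "DISSOCIATED"):
        break
    time.sleep(5)

print(f"Alias settled in state: {status}")
```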
Indicates that the CachePointBlock is of the default type.
" + } + }, + "documentation":"Indicates where a cache checkpoint is located. All information before this checkpoint is cached to be accessed on subsequent requests.
" + }, + "CachePointType":{ + "type":"string", + "enum":["default"] + }, "ChatPromptTemplateConfiguration":{ "type":"structure", "required":["messages"], @@ -2678,6 +2694,10 @@ "ContentBlock":{ "type":"structure", "members":{ + "cachePoint":{ + "shape":"CachePointBlock", + "documentation":"Creates a cache checkpoint within a message.
" + }, "text":{ "shape":"String", "documentation":"The text in the message.
" @@ -4910,7 +4930,7 @@ "FlowNodes":{ "type":"list", "member":{"shape":"FlowNode"}, - "max":20, + "max":40, "min":0 }, "FlowStatus":{ @@ -5092,6 +5112,14 @@ "shape":"UnknownConnectionTargetInputFlowValidationDetails", "documentation":"Details about an unknown target input for a connection.
" }, + "unknownNodeInput":{ + "shape":"UnknownNodeInputFlowValidationDetails", + "documentation":"Details about an unknown input for a node.
" + }, + "unknownNodeOutput":{ + "shape":"UnknownNodeOutputFlowValidationDetails", + "documentation":"Details about an unknown output for a node.
" + }, "unreachableNode":{ "shape":"UnreachableNodeFlowValidationDetails", "documentation":"Details about an unreachable node in the flow.
" @@ -5142,7 +5170,9 @@ "MultipleNodeInputConnections", "UnfulfilledNodeInput", "UnsatisfiedConnectionConditions", - "Unspecified" + "Unspecified", + "UnknownNodeInput", + "UnknownNodeOutput" ] }, "FlowValidations":{ @@ -7227,7 +7257,7 @@ "MaximumLength":{ "type":"integer", "box":true, - "max":4096, + "max":8192, "min":0 }, "MemoryConfiguration":{ @@ -8131,7 +8161,7 @@ "PromptInputVariablesList":{ "type":"list", "member":{"shape":"PromptInputVariable"}, - "max":5, + "max":10, "min":0, "sensitive":true }, @@ -9451,6 +9481,10 @@ "SystemContentBlock":{ "type":"structure", "members":{ + "cachePoint":{ + "shape":"CachePointBlock", + "documentation":"Creates a cache checkpoint within a tool designation
" + }, "text":{ "shape":"NonEmptyString", "documentation":"The text in the system prompt.
" @@ -9544,6 +9578,10 @@ "type":"structure", "required":["text"], "members":{ + "cachePoint":{ + "shape":"CachePointBlock", + "documentation":"A cache checkpoint within a template configuration.
" + }, "inputVariables":{ "shape":"PromptInputVariablesList", "documentation":"An array of the variables in the prompt template.
" @@ -9571,6 +9609,10 @@ "Tool":{ "type":"structure", "members":{ + "cachePoint":{ + "shape":"CachePointBlock", + "documentation":"Creates a cache checkpoint within a tool designation
" + }, "toolSpec":{ "shape":"ToolSpecification", "documentation":"The specification for the tool.
" @@ -9800,6 +9842,42 @@ }, "documentation":"Details about an unknown target input for a connection.
" }, + "UnknownNodeInputFlowValidationDetails":{ + "type":"structure", + "required":[ + "input", + "node" + ], + "members":{ + "input":{ + "shape":"FlowNodeInputName", + "documentation":"The name of the node with the unknown input.
" + }, + "node":{ + "shape":"FlowNodeName", + "documentation":"The name of the unknown input.
" + } + }, + "documentation":"Details about an unknown input for a node.
" + }, + "UnknownNodeOutputFlowValidationDetails":{ + "type":"structure", + "required":[ + "node", + "output" + ], + "members":{ + "node":{ + "shape":"FlowNodeName", + "documentation":"The name of the node with the unknown output.
" + }, + "output":{ + "shape":"FlowNodeOutputName", + "documentation":"The name of the unknown output.
" + } + }, + "documentation":"Details about an unknown output for a node.
" + }, "UnreachableNodeFlowValidationDetails":{ "type":"structure", "required":["node"], diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index ff6c6a3856..e021c01d43 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -23642,6 +23642,9 @@ "variants" : [ { "hostname" : "synthetics-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "synthetics.ca-central-1.api.aws", "tags" : [ "dualstack" ] @@ -23651,6 +23654,9 @@ "variants" : [ { "hostname" : "synthetics-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "synthetics.ca-west-1.api.aws", "tags" : [ "dualstack" ] @@ -23780,6 +23786,9 @@ "variants" : [ { "hostname" : "synthetics-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "synthetics.us-east-1.api.aws", "tags" : [ "dualstack" ] @@ -23789,6 +23798,9 @@ "variants" : [ { "hostname" : "synthetics-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "synthetics.us-east-2.api.aws", "tags" : [ "dualstack" ] @@ -23798,6 +23810,9 @@ "variants" : [ { "hostname" : "synthetics-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "synthetics.us-west-1.api.aws", "tags" : [ "dualstack" ] @@ -23807,6 +23822,9 @@ "variants" : [ { "hostname" : "synthetics-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "synthetics-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] }, { "hostname" : "synthetics.us-west-2.api.aws", "tags" : [ "dualstack" ] diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index 4162a60d22..1d237434e8 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -17582,7 +17582,7 @@ }, "ParameterValue":{ "type":"string", - "max":512, + "max":30720, "min":1, "pattern":"[^\\p{C}]+" }, diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 518a698cc4..f41cdbbda9 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -2004,7 +2004,7 @@ "ExternalAudioFileInput": { "shape": "__stringPatternS3Https", "locationName": "externalAudioFileInput", - "documentation": "Specifies audio data from an external file source." + "documentation": "Specify the S3, HTTP, or HTTPS URL for your external audio file input." }, "HlsRenditionGroupSettings": { "shape": "HlsRenditionGroupSettings", @@ -2014,12 +2014,12 @@ "LanguageCode": { "shape": "LanguageCode", "locationName": "languageCode", - "documentation": "Selects a specific language code from within an audio source." + "documentation": "Specify the language to select from your audio input. In the MediaConvert console choose from a list of languages. 
In your JSON job settings choose from an ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php" }, "Offset": { "shape": "__integerMinNegative2147483648Max2147483647", "locationName": "offset", - "documentation": "Specifies a time delta in milliseconds to offset the audio from the input video." + "documentation": "Specify a time delta, in milliseconds, to offset the audio from the input video.\nTo specify no offset: Keep the default value, 0.\nTo specify an offset: Enter an integer from -2147483648 to 2147483647" }, "Pids": { "shape": "__listOf__integerMin1Max2147483647", @@ -4995,6 +4995,45 @@ "NO_DISPLAY_WINDOW" ] }, + "DynamicAudioSelector": { + "type": "structure", + "members": { + "AudioDurationCorrection": { + "shape": "AudioDurationCorrection", + "locationName": "audioDurationCorrection", + "documentation": "Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion. * Force: Apply audio duration correction, either Track or Frame depending on your input, regardless of the accuracy of your input's STTS table. Your output audio and video may not be aligned or it may contain audio artifacts." + }, + "ExternalAudioFileInput": { + "shape": "__stringPatternS3Https", + "locationName": "externalAudioFileInput", + "documentation": "Specify the S3, HTTP, or HTTPS URL for your external audio file input." + }, + "LanguageCode": { + "shape": "LanguageCode", + "locationName": "languageCode", + "documentation": "Specify the language to select from your audio input. In the MediaConvert console choose from a list of languages. In your JSON job settings choose from an ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php" + }, + "Offset": { + "shape": "__integerMinNegative2147483648Max2147483647", + "locationName": "offset", + "documentation": "Specify a time delta, in milliseconds, to offset the audio from the input video.\nTo specify no offset: Keep the default value, 0.\nTo specify an offset: Enter an integer from -2147483648 to 2147483647" + }, + "SelectorType": { + "shape": "DynamicAudioSelectorType", + "locationName": "selectorType", + "documentation": "Specify which audio tracks to dynamically select from your source. To select all audio tracks: Keep the default value, All tracks. To select all audio tracks with a specific language code: Choose Language code. When you do, you must also specify a language code under the Language code setting. If there is no matching Language code in your source, then no track will be selected." 
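The new dynamic audio selectors (and the H265 deblocking switch added further down in this file) sit in job settings alongside the existing members. A rough fragment sketch with boto3; the role ARN and S3 locations are placeholders, and a real job still needs the rest of your container, bitrate, and audio-description settings:

```python
import boto3

mediaconvert = boto3.client("mediaconvert")

settings = {
    "Inputs": [
        {
            "FileInput": "s3://example-input-bucket/source.mov",  # placeholder
            # New: select audio tracks without knowing the source track layout up front.
            "DynamicAudioSelectors": {
                "Dynamic Audio Selector 1": {"SelectorType": "ALL_TRACKS"}
            },
        }
    ],
    "OutputGroups": [
        {
            "OutputGroupSettings": {
                "Type": "FILE_GROUP_SETTINGS",
                "FileGroupSettings": {"Destination": "s3://example-output-bucket/out"},
            },
            "Outputs": [
                {
                    "VideoDescription": {
                        "CodecSettings": {
                            "Codec": "H_265",
                            # Also new: turn the HEVC deblocking filter off.
                            "H265Settings": {"Deblocking": "DISABLED"},
                        }
                    }
                }
            ],
        }
    ],
}

# A complete job needs additional encode settings; this only shows where the
# new members go.
job = mediaconvert.create_job(
    Role="arn:aws:iam::111122223333:role/MediaConvertRole",  # placeholder
    Settings=settings,
)
print(job["Job"]["Id"])
```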
+ } + }, + "documentation": "Use Dynamic audio selectors when you do not know the track layout of your source when you submit your job, but want to select multiple audio tracks. When you include an audio track in your output and specify this Dynamic audio selector as the Audio source, MediaConvert creates an output audio track for each dynamically selected track. Note that when you include a Dynamic audio selector for two or more inputs, each input must have the same number of audio tracks and audio channels." + }, + "DynamicAudioSelectorType": { + "type": "string", + "documentation": "Specify which audio tracks to dynamically select from your source. To select all audio tracks: Keep the default value, All tracks. To select all audio tracks with a specific language code: Choose Language code. When you do, you must also specify a language code under the Language code setting. If there is no matching Language code in your source, then no track will be selected.", + "enum": [ + "ALL_TRACKS", + "LANGUAGE_CODE" + ] + }, "Eac3AtmosBitstreamMode": { "type": "string", "documentation": "Specify the bitstream mode for the E-AC-3 stream that the encoder emits. For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex E).", @@ -6427,6 +6466,14 @@ "MAIN_422_10BIT_HIGH" ] }, + "H265Deblocking": { + "type": "string", + "documentation": "Use Deblocking to improve the video quality of your output by smoothing the edges of macroblock artifacts created during video compression. To reduce blocking artifacts at block boundaries, and improve overall video quality: Keep the default value, Enabled. To not apply any deblocking: Choose Disabled. Visible block edge artifacts might appear in the output, especially at lower bitrates.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "H265DynamicSubGop": { "type": "string", "documentation": "Choose Adaptive to improve subjective video quality for high-motion content. This will cause the service to use fewer B-frames (which infer information based on other frames) for high-motion portions of the video and more B-frames for low-motion portions. The maximum number of B-frames is limited by the value you provide for the setting B frames between reference frames.", @@ -6602,6 +6649,11 @@ "locationName": "codecProfile", "documentation": "Represents the Profile and Tier, per the HEVC (H.265) specification. Selections are grouped as [Profile] / [Tier], so \"Main/High\" represents Main Profile with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License." }, + "Deblocking": { + "shape": "H265Deblocking", + "locationName": "deblocking", + "documentation": "Use Deblocking to improve the video quality of your output by smoothing the edges of macroblock artifacts created during video compression. To reduce blocking artifacts at block boundaries, and improve overall video quality: Keep the default value, Enabled. To not apply any deblocking: Choose Disabled. Visible block edge artifacts might appear in the output, especially at lower bitrates." + }, "DynamicSubGop": { "shape": "H265DynamicSubGop", "locationName": "dynamicSubGop", @@ -7652,6 +7704,11 @@ "locationName": "dolbyVisionMetadataXml", "documentation": "Use this setting only when your video source has Dolby Vision studio mastering metadata that is carried in a separate XML file. Specify the Amazon S3 location for the metadata XML file. MediaConvert uses this file to provide global and frame-level metadata for Dolby Vision preprocessing. 
When you specify a file here and your input also has interleaved global and frame level metadata, MediaConvert ignores the interleaved metadata and uses only the the metadata from this external XML file. Note that your IAM service role must grant MediaConvert read permissions to this file. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html." }, + "DynamicAudioSelectors": { + "shape": "__mapOfDynamicAudioSelector", + "locationName": "dynamicAudioSelectors", + "documentation": "Use Dynamic audio selectors when you do not know the track layout of your source when you submit your job, but want to select multiple audio tracks. When you include an audio track in your output and specify this Dynamic audio selector as the Audio source, MediaConvert creates an output audio track for each dynamically selected track. Note that when you include a Dynamic audio selector for two or more inputs, each input must have the same number of audio tracks and audio channels." + }, "FileInput": { "shape": "__stringMax2048PatternS3Https", "locationName": "fileInput", @@ -7889,6 +7946,11 @@ "locationName": "dolbyVisionMetadataXml", "documentation": "Use this setting only when your video source has Dolby Vision studio mastering metadata that is carried in a separate XML file. Specify the Amazon S3 location for the metadata XML file. MediaConvert uses this file to provide global and frame-level metadata for Dolby Vision preprocessing. When you specify a file here and your input also has interleaved global and frame level metadata, MediaConvert ignores the interleaved metadata and uses only the the metadata from this external XML file. Note that your IAM service role must grant MediaConvert read permissions to this file. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html." }, + "DynamicAudioSelectors": { + "shape": "__mapOfDynamicAudioSelector", + "locationName": "dynamicAudioSelectors", + "documentation": "Use Dynamic audio selectors when you do not know the track layout of your source when you submit your job, but want to select multiple audio tracks. When you include an audio track in your output and specify this Dynamic audio selector as the Audio source, MediaConvert creates an output audio track for each dynamically selected track. Note that when you include a Dynamic audio selector for two or more inputs, each input must have the same number of audio tracks and audio channels." + }, "FilterEnable": { "shape": "InputFilterEnable", "locationName": "filterEnable", @@ -8294,7 +8356,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." + "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. 
Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." }, "Inputs": { "shape": "__listOfInput", @@ -8465,7 +8527,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." + "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." }, "Inputs": { "shape": "__listOfInputTemplate", @@ -14644,6 +14706,15 @@ "shape": "CaptionSelector" } }, + "__mapOfDynamicAudioSelector": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "DynamicAudioSelector" + } + }, "__mapOf__string": { "type": "map", "key": { diff --git a/botocore/data/s3control/2018-08-20/service-2.json b/botocore/data/s3control/2018-08-20/service-2.json index f7f4126612..a234cfc299 100644 --- a/botocore/data/s3control/2018-08-20/service-2.json +++ b/botocore/data/s3control/2018-08-20/service-2.json @@ -3380,7 +3380,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" }, "GeneratedManifestEncryption":{ "type":"structure", @@ -5102,7 +5102,7 @@ }, "ExpiredObjectDeleteMarker":{ "shape":"ExpiredObjectDeleteMarker", - "documentation":"Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired. If set to false, the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.
" + "documentation":"Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to true, the delete marker will be expired. If set to false, the policy takes no action. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. To learn more about delete markers, see Working with delete markers.
" } }, "documentation":"The container of the Outposts bucket lifecycle expiration.
" @@ -7408,7 +7408,7 @@ "members":{ "TargetResource":{ "shape":"S3RegionalOrS3ExpressBucketArnString", - "documentation":"Specifies the destination bucket Amazon Resource Name (ARN) for the batch copy operation.
General purpose buckets - For example, to copy objects to a general purpose bucket named destinationBucket
, set the TargetResource
property to arn:aws:s3:::destinationBucket
.
Directory buckets - For example, to copy objects to a directory bucket named destinationBucket
in the Availability Zone; identified by the AZ ID usw2-az1
, set the TargetResource
property to arn:aws:s3express:region:account_id:/bucket/destination_bucket_base_name--usw2-az1--x-s3
.
Specifies the destination bucket Amazon Resource Name (ARN) for the batch copy operation.
General purpose buckets - For example, to copy objects to a general purpose bucket named destinationBucket
, set the TargetResource
property to arn:aws:s3:::destinationBucket
.
Directory buckets - For example, to copy objects to a directory bucket named destinationBucket
in the Availability Zone identified by the AZ ID usw2-az1
, set the TargetResource
property to arn:aws:s3express:region:account_id:/bucket/destination_bucket_base_name--usw2-az1--x-s3
. A directory bucket as a destination bucket can be in Availability Zone or Local Zone.
Copying objects across different Amazon Web Services Regions isn't supported when the source or destination bucket is in Amazon Web Services Local Zones. The source and destination buckets must have the same parent Amazon Web Services Region. Otherwise, you get an HTTP 400 Bad Request
error with the error code InvalidRequest
.
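Putting the ARN formats above into practice, a sketch of an S3 Batch Operations copy job with boto3; the account ID, role, and manifest details are placeholders, and the directory-bucket ARN form shown above can be substituted for TargetResource:

```python
import boto3

s3control = boto3.client("s3control")

response = s3control.create_job(
    AccountId="111122223333",  # placeholder
    ConfirmationRequired=False,
    Priority=10,
    RoleArn="arn:aws:iam::111122223333:role/BatchOperationsRole",  # placeholder
    Operation={
        # Copy every object in the manifest to the destination bucket.
        "S3PutObjectCopy": {"TargetResource": "arn:aws:s3:::destinationBucket"}
    },
    Manifest={
        "Spec": {
            "Format": "S3BatchOperations_CSV_20180820",
            "Fields": ["Bucket", "Key"],
        },
        "Location": {
            "ObjectArn": "arn:aws:s3:::example-manifest-bucket/manifest.csv",  # placeholder
            "ETag": "example-manifest-etag",  # placeholder
        },
    },
    Report={"Enabled": False},
)
print(response["JobId"])
```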