diff --git a/.changelog/45251.txt b/.changelog/45251.txt new file mode 100644 index 000000000000..a524bc46c3b0 --- /dev/null +++ b/.changelog/45251.txt @@ -0,0 +1,12 @@ +```release-note:new-resource +aws_s3_bucket_abac +``` +```release-note:enhancement +resource/aws_s3_bucket: Use the S3 Control tagging APIs when the `s3:TagResource`, `s3:UntagResource`, and `s3:ListTagsForResource` permissions are present +``` +```release-note:enhancement +resource/aws_s3_bucket: Tag on creation when the `s3:TagResource` permission is present +``` +```release-note:note +resource/aws_s3_bucket: To support ABAC (Attribute Based Access Control) in general purpose buckets, this resource will now attempt to send tags in the create request and use the S3 Control tagging APIs [`TagResource`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_TagResource.html), [`UntagResource`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UntagResource.html), and [`ListTagsForResource`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListTagsForResource.html) for read and update operations. The calling principal must have the corresponding `s3:TagResource`, `s3:UntagResource`, and `s3:ListTagsForResource` [IAM permissions](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html#amazons3-actions-as-permissions). If the principal lacks the appropriate permissions, the provider will fall back to tagging after creation and using the S3 tagging APIs [`PutBucketTagging`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html), [`DeleteBucketTagging`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html), and [`GetBucketTagging`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) instead. With ABAC enabled, tag modifications may fail when the fallback behavior is used. See the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/buckets-tagging-enable-abac.html) for additional details on enabling ABAC in general purpose buckets.
+``` diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index f9f031052d0d..0efbdff3ed32 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -768,10 +768,30 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta any) input.ObjectLockEnabledForBucket = aws.Bool(true) } + // Tag on create requires the s3:TagResource IAM permission + tagOnCreate := true + if input.CreateBucketConfiguration == nil { + input.CreateBucketConfiguration = &types.CreateBucketConfiguration{ + Tags: getTagsIn(ctx), + } + } else { + input.CreateBucketConfiguration.Tags = getTagsIn(ctx) + } + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { return conn.CreateBucket(ctx, input) }, errCodeOperationAborted) + if errs.Contains(err, "is not authorized to perform: s3:TagResource") { + // Remove tags and try again + input.CreateBucketConfiguration.Tags = nil + tagOnCreate = false + + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func(ctx context.Context) (any, error) { + return conn.CreateBucket(ctx, input) + }, errCodeOperationAborted) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s): %s", bucket, err) } @@ -786,8 +806,10 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta any) return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket (%s) create: %s", d.Id(), err) } - if err := bucketCreateTags(ctx, conn, d.Id(), getTagsIn(ctx)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting S3 Bucket (%s) tags: %s", d.Id(), err) + if !tagOnCreate { + if err := bucketCreateTags(ctx, conn, d.Id(), getTagsIn(ctx)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting S3 Bucket (%s) tags: %s", d.Id(), err) + } } return append(diags, resourceBucketUpdate(ctx, d, meta)...) diff --git a/internal/service/s3/bucket_abac.go b/internal/service/s3/bucket_abac.go new file mode 100644 index 000000000000..8864177c82cc --- /dev/null +++ b/internal/service/s3/bucket_abac.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + + "github.com/YakDriver/smarterr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + "github.com/hashicorp/terraform-provider-aws/internal/smerr" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_s3_bucket_abac", name="Bucket ABAC") +func newResourceBucketABAC(_ context.Context) (resource.ResourceWithConfigure, error) { + return &resourceBucketABAC{}, nil +} + +const ( + ResNameBucketABAC = "Bucket ABAC" +) + +type resourceBucketABAC struct { + framework.ResourceWithModel[resourceBucketABACModel] +} + +func (r *resourceBucketABAC) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrBucket: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 63), + }, + }, + names.AttrExpectedBucketOwner: schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + fwvalidators.AWSAccountID(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "abac_status": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[abacStatusModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrStatus: schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + }, + } +} + +func (r *resourceBucketABAC) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan resourceBucketABACModel + smerr.AddEnrich(ctx, &resp.Diagnostics, req.Plan.Get(ctx, &plan)) + if resp.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + bucket := plan.Bucket.ValueString() + if isDirectoryBucket(bucket) { + conn = r.Meta().S3ExpressClient(ctx) + } + + var input s3.PutBucketAbacInput + smerr.AddEnrich(ctx, &resp.Diagnostics, flex.Expand(ctx, plan, &input)) + if 
resp.Diagnostics.HasError() { + return + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { + return conn.PutBucketAbac(ctx, &input) + }, errCodeNoSuchBucket) + if err != nil { + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, bucket) + return + } + + smerr.AddEnrich(ctx, &resp.Diagnostics, resp.State.Set(ctx, plan)) +} + +func (r *resourceBucketABAC) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state resourceBucketABACModel + smerr.AddEnrich(ctx, &resp.Diagnostics, req.State.Get(ctx, &state)) + if resp.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + bucket := state.Bucket.ValueString() + expectedBucketOwner := state.ExpectedBucketOwner.ValueString() + if isDirectoryBucket(bucket) { + conn = r.Meta().S3ExpressClient(ctx) + } + + out, err := findBucketABAC(ctx, conn, bucket, expectedBucketOwner) + if tfresource.NotFound(err) { + resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + resp.State.RemoveResource(ctx) + return + } + if err != nil { + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, bucket) + return + } + + smerr.AddEnrich(ctx, &resp.Diagnostics, flex.Flatten(ctx, out, &state.ABACStatus)) + if resp.Diagnostics.HasError() { + return + } + + smerr.AddEnrich(ctx, &resp.Diagnostics, resp.State.Set(ctx, &state)) +} + +func (r *resourceBucketABAC) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan, state resourceBucketABACModel + smerr.AddEnrich(ctx, &resp.Diagnostics, req.Plan.Get(ctx, &plan)) + smerr.AddEnrich(ctx, &resp.Diagnostics, req.State.Get(ctx, &state)) + if resp.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + bucket := state.Bucket.ValueString() + if isDirectoryBucket(bucket) { + conn = r.Meta().S3ExpressClient(ctx) + } + + diff, d := flex.Diff(ctx, plan, state) + smerr.AddEnrich(ctx, &resp.Diagnostics, d) + if resp.Diagnostics.HasError() { + return + } + + if diff.HasChanges() { + var input s3.PutBucketAbacInput + smerr.AddEnrich(ctx, &resp.Diagnostics, flex.Expand(ctx, plan, &input)) + if resp.Diagnostics.HasError() { + return + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func(ctx context.Context) (any, error) { + return conn.PutBucketAbac(ctx, &input) + }, errCodeNoSuchBucket) + if err != nil { + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, bucket) + return + } + } + + smerr.AddEnrich(ctx, &resp.Diagnostics, resp.State.Set(ctx, &plan)) +} + +func (r *resourceBucketABAC) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var state resourceBucketABACModel + smerr.AddEnrich(ctx, &resp.Diagnostics, req.State.Get(ctx, &state)) + if resp.Diagnostics.HasError() { + return + } + + conn := r.Meta().S3Client(ctx) + bucket := state.Bucket.ValueString() + if isDirectoryBucket(bucket) { + conn = r.Meta().S3ExpressClient(ctx) + } + + var input s3.PutBucketAbacInput + smerr.AddEnrich(ctx, &resp.Diagnostics, flex.Expand(ctx, state, &input)) + if resp.Diagnostics.HasError() { + return + } + // Deleting this resource only disables ABAC on the bucket; there is no separate delete operation. + input.AbacStatus = &awstypes.AbacStatus{ + Status: awstypes.BucketAbacStatusDisabled, + } + + _, err := conn.PutBucketAbac(ctx, &input) + if err != nil { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return + } + + smerr.AddError(ctx, &resp.Diagnostics, err, smerr.ID, bucket) + return + } +} + +func (r *resourceBucketABAC) ImportState(ctx context.Context,
request resource.ImportStateRequest, response *resource.ImportStateResponse) { + bucket, expectedBucketOwner, err := parseResourceID(request.ID) + if err != nil { + response.Diagnostics.Append(fwdiag.NewParsingResourceIDErrorDiagnostic(err)) + + return + } + + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrBucket), bucket)...) + if expectedBucketOwner != "" { + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrExpectedBucketOwner), expectedBucketOwner)...) + } +} + +func findBucketABAC(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*awstypes.AbacStatus, error) { + input := s3.GetBucketAbacInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + out, err := conn.GetBucketAbac(ctx, &input) + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, smarterr.NewError(err) + } + + if out == nil || out.AbacStatus == nil { + return nil, smarterr.NewError(tfresource.NewEmptyResultError(&input)) + } + + return out.AbacStatus, nil +} + +type resourceBucketABACModel struct { + framework.WithRegionModel + ABACStatus fwtypes.ListNestedObjectValueOf[abacStatusModel] `tfsdk:"abac_status"` + Bucket types.String `tfsdk:"bucket"` + ExpectedBucketOwner types.String `tfsdk:"expected_bucket_owner"` +} + +type abacStatusModel struct { + Status types.String `tfsdk:"status"` +} diff --git a/internal/service/s3/bucket_abac_test.go b/internal/service/s3/bucket_abac_test.go new file mode 100644 index 000000000000..f128149b0f15 --- /dev/null +++ b/internal/service/s3/bucket_abac_test.go @@ -0,0 +1,267 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3_test + +import ( + "context" + "errors" + "fmt" + "testing" + + awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/terraform-plugin-testing/compare" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccS3BucketABAC_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_abac.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketABACDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketABACConfig_basic(rName, string(awstypes.BucketAbacStatusEnabled)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketABACExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrBucket), "aws_s3_bucket.test", tfjsonpath.New(names.AttrBucket), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("abac_status"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrStatus: knownvalue.StringExact(string(awstypes.BucketAbacStatusEnabled)), + }), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("abac_status"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrStatus: knownvalue.StringExact(string(awstypes.BucketAbacStatusEnabled)), + }), + })), + }, + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: acctest.AttrImportStateIdFunc(resourceName, names.AttrBucket), + ImportStateVerifyIdentifierAttribute: names.AttrBucket, + }, + }, + }) +} + +// There is no standard `_disappears` test because deletion of the resource only disables ABAC. 
+func TestAccS3BucketABAC_disappears_Bucket(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_abac.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketABACDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketABACConfig_basic(rName, string(awstypes.BucketAbacStatusEnabled)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketABACExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfs3.ResourceBucket(), bucketResourceName), + ), + ExpectNonEmptyPlan: true, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + }, + }, + }, + }) +} + +func TestAccS3BucketABAC_update(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_abac.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckBucketABACDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccBucketABACConfig_basic(rName, string(awstypes.BucketAbacStatusEnabled)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketABACExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrBucket), "aws_s3_bucket.test", tfjsonpath.New(names.AttrBucket), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("abac_status"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrStatus: knownvalue.StringExact(string(awstypes.BucketAbacStatusEnabled)), + }), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("abac_status"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrStatus: knownvalue.StringExact(string(awstypes.BucketAbacStatusEnabled)), + }), + })), + }, + }, + }, + { + Config: testAccBucketABACConfig_basic(rName, string(awstypes.BucketAbacStatusDisabled)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketABACExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrBucket), "aws_s3_bucket.test", tfjsonpath.New(names.AttrBucket), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("abac_status"), knownvalue.ListExact([]knownvalue.Check{ + 
knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrStatus: knownvalue.StringExact(string(awstypes.BucketAbacStatusDisabled)), + }), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("abac_status"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrStatus: knownvalue.StringExact(string(awstypes.BucketAbacStatusDisabled)), + }), + })), + }, + }, + }, + { + Config: testAccBucketABACConfig_basic(rName, string(awstypes.BucketAbacStatusEnabled)), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketABACExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.CompareValuePairs(resourceName, tfjsonpath.New(names.AttrBucket), "aws_s3_bucket.test", tfjsonpath.New(names.AttrBucket), compare.ValuesSame()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrExpectedBucketOwner), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("abac_status"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrStatus: knownvalue.StringExact(string(awstypes.BucketAbacStatusEnabled)), + }), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("abac_status"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrStatus: knownvalue.StringExact(string(awstypes.BucketAbacStatusEnabled)), + }), + })), + }, + }, + }, + }, + }) +} + +func testAccCheckBucketABACDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_bucket_abac" { + continue + } + + bucket := rs.Primary.Attributes[names.AttrBucket] + expectedBucketOwner := rs.Primary.Attributes[names.AttrExpectedBucketOwner] + + _, err := tfs3.FindBucketABAC(ctx, conn, bucket, expectedBucketOwner) + if tfresource.NotFound(err) { + return nil + } + if err != nil { + return create.Error(names.S3, create.ErrActionCheckingDestroyed, tfs3.ResNameBucketABAC, bucket, err) + } + + return create.Error(names.S3, create.ErrActionCheckingDestroyed, tfs3.ResNameBucketABAC, bucket, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckBucketABACExists(ctx context.Context, name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.S3, create.ErrActionCheckingExistence, tfs3.ResNameBucketABAC, name, errors.New("not found")) + } + + bucket := rs.Primary.Attributes[names.AttrBucket] + expectedBucketOwner := rs.Primary.Attributes[names.AttrExpectedBucketOwner] + if bucket == "" { + return create.Error(names.S3, create.ErrActionCheckingExistence, tfs3.ResNameBucketABAC, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(bucket) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } + + _, err := tfs3.FindBucketABAC(ctx, conn, bucket, expectedBucketOwner) + if err != nil { + 
return create.Error(names.S3, create.ErrActionCheckingExistence, tfs3.ResNameBucketABAC, bucket, err) + } + + return nil + } +} + +func testAccBucketABACConfig_basic(rName, status string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_abac" "test" { + bucket = aws_s3_bucket.test.bucket + + abac_status { + status = %[2]q + } +} +`, rName, status) +} diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index f1b79813503a..548bb39e9ac7 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -5,6 +5,7 @@ package s3 // Exports for use in tests only. var ( + ResourceBucketABAC = newResourceBucketABAC ResourceBucketAccelerateConfiguration = resourceBucketAccelerateConfiguration ResourceBucketACL = resourceBucketACL ResourceBucketAnalyticsConfiguration = resourceBucketAnalyticsConfiguration @@ -36,6 +37,7 @@ var ( EmptyBucket = emptyBucket FindAnalyticsConfiguration = findAnalyticsConfiguration FindBucket = findBucket + FindBucketABAC = findBucketABAC FindBucketACL = findBucketACL FindBucketAccelerateConfiguration = findBucketAccelerateConfiguration FindBucketLifecycleConfiguration = findBucketLifecycleConfiguration diff --git a/internal/service/s3/service_package_gen.go b/internal/service/s3/service_package_gen.go index a111efee92b4..fdbee61c06b1 100644 --- a/internal/service/s3/service_package_gen.go +++ b/internal/service/s3/service_package_gen.go @@ -30,6 +30,12 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*inttypes.S func (p *servicePackage) FrameworkResources(ctx context.Context) []*inttypes.ServicePackageFrameworkResource { return []*inttypes.ServicePackageFrameworkResource{ + { + Factory: newResourceBucketABAC, + TypeName: "aws_s3_bucket_abac", + Name: "Bucket ABAC", + Region: unique.Make(inttypes.ResourceRegionDefault()), + }, { Factory: newBucketLifecycleConfigurationResource, TypeName: "aws_s3_bucket_lifecycle_configuration", diff --git a/internal/service/s3/tags.go b/internal/service/s3/tags.go index 325974e9a3cc..829e55f70767 100644 --- a/internal/service/s3/tags.go +++ b/internal/service/s3/tags.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/endpoints" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfs3control "github.com/hashicorp/terraform-provider-aws/internal/service/s3control" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/types/option" @@ -174,7 +175,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res switch resourceType { case "Bucket": - tags, err = bucketListTags(ctx, conn, identifier) + // Attempt ListTagsForResource first, fall back to GetBucketTagging + tags, err = tfs3control.ListTags(ctx, c.S3ControlClient(ctx), bucketARN(ctx, c, identifier), c.AccountID(ctx)) + if errs.Contains(err, "is not authorized to perform: s3:ListTagsForResource") { + tags, err = bucketListTags(ctx, conn, identifier) + } case "DirectoryBucket": tags, err = tfs3control.ListTags(ctx, c.S3ControlClient(ctx), identifier, c.AccountID(ctx)) @@ -221,7 +226,12 @@ func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier, r switch resourceType { case "Bucket": - return bucketUpdateTags(ctx, conn, identifier, oldTags, newTags) + // Attempt Tag/UntagResource first, 
fall back to Put/DeleteBucketTagging + err := tfs3control.UpdateTags(ctx, c.S3ControlClient(ctx), bucketARN(ctx, c, identifier), c.AccountID(ctx), oldTags, newTags) + if errs.Contains(err, "is not authorized to perform: s3:TagResource") || errs.Contains(err, "is not authorized to perform: s3:UntagResource") { + return bucketUpdateTags(ctx, conn, identifier, oldTags, newTags) + } + return err case "DirectoryBucket": return tfs3control.UpdateTags(ctx, c.S3ControlClient(ctx), identifier, c.AccountID(ctx), oldTags, newTags) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index d3f0878464a1..1c4f5995e37b 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -14,6 +14,8 @@ Provides a S3 bucket resource. -> Object Lock can be enabled by using the `object_lock_enable` attribute or by using the [`aws_s3_bucket_object_lock_configuration`](/docs/providers/aws/r/s3_bucket_object_lock_configuration.html) resource. Please note, that by using the resource, Object Lock can be enabled/disabled without destroying and recreating the bucket. +-> To support ABAC (Attribute Based Access Control) in general purpose buckets, this resource attempts to send tags in the create request and uses the S3 Control tagging APIs [`TagResource`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_TagResource.html), [`UntagResource`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UntagResource.html), and [`ListTagsForResource`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListTagsForResource.html) for read and update operations. The calling principal must have the corresponding `s3:TagResource`, `s3:UntagResource`, and `s3:ListTagsForResource` [IAM permissions](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html#amazons3-actions-as-permissions). If the principal lacks the appropriate permissions, the provider falls back to tagging after creation and to using the S3 tagging APIs [`PutBucketTagging`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html), [`DeleteBucketTagging`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html), and [`GetBucketTagging`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) instead. With ABAC enabled, tag modifications may fail when the fallback behavior is used. See the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/buckets-tagging-enable-abac.html) for additional details on enabling ABAC in general purpose buckets. + ## Example Usage ### Private Bucket With Tags diff --git a/website/docs/r/s3_bucket_abac.html.markdown b/website/docs/r/s3_bucket_abac.html.markdown new file mode 100644 index 000000000000..48f220ef9efb --- /dev/null +++ b/website/docs/r/s3_bucket_abac.html.markdown @@ -0,0 +1,90 @@ +--- +subcategory: "S3 (Simple Storage)" +layout: "aws" +page_title: "AWS: aws_s3_bucket_abac" +description: |- + Manages ABAC (Attribute Based Access Control) for an AWS S3 (Simple Storage) Bucket. +--- + +# Resource: aws_s3_bucket_abac + +Manages ABAC (Attribute Based Access Control) for an AWS S3 (Simple Storage) Bucket. +See the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/buckets-tagging-enable-abac.html) on enabling ABAC for general purpose buckets for additional information.
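+
+Once ABAC is enabled, IAM policies can authorize bucket actions based on the tags attached to the bucket, typically through the `aws:ResourceTag` condition key. The following is a minimal, illustrative sketch (the `team` tag key, the policy document name, and the bucket ARN are assumptions for the example, not values produced by this resource):
+
+```terraform
+# Illustrative only: allow listing a bucket when it carries the tag team = engineering.
+data "aws_iam_policy_document" "abac_example" {
+  statement {
+    actions   = ["s3:ListBucket"]
+    resources = ["arn:aws:s3:::bucket-name"]
+
+    condition {
+      test     = "StringEquals"
+      variable = "aws:ResourceTag/team"
+      values   = ["engineering"]
+    }
+  }
+}
+```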
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "bucket-name" +} + +resource "aws_s3_bucket_abac" "example" { + bucket = aws_s3_bucket.example.bucket + + abac_status { + status = "Enabled" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `bucket` - (Required, Forces new resource) General purpose bucket that you want to manage the ABAC configuration for. +* `abac_status` - (Required) ABAC status configuration. See [`abac_status` Block](#abac_status-block) for details. + +The following arguments are optional: + +* `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. +* `region` - (Optional) Region where this resource will be [managed](https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints). Defaults to the Region set in the [provider configuration](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#aws-configuration-reference). + +### `abac_status` Block + +The `abac_status` configuration block supports the following arguments: + +* `status` - (Required) ABAC status of the general purpose bucket. +Valid values are `Enabled` and `Disabled`. +By default, ABAC is disabled for all Amazon S3 general purpose buckets. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import S3 (Simple Storage) Bucket ABAC using the `bucket` or `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: + +If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: + +```terraform +import { + to = aws_s3_bucket_abac.example + id = "bucket-name" +} +``` + +If the owner (account ID) of the source bucket differs from the account used to configure the Terraform AWS Provider, import using the `bucket` and `expected_bucket_owner` separated by a comma (`,`): + +```terraform +import { + to = aws_s3_bucket_abac.example + id = "bucket-name,123456789012" +} +``` + +Using `terraform import`, import S3 (Simple Storage) Bucket ABAC using the `bucket` or `bucket` and `expected_bucket_owner` separated by a comma (`,`). For example: + +If the owner (account ID) of the source bucket is the same account used to configure the Terraform AWS Provider, import using the `bucket`: + +```console +% terraform import aws_s3_bucket_abac.example bucket-name +``` + +If the owner (account ID) of the source bucket differs from the account used to configure the Terraform AWS Provider, import using the `bucket` and `expected_bucket_owner` separated by a comma (`,`): + +```console +% terraform import aws_s3_bucket_abac.example bucket-name,123456789012 +```
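+
+With ABAC enabled, tag changes made through the `aws_s3_bucket` resource go through the S3 Control tagging APIs, so the calling principal needs the matching IAM permissions. The following is a minimal sketch of such a policy (the policy document name and bucket ARN are placeholders):
+
+```terraform
+# Illustrative only: permissions used by the provider's ABAC-aware tagging path.
+data "aws_iam_policy_document" "tagging_example" {
+  statement {
+    actions = [
+      "s3:TagResource",
+      "s3:UntagResource",
+      "s3:ListTagsForResource",
+    ]
+    resources = ["arn:aws:s3:::bucket-name"]
+  }
+}
+```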