Object lock feature implementation for Storage (#9363) (#6588)
* Early implementation of bucket object lock setting.

* Complete implementation of object lock for buckets.

* Complete implementation, waiting for allowlist.

* Tests pass against test environment.

* Fixes bug in object test.

* Update mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go



* Update mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go



* Update mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb



* Update mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go



* Update mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb



* Updates from review comments

* Update mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go



* Update mmv1/third_party/terraform/website/docs/r/storage_bucket_object.html.markdown



* Removing non-deterministic date from test

* Removing unused variable

* fixing nested_customer_encryption anchor link

---------


[upstream:4f308083373dd8a5b409e42fbe3e7fdd561853c8]

Signed-off-by: Modular Magician <[email protected]>
modular-magician authored Oct 31, 2023
1 parent 72b114a commit 662ecd8
Showing 7 changed files with 254 additions and 5 deletions.
6 changes: 6 additions & 0 deletions .changelog/9363.txt
@@ -0,0 +1,6 @@
```release-note:enhancement
storage: added `retention` field to `google_storage_bucket_object` resource
```
```release-note:enhancement
storage: added `enable_object_retention` to `google_storage_bucket` resource
```
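Taken together, the two changelog entries describe a bucket-level flag and an object-level block. A minimal configuration sketch combining them, modeled on the acceptance-test fixtures added later in this diff (bucket and object names are placeholders):

```hcl
resource "google_storage_bucket" "example" {
  name                    = "example-object-retention-bucket"
  location                = "US"
  force_destroy           = true
  enable_object_retention = true
}

resource "google_storage_bucket_object" "example" {
  name    = "example-object"
  bucket  = google_storage_bucket.example.name
  content = "object retention example"

  # Object-level retention; the bucket above must be created with
  # enable_object_retention = true, as in the test fixtures in this commit.
  retention {
    mode              = "Unlocked"
    retain_until_time = "2040-01-01T02:03:04Z"
  }
}
```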
26 changes: 25 additions & 1 deletion google-beta/services/storage/resource_storage_bucket.go
@@ -257,6 +257,13 @@ func ResourceStorageBucket() *schema.Resource {
Description: `The bucket's Lifecycle Rules configuration.`,
},

"enable_object_retention": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Description: `Enables each object in the bucket to have its own retention policy, which prevents deletion until stored for a specific length of time.`,
},

"versioning": {
Type: schema.TypeList,
Optional: true,
@@ -595,7 +602,11 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error

err = transport_tpg.Retry(transport_tpg.RetryOptions{
RetryFunc: func() error {
res, err = config.NewStorageClient(userAgent).Buckets.Insert(project, sb).Do()
insertCall := config.NewStorageClient(userAgent).Buckets.Insert(project, sb)
if d.Get("enable_object_retention").(bool) {
insertCall.EnableObjectRetention(true)
}
res, err = insertCall.Do()
return err
},
})
@@ -1122,6 +1133,16 @@ func flattenBucketRetentionPolicy(bucketRetentionPolicy *storage.BucketRetention
return bucketRetentionPolicies
}

func flattenBucketObjectRetention(bucketObjectRetention *storage.BucketObjectRetention) bool {
if bucketObjectRetention == nil {
return false
}
if bucketObjectRetention.Mode == "Enabled" {
return true
}
return false
}

func expandBucketVersioning(configured interface{}) *storage.BucketVersioning {
versionings := configured.([]interface{})
if len(versionings) == 0 {
@@ -1621,6 +1642,9 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res
if err := d.Set("logging", flattenBucketLogging(res.Logging)); err != nil {
return fmt.Errorf("Error setting logging: %s", err)
}
if err := d.Set("enable_object_retention", flattenBucketObjectRetention(res.ObjectRetention)); err != nil {
return fmt.Errorf("Error setting object retention: %s", err)
}
if err := d.Set("versioning", flattenBucketVersioning(res.Versioning)); err != nil {
return fmt.Errorf("Error setting versioning: %s", err)
}
80 changes: 77 additions & 3 deletions google-beta/services/storage/resource_storage_bucket_object.go
@@ -209,10 +209,33 @@ func ResourceStorageBucketObject() *schema.Resource {
},
},

"retention": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
ConflictsWith: []string{"event_based_hold"},
Description: `Object level retention configuration.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"retain_until_time": {
Type: schema.TypeString,
Required: true,
Description: `Time in RFC 3339 (e.g. 2030-01-01T02:03:04Z) until which object retention protects this object.`,
},
"mode": {
Type: schema.TypeString,
Required: true,
Description: `The object retention mode. Supported values include: "Unlocked", "Locked".`,
},
},
},
},

"event_based_hold": {
Type: schema.TypeBool,
Optional: true,
Description: `Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any).`,
Type: schema.TypeBool,
Optional: true,
ConflictsWith: []string{"retention"},
Description: `Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any).`,
},

"temporary_hold": {
@@ -314,6 +337,10 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{})
object.KmsKeyName = v.(string)
}

if v, ok := d.GetOk("retention"); ok {
object.Retention = expandObjectRetention(v)
}

if v, ok := d.GetOk("event_based_hold"); ok {
object.EventBasedHold = v.(bool)
}
@@ -359,6 +386,16 @@ func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{})
return fmt.Errorf("Error retrieving object during update %s: %s", name, err)
}

hasRetentionChanges := d.HasChange("retention")
if hasRetentionChanges {
if v, ok := d.GetOk("retention"); ok {
res.Retention = expandObjectRetention(v)
} else {
res.Retention = nil
res.NullFields = append(res.NullFields, "Retention")
}
}

if d.HasChange("event_based_hold") {
v := d.Get("event_based_hold")
res.EventBasedHold = v.(bool)
@@ -370,6 +407,9 @@ func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{})
}

updateCall := objectsService.Update(bucket, name, res)
if hasRetentionChanges {
updateCall.OverrideUnlockedRetention(true)
}
_, err = updateCall.Do()

if err != nil {
@@ -445,6 +485,9 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e
if err := d.Set("media_link", res.MediaLink); err != nil {
return fmt.Errorf("Error setting media_link: %s", err)
}
if err := d.Set("retention", flattenObjectRetention(res.Retention)); err != nil {
return fmt.Errorf("Error setting retention: %s", err)
}
if err := d.Set("event_based_hold", res.EventBasedHold); err != nil {
return fmt.Errorf("Error setting event_based_hold: %s", err)
}
@@ -515,3 +558,34 @@ func expandCustomerEncryption(input []interface{}) map[string]string {
}
return expanded
}

func expandObjectRetention(configured interface{}) *storage.ObjectRetention {
retentions := configured.([]interface{})
if len(retentions) == 0 {
return nil
}
retention := retentions[0].(map[string]interface{})

objectRetention := &storage.ObjectRetention{
RetainUntilTime: retention["retain_until_time"].(string),
Mode: retention["mode"].(string),
}

return objectRetention
}

func flattenObjectRetention(objectRetention *storage.ObjectRetention) []map[string]interface{} {
retentions := make([]map[string]interface{}, 0, 1)

if objectRetention == nil {
return retentions
}

retention := map[string]interface{}{
"mode": objectRetention.Mode,
"retain_until_time": objectRetention.RetainUntilTime,
}

retentions = append(retentions, retention)
return retentions
}
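For context on the update path in resourceStorageBucketObjectUpdate above: when the `retention` block is dropped from an existing object's configuration, the provider clears `Retention` via `NullFields` and sends `overrideUnlockedRetention=true` on the update call, so an `Unlocked` policy can be removed in a single apply. A hedged sketch of the resulting configuration, mirroring the disabled-retention test fixture later in this diff (names are placeholders):

```hcl
# After initially applying the object with an Unlocked retention block, removing the
# block (as below) is handled by the update path: the provider clears Retention via
# NullFields and sets OverrideUnlockedRetention(true) on the objects.update call.
resource "google_storage_bucket_object" "example" {
  name    = "example-object"
  bucket  = "example-object-retention-bucket" # bucket created with enable_object_retention = true
  content = "object retention example"
  # retention block intentionally removed
}
```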
80 changes: 80 additions & 0 deletions google-beta/services/storage/resource_storage_bucket_object_test.go
@@ -417,6 +417,48 @@ func TestAccStorageObject_holds(t *testing.T) {
})
}

func TestAccStorageObject_retention(t *testing.T) {
t.Parallel()

bucketName := acctest.TestBucketName(t)
data := []byte(content)
h := md5.New()
if _, err := h.Write(data); err != nil {
t.Errorf("error calculating md5: %v", err)
}
dataMd5 := base64.StdEncoding.EncodeToString(h.Sum(nil))
testFile := getNewTmpTestFile(t, "tf-test")
if err := ioutil.WriteFile(testFile.Name(), data, 0644); err != nil {
t.Errorf("error writing file: %v", err)
}

acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccStorageObjectDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testGoogleStorageBucketsObjectRetention(bucketName, "2040-01-01T02:03:04.000Z"),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleStorageObject(t, bucketName, objectName, dataMd5),
),
},
{
Config: testGoogleStorageBucketsObjectRetention(bucketName, "2040-01-02T02:03:04.000Z"),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleStorageObject(t, bucketName, objectName, dataMd5),
),
},
{
Config: testGoogleStorageBucketsObjectRetentionDisabled(bucketName),
Check: resource.ComposeTestCheckFunc(
testAccCheckGoogleStorageObject(t, bucketName, objectName, dataMd5),
),
},
},
})
}

func testAccCheckGoogleStorageObject(t *testing.T, bucket, object, md5 string) resource.TestCheckFunc {
return testAccCheckGoogleStorageObjectWithEncryption(t, bucket, object, md5, "")
}
@@ -648,6 +690,44 @@ resource "google_storage_bucket_object" "object" {
`, bucketName, objectName, content, customerEncryptionKey)
}

func testGoogleStorageBucketsObjectRetention(bucketName string, retainUntilTime string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
location = "US"
force_destroy = true
enable_object_retention = true
}
resource "google_storage_bucket_object" "object" {
name = "%s"
bucket = google_storage_bucket.bucket.name
content = "%s"
retention {
mode = "Unlocked"
retain_until_time = "%s"
}
}
`, bucketName, objectName, content, retainUntilTime)
}

func testGoogleStorageBucketsObjectRetentionDisabled(bucketName string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
location = "US"
force_destroy = true
enable_object_retention = true
}
resource "google_storage_bucket_object" "object" {
name = "%s"
bucket = google_storage_bucket.bucket.name
content = "%s"
}
`, bucketName, objectName, content)
}

func testGoogleStorageBucketsObjectHolds(bucketName string, eventBasedHold bool, temporaryHold bool) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
52 changes: 52 additions & 0 deletions google-beta/services/storage/resource_storage_bucket_test.go
@@ -692,6 +692,47 @@ func TestAccStorageBucket_forceDestroyObjectDeleteError(t *testing.T) {
})
}

func TestAccStorageBucket_enable_object_retention(t *testing.T) {
t.Parallel()

var bucket storage.Bucket
bucketName := acctest.TestBucketName(t)

acctest.VcrTest(t, resource.TestCase{
PreCheck: func() { acctest.AccTestPreCheck(t) },
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
CheckDestroy: testAccStorageBucketDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccStorageBucket_enable_object_retention(bucketName, "true"),
Check: resource.ComposeTestCheckFunc(
testAccCheckStorageBucketExists(
t, "google_storage_bucket.bucket", bucketName, &bucket),
),
},
{
ResourceName: "google_storage_bucket.bucket",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"force_destroy"},
},
{
Config: testAccStorageBucket_enable_object_retention(bucketName, "false"),
Check: resource.ComposeTestCheckFunc(
testAccCheckStorageBucketExists(
t, "google_storage_bucket.bucket", bucketName, &bucket),
),
},
{
ResourceName: "google_storage_bucket.bucket",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"force_destroy"},
},
},
})
}

func TestAccStorageBucket_versioning(t *testing.T) {
t.Parallel()

@@ -1496,6 +1537,17 @@ resource "google_storage_bucket" "bucket" {
`, bucketName)
}

func testAccStorageBucket_enable_object_retention(bucketName string, enabled string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
location = "US"
force_destroy = "true"
enable_object_retention = "%s"
}
`, bucketName, enabled)
}

func testAccStorageBucket_versioning(bucketName, enabled string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
3 changes: 3 additions & 0 deletions website/docs/r/storage_bucket.html.markdown
@@ -120,6 +120,9 @@ The following arguments are supported:

* `encryption` - (Optional) The bucket's encryption configuration. Structure is [documented below](#nested_encryption).

* `enable_object_retention` - (Optional, Default: false) Enables [object retention](https://cloud.google.com/storage/docs/object-lock) on a storage bucket.


* `requester_pays` - (Optional, Default: false) Enables [Requester Pays](https://cloud.google.com/storage/docs/requester-pays) on a storage bucket.

* `uniform_bucket_level_access` - (Optional, Default: false) Enables [Uniform bucket-level access](https://cloud.google.com/storage/docs/uniform-bucket-level-access) access to a bucket.
12 changes: 11 additions & 1 deletion website/docs/r/storage_bucket_object.html.markdown
@@ -67,7 +67,9 @@ One of the following is required:
* `content_type` - (Optional) [Content-Type](https://tools.ietf.org/html/rfc7231#section-3.1.1.5) of the object data. Defaults to "application/octet-stream" or "text/plain; charset=utf-8".

* `customer_encryption` - (Optional) Enables object encryption with Customer-Supplied Encryption Key (CSEK). [Google [documentation about](#nested_customer_encryption) CSEK.](https://cloud.google.com/storage/docs/encryption/customer-supplied-keys)
Structure is documented below.
Structure is [documented below](#nested_customer_encryption).

* `retention` - (Optional) The [object retention](http://cloud.google.com/storage/docs/object-lock) settings for the object. The retention settings allow an object to be retained until a provided date. Structure is [documented below](#nested_retention).

* `event_based_hold` - (Optional) Whether an object is under [event-based hold](https://cloud.google.com/storage/docs/object-holds#hold-types). Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any).

@@ -89,6 +91,14 @@ One of the following is required:

* `encryption_key` - (Required) Base64 encoded Customer-Supplied Encryption Key.

<a name="nested_retention"></a>The `retention` block supports:

* `mode` - (Required) The retention policy mode. Either `Locked` or `Unlocked`.

* `retain_until_time` - (Required) The time to retain the object until in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.


## Attributes Reference

In addition to the arguments listed above, the following computed attributes are
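A related constraint from the object schema in this commit: `retention` is declared with `ConflictsWith` on `event_based_hold`, so the two cannot be combined on a single object. A sketch of a configuration that Terraform would reject at validation time (resource names and values are placeholders):

```hcl
resource "google_storage_bucket_object" "invalid_example" {
  name    = "example-object"
  bucket  = "example-object-retention-bucket"
  content = "object retention example"

  event_based_hold = true

  # Invalid: "retention" conflicts with "event_based_hold" in the resource schema,
  # so terraform validate/plan fails when both are set on the same object.
  retention {
    mode              = "Unlocked"
    retain_until_time = "2040-01-01T02:03:04Z"
  }
}
```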
