diff --git a/docs/resources/gcore_lifecyclepolicy.md b/docs/resources/gcore_lifecyclepolicy.md
new file mode 100644
index 0000000..14752ab
--- /dev/null
+++ b/docs/resources/gcore_lifecyclepolicy.md
@@ -0,0 +1,141 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "gcore_lifecyclepolicy Resource - terraform-provider-gcorelabs"
+subcategory: ""
+description: |-
+  Represents a lifecycle policy. Use it to periodically take snapshots of the volumes it manages.
+---
+
+# gcore_lifecyclepolicy (Resource)
+
+Represents a lifecycle policy. Use it to periodically take snapshots of the volumes it manages.
+
+## Example Usage
+
+```terraform
+provider "gcore" {
+ user_name = "test"
+ password = "test"
+ gcore_platform = "https://api.gcdn.co"
+ gcore_api = "https://api.cloud.gcorelabs.com"
+}
+
+resource "gcore_lifecyclepolicy" "lp" {
+ project_id = 1
+ region_id = 1
+ name = "test"
+ status = "active"
+ action = "volume_snapshot"
+ volume {
+ id = "fe93bfdd-4ce3-4041-b89b-4f10d0d49498"
+ }
+ schedule {
+ max_quantity = 4
+ interval {
+ weeks = 1
+ days = 2
+ hours = 3
+ minutes = 4
+ }
+ resource_name_template = "reserve snap of the volume {volume_id}"
+ retention_time {
+ weeks = 4
+ days = 3
+ hours = 2
+ minutes = 1
+ }
+ }
+}
+```
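+
+A schedule can also be driven by a `cron` block instead of an `interval` block; exactly one of the two may appear per schedule. The following sketch (IDs and timings are illustrative) takes a snapshot at 02:00 UTC every day and deletes each one after a week:
+
+```terraform
+resource "gcore_lifecyclepolicy" "lp_cron" {
+  project_id = 1
+  region_id  = 1
+  name       = "test-cron"
+  volume {
+    id = "fe93bfdd-4ce3-4041-b89b-4f10d0d49498"
+  }
+  schedule {
+    max_quantity = 7
+    cron {
+      timezone = "UTC"
+      hour     = "2"
+      minute   = "0"
+    }
+    retention_time {
+      weeks = 1
+    }
+  }
+}
+```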
+
+
+## Schema
+
+### Required
+
+- **name** (String)
+
+### Optional
+
+- **action** (String)
+- **id** (String) The ID of this resource.
+- **project_id** (Number)
+- **project_name** (String)
+- **region_id** (Number)
+- **region_name** (String)
+- **schedule** (Block List) (see [below for nested schema](#nestedblock--schedule))
+- **status** (String)
+- **volume** (Block Set) List of managed volumes (see [below for nested schema](#nestedblock--volume))
+
+### Read-Only
+
+- **user_id** (Number)
+
+
+### Nested Schema for `schedule`
+
+Required:
+
+- **max_quantity** (Number) Maximum number of stored resources
+
+Optional:
+
+- **cron** (Block List, Max: 1) Use for taking actions at specified moments in time. Exactly one of interval and cron blocks should be provided (see [below for nested schema](#nestedblock--schedule--cron))
+- **interval** (Block List, Max: 1) Use for taking actions with equal time intervals between them. Exactly one of interval and cron blocks should be provided (see [below for nested schema](#nestedblock--schedule--interval))
+- **resource_name_template** (String) Used to name snapshots. {volume_id} is substituted with volume.id on creation
+- **retention_time** (Block List, Max: 1) If set, new resources will be deleted after the specified time (see [below for nested schema](#nestedblock--schedule--retention_time))
+
+Read-Only:
+
+- **id** (String) The ID of this resource.
+- **type** (String)
+
+
+### Nested Schema for `schedule.cron`
+
+Optional:
+
+- **day** (String) Either single asterisk or comma-separated list of integers (1-31)
+- **day_of_week** (String) Either single asterisk or comma-separated list of integers (0-6)
+- **hour** (String) Either single asterisk or comma-separated list of integers (0-23)
+- **minute** (String) Either single asterisk or comma-separated list of integers (0-59)
+- **month** (String) Either single asterisk or comma-separated list of integers (1-12)
+- **timezone** (String)
+- **week** (String) Either single asterisk or comma-separated list of integers (1-53)
+
+
+
+### Nested Schema for `schedule.interval`
+
+Optional:
+
+- **days** (Number) Number of days to wait between actions
+- **hours** (Number) Number of hours to wait between actions
+- **minutes** (Number) Number of minutes to wait between actions
+- **weeks** (Number) Number of weeks to wait between actions
+
+
+
+### Nested Schema for `schedule.retention_time`
+
+Optional:
+
+- **days** (Number) Number of days to wait before deleting snapshot
+- **hours** (Number) Number of hours to wait before deleting snapshot
+- **minutes** (Number) Number of minutes to wait before deleting snapshot
+- **weeks** (Number) Number of weeks to wait before deleting snapshot
+
+
+
+
+### Nested Schema for `volume`
+
+Required:
+
+- **id** (String) ID of the managed volume
+
+Read-Only:
+
+- **name** (String)
+
+
diff --git a/examples/resources/gcore_lifecyclepolicy/resource.tf b/examples/resources/gcore_lifecyclepolicy/resource.tf
new file mode 100644
index 0000000..bbe8f40
--- /dev/null
+++ b/examples/resources/gcore_lifecyclepolicy/resource.tf
@@ -0,0 +1,33 @@
+provider "gcore" {
+ user_name = "test"
+ password = "test"
+ gcore_platform = "https://api.gcdn.co"
+ gcore_api = "https://api.cloud.gcorelabs.com"
+}
+
+resource "gcore_lifecyclepolicy" "lp" {
+ project_id = 1
+ region_id = 1
+ name = "test"
+ status = "active"
+ action = "volume_snapshot"
+ volume {
+ id = "fe93bfdd-4ce3-4041-b89b-4f10d0d49498"
+ }
+ schedule {
+ max_quantity = 4
+ interval {
+ weeks = 1
+ days = 2
+ hours = 3
+ minutes = 4
+ }
+ resource_name_template = "reserve snap of the volume {volume_id}"
+ retention_time {
+ weeks = 4
+ days = 3
+ hours = 2
+ minutes = 1
+ }
+ }
+}
\ No newline at end of file
diff --git a/gcore/provider.go b/gcore/provider.go
index d10037c..965fb5e 100644
--- a/gcore/provider.go
+++ b/gcore/provider.go
@@ -18,6 +18,8 @@ import (
const (
ProviderOptPermanentToken = "permanent_api_token"
ProviderOptSkipCredsAuthErr = "ignore_creds_auth_error"
+
+ lifecyclePolicyResource = "gcore_lifecyclepolicy"
)
func Provider() *schema.Provider {
@@ -101,6 +103,7 @@ func Provider() *schema.Provider {
"gcore_cdn_origingroup": resourceCDNOriginGroup(),
"gcore_cdn_rule": resourceCDNRule(),
"gcore_cdn_sslcert": resourceCDNCert(),
+ lifecyclePolicyResource: resourceLifecyclePolicy(),
},
DataSourcesMap: map[string]*schema.Resource{
"gcore_project": dataSourceProject(),
@@ -163,7 +166,7 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData) (interface{}
}))
cdnService := gcdn.NewService(cdnProvider)
- stHost, stPath, err := ExtractHosAndPath(storageAPI)
+ stHost, stPath, err := ExtractHostAndPath(storageAPI)
if err != nil {
return nil, diag.FromErr(fmt.Errorf("storage api url: %w", err))
}
diff --git a/gcore/provider_test.go b/gcore/provider_test.go
index 003dbd5..23b47f3 100644
--- a/gcore/provider_test.go
+++ b/gcore/provider_test.go
@@ -224,8 +224,8 @@ func objectInfo(resourceType string) string {
// resourceType is a word in capital letters
keyID := fmt.Sprintf("TEST_%s_ID", resourceType)
keyName := fmt.Sprintf("TEST_%s_NAME", resourceType)
- if regionID, exists := os.LookupEnv(keyID); exists {
- return fmt.Sprintf(`%s_id = %s`, strings.ToLower(resourceType), regionID)
+ if objectID, exists := os.LookupEnv(keyID); exists {
+ return fmt.Sprintf(`%s_id = %s`, strings.ToLower(resourceType), objectID)
}
return fmt.Sprintf(`%s_name = "%s"`, strings.ToLower(resourceType), os.Getenv(keyName))
}
diff --git a/gcore/resource_gcore_lifecyclepolicy.go b/gcore/resource_gcore_lifecyclepolicy.go
new file mode 100644
index 0000000..f5ff976
--- /dev/null
+++ b/gcore/resource_gcore_lifecyclepolicy.go
@@ -0,0 +1,556 @@
+package gcore
+
+import (
+ "context"
+ "fmt"
+ "github.com/G-Core/gcorelabscloud-go/gcore/lifecyclepolicy/v1/lifecyclepolicy"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+ "log"
+ "regexp"
+ "strconv"
+)
+
+const (
+ lifecyclePolicyPoint = "lifecycle_policy"
+ // Maybe move to utils and use for other resources
+ nameRegexString = `^[a-zA-Z0-9][a-zA-Z 0-9._\-]{1,61}[a-zA-Z0-9._]$`
+)
+
+var (
+ // Maybe move to utils and use for other resources
+ nameRegex = regexp.MustCompile(nameRegexString)
+)
+
+func resourceLifecyclePolicy() *schema.Resource {
+ return &schema.Resource{
+ CreateContext: resourceLifecyclePolicyCreate,
+ ReadContext: resourceLifecyclePolicyRead,
+ UpdateContext: resourceLifecyclePolicyUpdate,
+ DeleteContext: resourceLifecyclePolicyDelete,
+		Description:   "Represents a lifecycle policy. Use it to periodically take snapshots of the volumes it manages.",
+ Schema: map[string]*schema.Schema{
+ "project_id": {
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "region_id": {
+ Type: schema.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ },
+ "project_name": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ ExactlyOneOf: []string{"project_id", "project_name"},
+ },
+ "region_name": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ ExactlyOneOf: []string{"region_id", "region_name"},
+ },
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validation.StringMatch(nameRegex, ""),
+ },
+ "status": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: lifecyclepolicy.PolicyStatusActive.String(),
+ ValidateFunc: validation.StringInSlice(lifecyclepolicy.PolicyStatus("").StringList(), false),
+ },
+ "action": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: lifecyclepolicy.PolicyActionVolumeSnapshot.String(),
+ ForceNew: true,
+ ValidateFunc: validation.StringInSlice(lifecyclepolicy.PolicyAction("").StringList(), false),
+ },
+ "volume": {
+ Type: schema.TypeSet,
+ Optional: true,
+ Description: "List of managed volumes",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": {
+						Type:         schema.TypeString,
+						Required:     true,
+						Description:  "ID of the managed volume",
+						ValidateFunc: validation.IsUUID,
+ },
+ "name": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "schedule": {
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_quantity": {
+ Type: schema.TypeInt,
+ Required: true,
+ ValidateFunc: validation.IntBetween(1, 10000),
+ Description: "Maximum number of stored resources",
+ },
+ "interval": {
+ Type: schema.TypeList,
+ MinItems: 1,
+ MaxItems: 1,
+ Description: "Use for taking actions with equal time intervals between them. Exactly one of interval and cron blocks should be provided",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "weeks": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: intervalScheduleParamDescription("week"),
+ },
+ "days": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: intervalScheduleParamDescription("day"),
+ },
+ "hours": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: intervalScheduleParamDescription("hour"),
+ },
+ "minutes": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: intervalScheduleParamDescription("minute"),
+ },
+ },
+ },
+ Optional: true,
+ },
+ "cron": {
+ Type: schema.TypeList,
+ MinItems: 1,
+ MaxItems: 1,
+ Description: "Use for taking actions at specified moments of time. Exactly one of interval and cron blocks should be provided",
+ Elem: &schema.Resource{ // TODO: validate?
+ Schema: map[string]*schema.Schema{
+ "timezone": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "UTC",
+ },
+ "month": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "*",
+ Description: cronScheduleParamDescription(1, 12),
+ },
+ "week": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "*",
+ Description: cronScheduleParamDescription(1, 53),
+ },
+ "day": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "*",
+ Description: cronScheduleParamDescription(1, 31),
+ },
+ "day_of_week": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "*",
+ Description: cronScheduleParamDescription(0, 6),
+ },
+ "hour": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "*",
+ Description: cronScheduleParamDescription(0, 23),
+ },
+ "minute": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "0",
+ Description: cronScheduleParamDescription(0, 59),
+ },
+ },
+ },
+ Optional: true,
+ },
+ "resource_name_template": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "reserve snap of the volume {volume_id}",
+ Description: "Used to name snapshots. {volume_id} is substituted with volume.id on creation",
+ },
+ "retention_time": {
+ Type: schema.TypeList,
+ MinItems: 1,
+ MaxItems: 1,
+ Description: "If it is set, new resource will be deleted after time",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "weeks": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: retentionTimerParamDescription("week"),
+ },
+ "days": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: retentionTimerParamDescription("day"),
+ },
+ "hours": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: retentionTimerParamDescription("hour"),
+ },
+ "minutes": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: 0,
+ Description: retentionTimerParamDescription("minute"),
+ },
+ },
+ },
+ Optional: true,
+ },
+ "id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "type": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "user_id": {
+ Type: schema.TypeInt,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceLifecyclePolicyCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client, err := CreateClient(m.(*Config).Provider, d, lifecyclePolicyPoint, versionPointV1)
+ if err != nil {
+ return diag.Errorf("Error creating client: %s", err)
+ }
+
+ log.Printf("[DEBUG] Start of LifecyclePolicy creating")
+ opts, err := buildLifecyclePolicyCreateOpts(d)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+ policy, err := lifecyclepolicy.Create(client, *opts).Extract()
+ if err != nil {
+ return diag.Errorf("Error creating lifecycle policy: %s", err)
+ }
+ d.SetId(strconv.Itoa(policy.ID))
+ log.Printf("[DEBUG] Finish of LifecyclePolicy %s creating", d.Id())
+ return resourceLifecyclePolicyRead(ctx, d, m)
+}
+
+func resourceLifecyclePolicyRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client, err := CreateClient(m.(*Config).Provider, d, lifecyclePolicyPoint, versionPointV1)
+ if err != nil {
+ return diag.Errorf("Error creating client: %s", err)
+ }
+ id := d.Id()
+ integerId, err := strconv.Atoi(id)
+ if err != nil {
+ return diag.Errorf("Error converting lifecycle policy ID to integer: %s", err)
+ }
+
+ log.Printf("[DEBUG] Start of LifecyclePolicy %s reading", id)
+ policy, err := lifecyclepolicy.Get(client, integerId, lifecyclepolicy.GetOpts{NeedVolumes: true}).Extract()
+ if err != nil {
+ return diag.Errorf("Error getting lifecycle policy: %s", err)
+ }
+
+ _ = d.Set("name", policy.Name)
+ _ = d.Set("status", policy.Status)
+ _ = d.Set("action", policy.Action)
+ _ = d.Set("user_id", policy.UserID)
+ if err = d.Set("volume", flattenVolumes(policy.Volumes)); err != nil {
+ return diag.Errorf("error setting lifecycle policy volumes: %s", err)
+ }
+ if err = d.Set("schedule", flattenSchedules(policy.Schedules)); err != nil {
+ return diag.Errorf("error setting lifecycle policy schedules: %s", err)
+ }
+
+ log.Printf("[DEBUG] Finish of LifecyclePolicy %s reading", id)
+ return nil
+}
+
+func resourceLifecyclePolicyUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client, err := CreateClient(m.(*Config).Provider, d, lifecyclePolicyPoint, versionPointV1)
+ if err != nil {
+ return diag.Errorf("Error creating client: %s", err)
+ }
+ id := d.Id()
+ integerId, err := strconv.Atoi(id)
+ if err != nil {
+ return diag.Errorf("Error converting lifecycle policy ID to integer: %s", err)
+ }
+
+ log.Printf("[DEBUG] Start of LifecyclePolicy updating")
+ _, err = lifecyclepolicy.Update(client, integerId, buildLifecyclePolicyUpdateOpts(d)).Extract()
+ if err != nil {
+ return diag.Errorf("Error updating lifecycle policy: %s", err)
+ }
+
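+	// The schedule block is ForceNew, so a schedule change replaces the whole policy; only volume membership is reconciled in place here.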
+ if d.HasChange("volume") {
+ oldVolumes, newVolumes := d.GetChange("volume")
+ toRemove, toAdd := volumeSymmetricDifference(oldVolumes.(*schema.Set), newVolumes.(*schema.Set))
+ _, err = lifecyclepolicy.RemoveVolumes(client, integerId, lifecyclepolicy.RemoveVolumesOpts{VolumeIds: toRemove}).Extract()
+ if err != nil {
+ return diag.Errorf("Error removing volumes from lifecycle policy: %s", err)
+ }
+ _, err = lifecyclepolicy.AddVolumes(client, integerId, lifecyclepolicy.AddVolumesOpts{VolumeIds: toAdd}).Extract()
+ if err != nil {
+ return diag.Errorf("Error adding volumes to lifecycle policy: %s", err)
+ }
+ }
+ log.Printf("[DEBUG] Finish of LifecyclePolicy %v updating", integerId)
+ return resourceLifecyclePolicyRead(ctx, d, m)
+}
+
+func resourceLifecyclePolicyDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
+ client, err := CreateClient(m.(*Config).Provider, d, lifecyclePolicyPoint, versionPointV1)
+ if err != nil {
+ return diag.Errorf("Error creating client: %s", err)
+ }
+ id := d.Id()
+ integerId, err := strconv.Atoi(id)
+ if err != nil {
+ return diag.Errorf("Error converting lifecycle policy ID to integer: %s", err)
+ }
+
+ log.Printf("[DEBUG] Start of LifecyclePolicy %s deleting", id)
+ err = lifecyclepolicy.Delete(client, integerId)
+ if err != nil {
+ return diag.Errorf("Error deleting lifecycle policy: %s", err)
+ }
+ d.SetId("")
+ log.Printf("[DEBUG] Finish of LifecyclePolicy %s deleting", id)
+ return nil
+}
+
+func expandIntervalSchedule(flat map[string]interface{}) *lifecyclepolicy.CreateIntervalScheduleOpts {
+ return &lifecyclepolicy.CreateIntervalScheduleOpts{
+ Weeks: flat["weeks"].(int),
+ Days: flat["days"].(int),
+ Hours: flat["hours"].(int),
+ Minutes: flat["minutes"].(int),
+ }
+}
+
+func expandCronSchedule(flat map[string]interface{}) *lifecyclepolicy.CreateCronScheduleOpts {
+ return &lifecyclepolicy.CreateCronScheduleOpts{
+ Timezone: flat["timezone"].(string),
+ Week: flat["week"].(string),
+ DayOfWeek: flat["day_of_week"].(string),
+ Month: flat["month"].(string),
+ Day: flat["day"].(string),
+ Hour: flat["hour"].(string),
+ Minute: flat["minute"].(string),
+ }
+}
+
+func expandRetentionTimer(flat []interface{}) *lifecyclepolicy.RetentionTimer {
+ if len(flat) > 0 {
+ rawRetention := flat[0].(map[string]interface{})
+ return &lifecyclepolicy.RetentionTimer{
+ Weeks: rawRetention["weeks"].(int),
+ Days: rawRetention["days"].(int),
+ Hours: rawRetention["hours"].(int),
+ Minutes: rawRetention["minutes"].(int),
+ }
+ }
+ return nil
+}
+
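+// expandSchedule converts a single flattened schedule block into CreateScheduleOpts,
+// enforcing that exactly one of the interval and cron sub-blocks is present.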
+func expandSchedule(flat map[string]interface{}) (expanded lifecyclepolicy.CreateScheduleOpts, err error) {
+ t := lifecyclepolicy.ScheduleType("")
+ intervalSlice := flat["interval"].([]interface{})
+ cronSlice := flat["cron"].([]interface{})
+ if len(intervalSlice)+len(cronSlice) != 1 {
+ return nil, fmt.Errorf("exactly one of interval and cron blocks should be provided")
+ }
+ if len(intervalSlice) > 0 {
+ t = lifecyclepolicy.ScheduleTypeInterval
+ expanded = expandIntervalSchedule(intervalSlice[0].(map[string]interface{}))
+ } else {
+ t = lifecyclepolicy.ScheduleTypeCron
+ expanded = expandCronSchedule(cronSlice[0].(map[string]interface{}))
+ }
+ expanded.SetCommonCreateScheduleOpts(lifecyclepolicy.CommonCreateScheduleOpts{
+ Type: t,
+ ResourceNameTemplate: flat["resource_name_template"].(string),
+ MaxQuantity: flat["max_quantity"].(int),
+ RetentionTime: expandRetentionTimer(flat["retention_time"].([]interface{})),
+ })
+ return
+}
+
+func expandSchedules(flat []interface{}) ([]lifecyclepolicy.CreateScheduleOpts, error) {
+ expanded := make([]lifecyclepolicy.CreateScheduleOpts, len(flat))
+ for i, x := range flat {
+ exp, err := expandSchedule(x.(map[string]interface{}))
+ if err != nil {
+ return nil, err
+ }
+ expanded[i] = exp
+ }
+ return expanded, nil
+}
+
+func expandVolumeIds(flat []interface{}) []string {
+ expanded := make([]string, len(flat))
+ for i, x := range flat {
+ expanded[i] = x.(map[string]interface{})["id"].(string)
+ }
+ return expanded
+}
+
+func buildLifecyclePolicyCreateOpts(d *schema.ResourceData) (*lifecyclepolicy.CreateOpts, error) {
+ schedules, err := expandSchedules(d.Get("schedule").([]interface{}))
+ if err != nil {
+ return nil, err
+ }
+ opts := &lifecyclepolicy.CreateOpts{
+ Name: d.Get("name").(string),
+ Status: lifecyclepolicy.PolicyStatus(d.Get("status").(string)),
+ Schedules: schedules,
+ VolumeIds: expandVolumeIds(d.Get("volume").(*schema.Set).List()),
+ }
+
+	// Action is a required field from the API's point of view, but optional for us
+ if action, ok := d.GetOk("action"); ok {
+ opts.Action = lifecyclepolicy.PolicyAction(action.(string))
+ } else {
+ opts.Action = lifecyclepolicy.PolicyActionVolumeSnapshot
+ }
+ return opts, nil
+}
+
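+// volumeSymmetricDifference returns the IDs of volumes present only in oldVolumes
+// (to be removed from the policy) and those present only in newVolumes (to be added).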
+func volumeSymmetricDifference(oldVolumes, newVolumes *schema.Set) ([]string, []string) {
+ toRemove := make([]string, 0)
+ for _, v := range oldVolumes.List() {
+ if !newVolumes.Contains(v) {
+ toRemove = append(toRemove, v.(map[string]interface{})["id"].(string))
+ }
+ }
+ toAdd := make([]string, 0)
+ for _, v := range newVolumes.List() {
+ if !oldVolumes.Contains(v) {
+ toAdd = append(toAdd, v.(map[string]interface{})["id"].(string))
+ }
+ }
+ return toRemove, toAdd
+}
+
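+// buildLifecyclePolicyUpdateOpts covers name and status only: volumes are reconciled via separate add/remove calls, and schedule changes force replacement.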
+func buildLifecyclePolicyUpdateOpts(d *schema.ResourceData) lifecyclepolicy.UpdateOpts {
+ opts := lifecyclepolicy.UpdateOpts{
+ Name: d.Get("name").(string),
+ Status: lifecyclepolicy.PolicyStatus(d.Get("status").(string)),
+ }
+ return opts
+}
+
+func flattenIntervalSchedule(expanded lifecyclepolicy.IntervalSchedule) interface{} {
+ return []map[string]int{{
+ "weeks": expanded.Weeks,
+ "days": expanded.Days,
+ "hours": expanded.Hours,
+ "minutes": expanded.Minutes,
+ }}
+}
+
+func flattenCronSchedule(expanded lifecyclepolicy.CronSchedule) interface{} {
+ return []map[string]string{{
+ "timezone": expanded.Timezone,
+ "week": expanded.Week,
+ "day_of_week": expanded.DayOfWeek,
+ "month": expanded.Month,
+ "day": expanded.Day,
+ "hour": expanded.Hour,
+ "minute": expanded.Minute,
+ }}
+}
+
+func flattenRetentionTimer(expanded *lifecyclepolicy.RetentionTimer) interface{} {
+ if expanded != nil {
+ return []map[string]int{{
+ "weeks": expanded.Weeks,
+ "days": expanded.Days,
+ "hours": expanded.Hours,
+ "minutes": expanded.Minutes,
+ }}
+ }
+ return []interface{}{}
+}
+
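+// flattenSchedule maps a Schedule returned by the API onto the nested block structure stored in Terraform state.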
+func flattenSchedule(expanded lifecyclepolicy.Schedule) map[string]interface{} {
+ common := expanded.GetCommonSchedule()
+ flat := map[string]interface{}{
+ "max_quantity": common.MaxQuantity,
+ "resource_name_template": common.ResourceNameTemplate,
+ "retention_time": flattenRetentionTimer(common.RetentionTime),
+ "id": common.ID,
+ "type": common.Type,
+ }
+ switch common.Type {
+ case lifecyclepolicy.ScheduleTypeInterval:
+ flat["interval"] = flattenIntervalSchedule(expanded.(lifecyclepolicy.IntervalSchedule))
+ case lifecyclepolicy.ScheduleTypeCron:
+ flat["cron"] = flattenCronSchedule(expanded.(lifecyclepolicy.CronSchedule))
+ }
+ return flat
+}
+
+func flattenSchedules(expanded []lifecyclepolicy.Schedule) []map[string]interface{} {
+ flat := make([]map[string]interface{}, len(expanded))
+ for i, x := range expanded {
+ flat[i] = flattenSchedule(x)
+ }
+ return flat
+}
+
+func flattenVolumes(expanded []lifecyclepolicy.Volume) []map[string]string {
+ flat := make([]map[string]string, len(expanded))
+ for i, volume := range expanded {
+ flat[i] = map[string]string{"id": volume.ID, "name": volume.Name}
+ }
+ return flat
+}
+
+func cronScheduleParamDescription(min, max int) string {
+ return fmt.Sprintf("Either single asterisk or comma-separated list of integers (%v-%v)", min, max)
+}
+
+func intervalScheduleParamDescription(unit string) string {
+ return fmt.Sprintf("Number of %ss to wait between actions", unit)
+}
+
+func retentionTimerParamDescription(unit string) string {
+ return fmt.Sprintf("Number of %ss to wait before deleting snapshot", unit)
+}
diff --git a/gcore/resource_gcore_lifecyclepolicy_test.go b/gcore/resource_gcore_lifecyclepolicy_test.go
new file mode 100644
index 0000000..e228ba5
--- /dev/null
+++ b/gcore/resource_gcore_lifecyclepolicy_test.go
@@ -0,0 +1,230 @@
+package gcore
+
+import (
+ "fmt"
+ "github.com/G-Core/gcorelabscloud-go/gcore/lifecyclepolicy/v1/lifecyclepolicy"
+ "github.com/G-Core/gcorelabscloud-go/gcore/network/v1/networks"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestAccLifecyclePolicy(t *testing.T) {
+ // Templates
+ resName := "acctest"
+ fullLPName := lifecyclePolicyResource + "." + resName
+ volumeId := "gcore_volume." + resName + ".id"
+ cronScheduleConfig := func(cron lifecyclepolicy.CreateCronScheduleOpts) string {
+ return fmt.Sprintf(`
+ schedule {
+ resource_name_template = "%s"
+ max_quantity = %d
+ cron {
+ timezone = "%s"
+ hour = "%s"
+ }
+ }`, cron.ResourceNameTemplate, cron.MaxQuantity, cron.Timezone, cron.Hour)
+ }
+ intervalScheduleConfig := func(interval lifecyclepolicy.CreateIntervalScheduleOpts) string {
+ return fmt.Sprintf(`
+ schedule {
+ resource_name_template = "%s"
+ max_quantity = %d
+ retention_time {
+ hours = %d
+ }
+ interval {
+ weeks = %d
+ }
+ }`, interval.ResourceNameTemplate, interval.MaxQuantity, interval.RetentionTime.Hours, interval.Weeks)
+ }
+ malformedScheduleConfig := `
+ schedule {
+ max_quantity = 1
+ interval {
+ weeks = 1
+ }
+ cron {
+ week = "1"
+ }
+ }`
+ volumeConfig := fmt.Sprintf(`
+resource "gcore_volume" "%s" {
+ %s
+ %s
+ name = "test-volume"
+ type_name = "standard"
+ size = 1
+}`, resName, projectInfo(), regionInfo())
+ policyConfig := func(opts lifecyclepolicy.CreateOpts, schedules string) string {
+ var volumes string
+ for _, id := range opts.VolumeIds {
+ volumes += fmt.Sprintf(`
+ volume {
+ id = %s
+ }`, id)
+ }
+ return fmt.Sprintf(`
+resource "%s" "%s" {
+ %s
+ %s
+ name = "%s"
+ status = "%s"
+ %s
+ %s
+}`, lifecyclePolicyResource, resName, projectInfo(), regionInfo(), opts.Name, opts.Status, volumes, schedules)
+ }
+
+ // Options
+ create := lifecyclepolicy.CreateOpts{
+ Name: "policy0",
+ Status: lifecyclepolicy.PolicyStatusPaused,
+ VolumeIds: []string{},
+ }
+ update1 := lifecyclepolicy.CreateOpts{
+ Name: "policy1",
+ Status: lifecyclepolicy.PolicyStatusActive,
+ VolumeIds: []string{volumeId},
+ }
+ update2 := lifecyclepolicy.CreateOpts{
+ Name: "policy2",
+ Status: lifecyclepolicy.PolicyStatusActive,
+ VolumeIds: []string{},
+ }
+ cronSchedule := lifecyclepolicy.CreateCronScheduleOpts{
+ CommonCreateScheduleOpts: lifecyclepolicy.CommonCreateScheduleOpts{
+ ResourceNameTemplate: "template_0",
+ MaxQuantity: 3,
+ },
+ Timezone: "Europe/London",
+ Hour: "2,8",
+ }
+ intervalSchedule := lifecyclepolicy.CreateIntervalScheduleOpts{
+ CommonCreateScheduleOpts: lifecyclepolicy.CommonCreateScheduleOpts{
+ ResourceNameTemplate: "template_1",
+ MaxQuantity: 4,
+ RetentionTime: &lifecyclepolicy.RetentionTimer{
+ Hours: 100,
+ },
+ },
+ Weeks: 1,
+ }
+
+ // Tests
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccLifecyclePolicyDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: volumeConfig + policyConfig(create, cronScheduleConfig(cronSchedule)),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckResourceExists(fullLPName),
+ resource.TestCheckResourceAttr(fullLPName, "name", create.Name),
+ resource.TestCheckResourceAttr(fullLPName, "status", create.Status.String()),
+ resource.TestCheckResourceAttr(fullLPName, "volume.#", "0"),
+ resource.TestCheckResourceAttr(fullLPName, "action", lifecyclepolicy.PolicyActionVolumeSnapshot.String()),
+ resource.TestCheckResourceAttrSet(fullLPName, "user_id"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.#", "1"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.max_quantity", strconv.Itoa(cronSchedule.MaxQuantity)),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.interval.#", "0"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.cron.#", "1"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.cron.0.timezone", cronSchedule.Timezone),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.cron.0.hour", cronSchedule.Hour),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.cron.0.minute", "0"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.cron.0.month", "*"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.resource_name_template", cronSchedule.ResourceNameTemplate),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.retention_time.#", "0"),
+ resource.TestCheckResourceAttrSet(fullLPName, "schedule.0.id"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.type", lifecyclepolicy.ScheduleTypeCron.String()),
+ ),
+ },
+ {
+ Config: volumeConfig + policyConfig(update1, cronScheduleConfig(cronSchedule)),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckResourceExists(fullLPName),
+ resource.TestCheckResourceAttr(fullLPName, "name", update1.Name),
+ resource.TestCheckResourceAttr(fullLPName, "status", update1.Status.String()),
+ resource.TestCheckResourceAttr(fullLPName, "volume.#", "1"),
+ ),
+ },
+ {
+ Config: volumeConfig + policyConfig(update2, cronScheduleConfig(cronSchedule)),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckResourceExists(fullLPName),
+ resource.TestCheckResourceAttr(fullLPName, "name", update2.Name),
+ resource.TestCheckResourceAttr(fullLPName, "volume.#", "0"),
+ ),
+ },
+ { // Delete policy, so we can test another schedule.
+ // TODO: For some reason, it doesn't call Create otherwise, even though "schedule" is ForceNew
+ Config: volumeConfig,
+ },
+ {
+ Config: volumeConfig + policyConfig(create, intervalScheduleConfig(intervalSchedule)),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckResourceExists(fullLPName),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.#", "1"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.max_quantity", strconv.Itoa(intervalSchedule.MaxQuantity)),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.interval.#", "1"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.interval.0.weeks", strconv.Itoa(intervalSchedule.Weeks)),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.interval.0.days", "0"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.cron.#", "0"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.resource_name_template", intervalSchedule.ResourceNameTemplate),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.retention_time.#", "1"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.retention_time.0.hours", strconv.Itoa(intervalSchedule.RetentionTime.Hours)),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.retention_time.0.days", "0"),
+ resource.TestCheckResourceAttrSet(fullLPName, "schedule.0.id"),
+ resource.TestCheckResourceAttr(fullLPName, "schedule.0.type", lifecyclepolicy.ScheduleTypeInterval.String()),
+ ),
+ },
+ { // Delete policy, so we can test another schedule.
+ // TODO: For some reason, it doesn't call Create otherwise, even though "schedule" is ForceNew
+ Config: volumeConfig,
+ },
+ {
+ Config: volumeConfig + policyConfig(create, malformedScheduleConfig),
+ ExpectError: regexp.MustCompile("exactly one of interval and cron blocks should be provided"),
+ },
+ },
+ })
+}
+
+func testAccLifecyclePolicyDestroy(s *terraform.State) error {
+ config := testAccProvider.Meta().(*Config)
+ volumesClient, err := CreateTestClient(config.Provider, volumesPoint, versionPointV1)
+ if err != nil {
+ return err
+ }
+ lifecyclePolicyClient, err := CreateTestClient(config.Provider, lifecyclePolicyPoint, versionPointV1)
+ if err != nil {
+ return err
+ }
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type == "gcore_volume" {
+			_, err := volumes.Get(volumesClient, rs.Primary.ID).Extract()
+ if err == nil {
+ return fmt.Errorf("volume still exists")
+ }
+ if !strings.Contains(err.Error(), "not found") {
+ return err
+ }
+ } else if rs.Type == lifecyclePolicyResource {
+ id, err := strconv.Atoi(rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error converting lifecycle policy ID to integer: %s", err)
+ }
+ _, err = lifecyclepolicy.Get(lifecyclePolicyClient, id, lifecyclepolicy.GetOpts{}).Extract()
+ if err == nil {
+ return fmt.Errorf("policy still exists")
+ }
+ if !strings.Contains(err.Error(), "not exist") {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/gcore/utils.go b/gcore/utils.go
index e38dcd2..f20b414 100644
--- a/gcore/utils.go
+++ b/gcore/utils.go
@@ -589,8 +589,8 @@ func StructToMap(obj interface{}) (newMap map[string]interface{}, err error) {
return
}
-// ExtractHosAndPath from url
-func ExtractHosAndPath(uri string) (host, path string, err error) {
+// ExtractHostAndPath from url
+func ExtractHostAndPath(uri string) (host, path string, err error) {
if uri == "" {
return "", "", fmt.Errorf("empty uri")
}
diff --git a/gcore/utils_test.go b/gcore/utils_test.go
index 5715343..32b93ef 100644
--- a/gcore/utils_test.go
+++ b/gcore/utils_test.go
@@ -43,16 +43,16 @@ func TestExtractHosAndPath(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- gotHost, gotPath, err := ExtractHosAndPath(tt.args.uri)
+ gotHost, gotPath, err := ExtractHostAndPath(tt.args.uri)
if (err != nil) != tt.wantErr {
- t.Errorf("ExtractHosAndPath() error = %v, wantErr %v", err, tt.wantErr)
+ t.Errorf("ExtractHostAndPath() error = %v, wantErr %v", err, tt.wantErr)
return
}
if gotHost != tt.wantHost {
- t.Errorf("ExtractHosAndPath() gotHost = %v, want %v", gotHost, tt.wantHost)
+ t.Errorf("ExtractHostAndPath() gotHost = %v, want %v", gotHost, tt.wantHost)
}
if gotPath != tt.wantPath {
- t.Errorf("ExtractHosAndPath() gotPath = %v, want %v", gotPath, tt.wantPath)
+ t.Errorf("ExtractHostAndPath() gotPath = %v, want %v", gotPath, tt.wantPath)
}
})
}
diff --git a/go.mod b/go.mod
index bd4de4a..d9bd993 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.14
require (
github.com/G-Core/gcorelabs-storage-sdk-go v0.0.9
github.com/G-Core/gcorelabscdn-go v0.0.0-20210503173228-b4ac8b2402ff
- github.com/G-Core/gcorelabscloud-go v0.4.6
+ github.com/G-Core/gcorelabscloud-go v0.4.14
github.com/google/uuid v1.1.2 // indirect
github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320
github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.3
diff --git a/go.sum b/go.sum
index f580f76..a2bac1d 100644
--- a/go.sum
+++ b/go.sum
@@ -45,8 +45,8 @@ github.com/G-Core/gcorelabs-storage-sdk-go v0.0.9 h1:6uyKbknI8Q2pIJApPBf6JA0CN5O
github.com/G-Core/gcorelabs-storage-sdk-go v0.0.9/go.mod h1:BZef79y4G28n8ic3x6iQWbW+mtpHPSUyJRfIRSkeAJw=
github.com/G-Core/gcorelabscdn-go v0.0.0-20210503173228-b4ac8b2402ff h1:kIH66Shwb0Y9kvBgykpzmQn2soiHDTCJ/Rr5cQQ1cOk=
github.com/G-Core/gcorelabscdn-go v0.0.0-20210503173228-b4ac8b2402ff/go.mod h1:iSGXaTvZBzDHQW+rKFS918BgFVpONcyLEijwh8WsXpE=
-github.com/G-Core/gcorelabscloud-go v0.4.6 h1:+pNeTKWuhR52Qavnzt+r6rCDJWumymkJTjKQV4+Tl5Y=
-github.com/G-Core/gcorelabscloud-go v0.4.6/go.mod h1:Z1MF80mWagEUrxygtYkvW/MJEYNmIUPsIEYBB3cKjOM=
+github.com/G-Core/gcorelabscloud-go v0.4.14 h1:i3YNlo50tCVripozlyp0PG6DBZC5F3hp6vOz8xN9fJg=
+github.com/G-Core/gcorelabscloud-go v0.4.14/go.mod h1:Z1MF80mWagEUrxygtYkvW/MJEYNmIUPsIEYBB3cKjOM=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=