[Refactor] Refactor cce nodes

muneeb-jan committed Dec 5, 2024
1 parent e71f312 commit 2a21d75
Showing 2 changed files with 12 additions and 35 deletions.
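The change repeated across both files is the removal of intermediate result wrappers from the nodes API: every call of the form nodes.Get(...).Extract() (and the ExtractJob/ExtractErr variants) becomes a plain call that returns the value and the error directly. A minimal standalone sketch of the two calling conventions — Node and GetResult are illustrative stand-ins here, not the gophertelekomcloud SDK's actual types:

package main

import (
	"errors"
	"fmt"
)

// Node is an illustrative stand-in for nodes.Nodes in the SDK.
type Node struct{ ID string }

// GetResult models the old wrapper style: the call's outcome is stored
// and only surfaced when Extract() is invoked.
type GetResult struct {
	node *Node
	err  error
}

func (r GetResult) Extract() (*Node, error) { return r.node, r.err }

// getOld models the pre-refactor call shape: nodes.Get(...).Extract().
func getOld(id string) GetResult {
	if id == "" {
		return GetResult{err: errors.New("missing node id")}
	}
	return GetResult{node: &Node{ID: id}}
}

// getNew models the post-refactor call shape: value and error returned
// directly, which is what every call site in this commit switches to.
func getNew(id string) (*Node, error) {
	if id == "" {
		return nil, errors.New("missing node id")
	}
	return &Node{ID: id}, nil
}

func main() {
	before, err := getOld("node-1").Extract() // old: deferred extraction
	fmt.Println(before, err)

	after, err := getNew("node-1") // new: direct return
	fmt.Println(after, err)
}

The direct (value, error) return is the idiomatic Go shape; the wrapper style mainly deferred error extraction to the caller, which every call site in this diff performed immediately anyway.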
File 1 of 2: CCE node v3 acceptance tests
@@ -18,7 +18,6 @@ import (
 
 const (
 	resourceNameNode  = "opentelekomcloud_cce_node_v3.node_1"
-	resourceNameNode2 = "opentelekomcloud_cce_node_v3.node_2"
 	resourceNameNode3 = "opentelekomcloud_cce_node_v3.node_3"
 	resourceNameNode4 = "opentelekomcloud_cce_node_v3.node_4"
 )
@@ -130,7 +129,6 @@ func TestAccCCENodesV3Timeout(t *testing.T) {
 }
 func TestAccCCENodesV3OS(t *testing.T) {
 	var node nodes.Nodes
-	var node2 nodes.Nodes
 	var node3 nodes.Nodes
 	var node4 nodes.Nodes
 
@@ -148,8 +146,6 @@ func TestAccCCENodesV3OS(t *testing.T) {
 			Check: resource.ComposeTestCheckFunc(
 				testAccCheckCCENodeV3Exists(resourceNameNode, shared.DataSourceClusterName, &node),
 				resource.TestCheckResourceAttr(resourceNameNode, "os", "EulerOS 2.5"),
-				testAccCheckCCENodeV3Exists(resourceNameNode2, shared.DataSourceClusterName, &node2),
-				resource.TestCheckResourceAttr(resourceNameNode2, "os", "CentOS 7.7"),
 				testAccCheckCCENodeV3Exists(resourceNameNode3, shared.DataSourceClusterName, &node3),
 				resource.TestCheckResourceAttr(resourceNameNode3, "os", "EulerOS 2.9"),
 				testAccCheckCCENodeV3Exists(resourceNameNode4, shared.DataSourceClusterName, &node4),
@@ -428,7 +424,7 @@ func testAccCheckCCENodeV3Destroy(s *terraform.State) error {
 			continue
 		}
 
-		_, err := nodes.Get(client, clusterID, rs.Primary.ID).Extract()
+		_, err := nodes.Get(client, clusterID, rs.Primary.ID)
 		if err == nil {
 			return fmt.Errorf("node still exists")
 		}
@@ -461,7 +457,7 @@ func testAccCheckCCENodeV3Exists(n string, cluster string, node *nodes.Nodes) re
 			return fmt.Errorf("error creating OpenTelekomCloud CCE client: %s", err)
 		}
 
-		found, err := nodes.Get(client, c.Primary.ID, rs.Primary.ID).Extract()
+		found, err := nodes.Get(client, c.Primary.ID, rs.Primary.ID)
 		if err != nil {
 			return err
 		}
@@ -499,25 +495,6 @@ resource "opentelekomcloud_cce_node_v3" "node_1" {
   }
 }
-resource "opentelekomcloud_cce_node_v3" "node_2" {
-  cluster_id        = data.opentelekomcloud_cce_cluster_v3.cluster.id
-  name              = "test-node"
-  flavor_id         = "s2.large.2"
-  os                = "CentOS 7.7"
-  availability_zone = "%[2]s"
-  key_pair          = "%[3]s"
-  root_volume {
-    size       = 40
-    volumetype = "SATA"
-  }
-  data_volumes {
-    size       = 100
-    volumetype = "SATA"
-  }
-}
 resource "opentelekomcloud_cce_node_v3" "node_3" {
   cluster_id = data.opentelekomcloud_cce_cluster_v3.cluster.id
File 2 of 2: CCE node v3 resource implementation
@@ -599,7 +599,7 @@ func resourceCCENodeV3Create(ctx context.Context, d *schema.ResourceData, meta i
 	}
 
 	log.Printf("[DEBUG] Create Options: %#v", createOpts)
-	node, err := nodes.Create(client, clusterID, createOpts).Extract()
+	node, err := nodes.Create(client, clusterID, createOpts)
 	switch err.(type) {
 	case golangsdk.ErrDefault403:
 		retryNode, err := recursiveCreate(ctx, client, createOpts, clusterID)
@@ -640,7 +640,7 @@ func resourceCCENodeV3Create(ctx context.Context, d *schema.ResourceData, meta i
 
 // getNodeIDFromJob wait until job starts (status Running) and returns Node ID
 func getNodeIDFromJob(ctx context.Context, client *golangsdk.ServiceClient, jobID string, timeout time.Duration) (string, error) {
-	job, err := nodes.GetJobDetails(client, jobID).ExtractJob()
+	job, err := nodes.GetJobDetails(client, jobID)
 	if err != nil {
 		return "", fmt.Errorf("error fetching OpenTelekomCloud Job Details: %s", err)
 	}
@@ -650,7 +650,7 @@ func getNodeIDFromJob(ctx context.Context, client *golangsdk.ServiceClient, jobI
 		Pending: []string{"Initializing"},
 		Target:  []string{"Running"},
 		Refresh: func() (interface{}, string, error) {
-			subJob, err := nodes.GetJobDetails(client, jobResourceId).ExtractJob()
+			subJob, err := nodes.GetJobDetails(client, jobResourceId)
 			if err != nil {
 				return nil, "ERROR", fmt.Errorf("error fetching OpenTelekomCloud Job Details: %s", err)
 			}
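For context on the block above: Refresh follows the Terraform plugin SDK's StateRefreshFunc contract — return the current object, a state string, and an error — and StateChangeConf keeps re-invoking it until one of the Target states appears. A self-contained model of that loop, assuming hypothetical job states and omitting the SDK's timeout and backoff handling:

package main

import (
	"fmt"
	"time"
)

// Job is a hypothetical stand-in for the SDK's job details type.
type Job struct {
	Status string
	NodeID string
}

// refreshFunc mirrors the SDK's StateRefreshFunc shape: (result, state, error).
type refreshFunc func() (interface{}, string, error)

// waitForState polls refresh until it reports the target state, the way
// StateChangeConf.WaitForStateContext does (simplified: no timeout, no
// Pending-state validation, fixed interval).
func waitForState(refresh refreshFunc, target string, interval time.Duration) (interface{}, error) {
	for {
		obj, state, err := refresh()
		if err != nil {
			return nil, err
		}
		if state == target {
			return obj, nil
		}
		time.Sleep(interval)
	}
}

func main() {
	tries := 0
	refresh := func() (interface{}, string, error) {
		tries++
		if tries < 3 {
			return &Job{Status: "Initializing"}, "Initializing", nil
		}
		return &Job{Status: "Running", NodeID: "node-123"}, "Running", nil
	}
	job, err := waitForState(refresh, "Running", 10*time.Millisecond)
	fmt.Println(job, err)
}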
@@ -691,7 +691,7 @@ func resourceCCENodeV3Read(ctx context.Context, d *schema.ResourceData, meta int
 	}
 
 	clusterID := d.Get("cluster_id").(string)
-	node, err := nodes.Get(client, clusterID, d.Id()).Extract()
+	node, err := nodes.Get(client, clusterID, d.Id())
 	if err != nil {
 		if _, ok := err.(golangsdk.ErrDefault404); ok {
 			d.SetId("")
@@ -856,7 +856,7 @@ func resourceCCENodeV3Update(ctx context.Context, d *schema.ResourceData, meta i
 	updateOpts.Metadata.Name = d.Get("name").(string)
 
 	clusterID := d.Get("cluster_id").(string)
-	_, err = nodes.Update(client, clusterID, d.Id(), updateOpts).Extract()
+	_, err = nodes.Update(client, clusterID, d.Id(), updateOpts)
 	if err != nil {
 		return fmterr.Errorf("error updating OpenTelekomCloud CCE node: %s", err)
 	}
@@ -941,7 +941,7 @@ func resourceCCENodeV3Delete(ctx context.Context, d *schema.ResourceData, meta i
 	}
 
 	clusterID := d.Get("cluster_id").(string)
-	if err := nodes.Delete(client, clusterID, d.Id()).ExtractErr(); err != nil {
+	if err := nodes.Delete(client, clusterID, d.Id()); err != nil {
 		return fmterr.Errorf("error deleting OpenTelekomCloud CCE Cluster: %w", err)
 	}
 	stateConf := &resource.StateChangeConf{
@@ -1115,7 +1115,7 @@ func checkCCENodeV3PublicIpParams(d *schema.ResourceData) {
 
 func waitForCceNodeActive(cceClient *golangsdk.ServiceClient, clusterId, nodeId string) resource.StateRefreshFunc {
 	return func() (interface{}, string, error) {
-		n, err := nodes.Get(cceClient, clusterId, nodeId).Extract()
+		n, err := nodes.Get(cceClient, clusterId, nodeId)
 		if err != nil {
 			return nil, "", err
 		}
@@ -1128,7 +1128,7 @@ func waitForCceNodeDelete(cceClient *golangsdk.ServiceClient, clusterId, nodeId
 	return func() (interface{}, string, error) {
 		log.Printf("[DEBUG] Attempting to delete OpenTelekomCloud CCE Node %s.\n", nodeId)
 
-		r, err := nodes.Get(cceClient, clusterId, nodeId).Extract()
+		r, err := nodes.Get(cceClient, clusterId, nodeId)
 
 		if err != nil {
 			if _, ok := err.(golangsdk.ErrDefault404); ok {
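The 404 branch above is the terminal condition for deletion: once nodes.Get comes back with golangsdk.ErrDefault404, the node is treated as gone rather than as a failure. A standalone sketch of that mapping, with notFoundError as a stand-in for the SDK's error type:

package main

import (
	"errors"
	"fmt"
)

// notFoundError stands in for golangsdk.ErrDefault404.
type notFoundError struct{}

func (notFoundError) Error() string { return "resource not found (404)" }

// deleteState maps a Get result to a deletion state: nil error means the
// node is still visible, a 404 means deletion finished, anything else is
// a real failure that should abort the wait.
func deleteState(err error) (state string, failure error) {
	if err == nil {
		return "Deleting", nil
	}
	if _, ok := err.(notFoundError); ok {
		return "Deleted", nil
	}
	return "", err
}

func main() {
	fmt.Println(deleteState(nil))                     // Deleting <nil>
	fmt.Println(deleteState(notFoundError{}))         // Deleted <nil>
	fmt.Println(deleteState(errors.New("api error"))) // "" api error
}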
@@ -1156,7 +1156,7 @@ func waitForClusterAvailable(cceClient *golangsdk.ServiceClient, clusterId strin
 	}
 }
 
-func recursiveCreate(ctx context.Context, client *golangsdk.ServiceClient, opts nodes.CreateOptsBuilder, clusterID string) (*nodes.Nodes, string) {
+func recursiveCreate(ctx context.Context, client *golangsdk.ServiceClient, opts nodes.CreateOpts, clusterID string) (*nodes.Nodes, string) {
 	stateCluster := &resource.StateChangeConf{
 		Target:  []string{"Available"},
 		Refresh: waitForClusterAvailable(client, clusterID),
@@ -1168,7 +1168,7 @@ func recursiveCreate(ctx context.Context, client *golangsdk.ServiceClient, opts
 	if stateErr != nil {
 		log.Printf("[INFO] Cluster Unavailable %s.\n", stateErr)
 	}
-	node, err := nodes.Create(client, clusterID, opts).Extract()
+	node, err := nodes.Create(client, clusterID, opts)
 	if err != nil {
 		if _, ok := err.(golangsdk.ErrDefault403); ok {
 			return recursiveCreate(ctx, client, opts, clusterID)
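recursiveCreate retries node creation when the API answers 403, waiting for the cluster to become Available before each attempt. A self-contained model of that retry shape — all types here are stand-ins, and it adds an attempt bound that the provider's recursive version does not have:

package main

import (
	"errors"
	"fmt"
)

// forbiddenError stands in for golangsdk.ErrDefault403.
type forbiddenError struct{}

func (forbiddenError) Error() string { return "403 forbidden" }

// Node is an illustrative stand-in for nodes.Nodes.
type Node struct{ ID string }

// createWithRetry retries the create while the API answers 403 (cluster
// not ready yet), waiting for the cluster in between each attempt.
func createWithRetry(create func() (*Node, error), waitForCluster func() error, attempts int) (*Node, error) {
	for i := 0; i < attempts; i++ {
		if err := waitForCluster(); err != nil {
			return nil, err
		}
		node, err := create()
		if err == nil {
			return node, nil
		}
		if _, ok := err.(forbiddenError); !ok {
			return nil, err // only 403 is retryable here
		}
	}
	return nil, errors.New("cluster never became available")
}

func main() {
	calls := 0
	create := func() (*Node, error) {
		calls++
		if calls < 2 {
			return nil, forbiddenError{} // first attempt rejected
		}
		return &Node{ID: "node-1"}, nil
	}
	wait := func() error { return nil }
	fmt.Println(createWithRetry(create, wait, 5))
}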