Merge pull request #726 from ionos-cloud/feat/dataplatform_add_auto_scaling

cristiGuranIonos authored Dec 11, 2024
2 parents b4b740a + 1a967d0 commit a789153
Showing 46 changed files with 730 additions and 531 deletions.
4 changes: 0 additions & 4 deletions .gitbook.yaml

This file was deleted.

4 changes: 3 additions & 1 deletion CHANGELOG.md
@@ -1,4 +1,6 @@
## 6.6.8 -- upcoming release
## 6.6.8
### Features
- Add `auto_scaling` attribute to `ionoscloud_dataplatform_node_pool` resource.
### Fixes
- Omitting the `location` attribute for some resources no longer generates an error

373 changes: 0 additions & 373 deletions LICENSE

This file was deleted.

3 changes: 3 additions & 0 deletions docs/data-sources/dataplatform_node_pool.md
@@ -72,3 +72,6 @@ The following attributes are returned by the datasource:
* `day_of_the_week`
* `labels` - Key-value pairs attached to the node pool resource as [Kubernetes labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
* `annotations` - Key-value pairs attached to the node pool resource as [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/).
* `auto_scaling` - The range defining the minimum and maximum number of worker nodes that the node pool automatically scales between; see the usage example below. For more details, please check the API documentation.
- `min_node_count` - The minimum number of worker nodes the node pool can scale down to. Must be less than `max_node_count`.
- `max_node_count` - The maximum number of worker nodes the node pool can scale up to. Must be greater than `min_node_count`.
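As a usage example, here is a minimal sketch of reading the new attributes through the datasource; the UUID values are placeholders, and lookup by `id` is assumed from the acceptance test further down:

```hcl
data "ionoscloud_dataplatform_node_pool" "example" {
  cluster_id = "your-cluster-uuid"
  id         = "your-node-pool-uuid"
}

# Surface the autoscaling range returned by the API.
output "min_nodes" {
  value = data.ionoscloud_dataplatform_node_pool.example.auto_scaling[0].min_node_count
}

output "max_nodes" {
  value = data.ionoscloud_dataplatform_node_pool.example.auto_scaling[0].max_node_count
}
```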
7 changes: 5 additions & 2 deletions docs/resources/dataplatform_node_pool.md
@@ -65,10 +65,13 @@ resource "ionoscloud_dataplatform_node_pool" "example" {
* `storage_type` - (Optional)[string] The type of hardware for the volume. Must be set with one of the values `HDD` or `SSD`. The default value is `SSD`.
* `storage_size` - (Optional)[int] The size of the volume in GB. The size must be greater than `10`GB. The default value is `20`.
* `maintenance_window` - (Optional) Starting time of a weekly 4-hour window during which maintenance might occur, in `hh:mm:ss` format.
* `time` - (Required)[string] Time at which the maintenance should start. Must conform to the 'HH:MM:SS' 24-hour format, with leading zeros. For more information, take a look at [this link](https://stackoverflow.com/questions/7536755/regular-expression-for-matching-hhmm-time-format).
* `day_of_the_week` - (Required)[string] Must be set with one of the values `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` or `Sunday`.
- `time` - (Required)[string] Time at which the maintenance should start. Must conform to the 'HH:MM:SS' 24-hour format, with leading zeros. For more information, take a look at [this link](https://stackoverflow.com/questions/7536755/regular-expression-for-matching-hhmm-time-format).
- `day_of_the_week` - (Required)[string] Must be set with one of the values `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` or `Sunday`.
* `labels` - (Optional)[map] Key-value pairs attached to the node pool resource as [Kubernetes labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
* `annotations` - (Optional)[map] Key-value pairs attached to the node pool resource as [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/).
* `auto_scaling` - (Optional) The range defining the minimum and maximum number of worker nodes that the node pool automatically scales between; see the usage example below. For more details, please check the API documentation.
- `min_node_count` - (Required)[int] The minimum number of worker nodes the node pool can scale down to. Must be less than `max_node_count`.
- `max_node_count` - (Required)[int] The maximum number of worker nodes the node pool can scale up to. Must be greater than `min_node_count`.
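As a usage example, here is a minimal sketch of the new block in a resource configuration, mirroring the acceptance test config further down; the cluster reference, `name`, and `node_count` values are placeholders:

```hcl
resource "ionoscloud_dataplatform_node_pool" "example" {
  cluster_id = ionoscloud_dataplatform_cluster.example.id
  name       = "example-node-pool"
  node_count = 1

  # The pool scales between 1 and 3 workers; min_node_count must stay strictly below max_node_count.
  auto_scaling {
    min_node_count = 1
    max_node_count = 3
  }
}
```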

## Import

2 changes: 1 addition & 1 deletion go.mod
@@ -22,7 +22,7 @@ require (
github.com/ionos-cloud/sdk-go-bundle/shared v0.1.1
github.com/ionos-cloud/sdk-go-cert-manager v1.1.0
github.com/ionos-cloud/sdk-go-container-registry v1.2.0
github.com/ionos-cloud/sdk-go-dataplatform v1.0.3
github.com/ionos-cloud/sdk-go-dataplatform v1.1.1
github.com/ionos-cloud/sdk-go-dbaas-in-memory-db v1.0.1
github.com/ionos-cloud/sdk-go-dbaas-mariadb v1.1.1
github.com/ionos-cloud/sdk-go-dbaas-mongo v1.3.1
2 changes: 2 additions & 0 deletions go.sum
@@ -128,6 +128,8 @@ github.com/ionos-cloud/sdk-go-container-registry v1.2.0 h1:C5r2XleKLbSFw9kmb4N8I
github.com/ionos-cloud/sdk-go-container-registry v1.2.0/go.mod h1:woBP1+A4N0KXiRj9jG4y/hEXgrVjJv0CUlAvc24mCeo=
github.com/ionos-cloud/sdk-go-dataplatform v1.0.3 h1:eaZITgPaLHbK4d/nVaImvP19a5yAumI5hprjM/B1qSE=
github.com/ionos-cloud/sdk-go-dataplatform v1.0.3/go.mod h1:LXJGnUwLLiJhKgApv7TIPGy7WOuGbYIzBPFxGpNfqN0=
github.com/ionos-cloud/sdk-go-dataplatform v1.1.1 h1:Wu9TAiphRyMEweUcQlMblhVCl9qVxQlOYEOw+jJS+Ss=
github.com/ionos-cloud/sdk-go-dataplatform v1.1.1/go.mod h1:3rQrOZFbXcjK3C5Dnjzahsd9x7PO+b4AARdBwdz//Mg=
github.com/ionos-cloud/sdk-go-dbaas-in-memory-db v1.0.1 h1:vd3SCqZ1OI5l7Tzql7NDb5Zlpna8e2SsfEYZRGHOkOo=
github.com/ionos-cloud/sdk-go-dbaas-in-memory-db v1.0.1/go.mod h1:JLbyt5wj7+f/njy7pnWD6K+cCfe32p8VrjmKks055+w=
github.com/ionos-cloud/sdk-go-dbaas-mariadb v1.1.1 h1:dZLxaH2n0nxCOKMkt49TibOiY1hkaHThpXEgJL0oZng=
19 changes: 19 additions & 0 deletions ionoscloud/data_source_dataplatform_node_pool.go
@@ -113,6 +113,25 @@ func dataSourceDataplatformNodePool() *schema.Resource {
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"auto_scaling": {
Type: schema.TypeList,
Description: "The range defining the minimum and maximum number of worker nodes that the managed node group can scale in",
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_node_count": {
Type: schema.TypeInt,
Description: "The minimum number of worker nodes the node pool can scale down to. Should be less than max_node_count",
Computed: true,
},
"max_node_count": {
Type: schema.TypeInt,
Description: "The maximum number of worker nodes that the node pool can scale to. Should be greater than min_node_count",
Computed: true,
},
},
},
},
"cluster_id": {
Type: schema.TypeString,
Required: true,
22 changes: 22 additions & 0 deletions ionoscloud/resource_dataplatform_nodepool.go
@@ -136,6 +136,28 @@ func resourceDataplatformNodePool() *schema.Resource {
Description: "The UUID of an existing Dataplatform cluster.",
ValidateDiagFunc: validation.ToDiagFunc(validation.IsUUID),
},
"auto_scaling": {
Type: schema.TypeList,
Description: "The range defining the minimum and maximum number of worker nodes that the managed node group can scale in",
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_node_count": {
Type: schema.TypeInt,
Description: "The minimum number of worker nodes the node pool can scale down to. Should be less than max_node_count",
Required: true,
ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(1)),
},
"max_node_count": {
Type: schema.TypeInt,
Description: "The maximum number of worker nodes that the node pool can scale to. Should be greater than min_node_count",
Required: true,
ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(1)),
},
},
},
},
},
Timeouts: &resourceDefaultTimeouts,
}
14 changes: 14 additions & 0 deletions ionoscloud/resource_dataplatform_nodepool_test.go
@@ -44,6 +44,8 @@ func TestAccDataplatformNodePoolBasic(t *testing.T) {
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "labels.color", "green"),
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "annotations.ann1", "value1"),
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "annotations.ann2", "value2"),
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "auto_scaling.0.min_node_count", "1"),
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "auto_scaling.0.max_node_count", "2"),
),
},
{
@@ -63,6 +65,8 @@
resource.TestCheckResourceAttrPair(constant.DataSource+"."+constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestDataSourceById, "labels.color", constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "labels.color"),
resource.TestCheckResourceAttrPair(constant.DataSource+"."+constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestDataSourceById, "annotations.ann1", constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "annotations.ann1"),
resource.TestCheckResourceAttrPair(constant.DataSource+"."+constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestDataSourceById, "annotations.ann2", constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "annotations.ann2"),
resource.TestCheckResourceAttrPair(constant.DataSource+"."+constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestDataSourceById, "auto_scaling.0.min_node_count", constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "auto_scaling.0.min_node_count"),
resource.TestCheckResourceAttrPair(constant.DataSource+"."+constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestDataSourceById, "auto_scaling.0.max_node_count", constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "auto_scaling.0.max_node_count"),
),
},
{
@@ -152,6 +156,8 @@
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "maintenance_window.0.day_of_the_week", "Sunday"),
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "labels.foo", "bar"),
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "annotations.ann1", "value1"),
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "auto_scaling.0.min_node_count", "1"),
resource.TestCheckResourceAttr(constant.DataplatformNodePoolResource+"."+constant.DataplatformNodePoolTestResource, "auto_scaling.0.max_node_count", "3"),
),
},
},
@@ -266,6 +272,10 @@ resource ` + constant.DataplatformNodePoolResource + ` ` + constant.Dataplatform
ann1 = "value1"
ann2 = "value2"
}
auto_scaling {
min_node_count = 1
max_node_count = 2
}
}
`

@@ -306,6 +316,10 @@ resource ` + constant.DataplatformNodePoolResource + ` ` + constant.Dataplatform
annotations = {
ann1 = "value1"
}
auto_scaling {
min_node_count = 1
max_node_count = 3
}
}
`

81 changes: 75 additions & 6 deletions services/dataplatform/nodepool.go
@@ -43,14 +43,20 @@ func (c *Client) ListNodePools(ctx context.Context, clusterId string) (dataplatf
}

func (c *Client) CreateNodePool(ctx context.Context, clusterId string, d *schema.ResourceData) (dataplatform.NodePoolResponseData, *dataplatform.APIResponse, error) {
dataplatformNodePool := GetDataplatformNodePoolDataCreate(d)
dataplatformNodePool, err := GetDataplatformNodePoolDataCreate(d)
if err != nil {
return dataplatform.NodePoolResponseData{}, nil, err
}
clusterResponse, apiResponse, err := c.sdkClient.DataPlatformNodePoolApi.ClustersNodepoolsPost(ctx, clusterId).CreateNodePoolRequest(*dataplatformNodePool).Execute()
apiResponse.LogInfo()
return clusterResponse, apiResponse, err
}

func (c *Client) UpdateNodePool(ctx context.Context, clusterId, nodePoolId string, d *schema.ResourceData) (dataplatform.NodePoolResponseData, utils.ApiResponseInfo, error) {
dataplatformNodePool := GetDataplatformNodePoolDataUpdate(d)
dataplatformNodePool, err := GetDataplatformNodePoolDataUpdate(d)
if err != nil {
return dataplatform.NodePoolResponseData{}, nil, err
}
clusterResponse, apiResponse, err := c.sdkClient.DataPlatformNodePoolApi.ClustersNodepoolsPatch(ctx, clusterId, nodePoolId).PatchNodePoolRequest(*dataplatformNodePool).Execute()
apiResponse.LogInfo()
return clusterResponse, apiResponse, err
@@ -83,7 +89,8 @@ func (c *Client) IsNodePoolReady(ctx context.Context, d *schema.ResourceData) (b
return strings.EqualFold(*subjectNodePool.Metadata.State, constant.Available), nil
}

func GetDataplatformNodePoolDataCreate(d *schema.ResourceData) *dataplatform.CreateNodePoolRequest {
// GetDataplatformNodePoolDataCreate gets the node pool data from the schema and creates a CreateNodePoolRequest
func GetDataplatformNodePoolDataCreate(d *schema.ResourceData) (*dataplatform.CreateNodePoolRequest, error) {

dataplatformNodePool := dataplatform.CreateNodePoolRequest{
Properties: &dataplatform.CreateNodePoolProperties{},
@@ -148,11 +155,18 @@ func GetDataplatformNodePoolDataCreate(d *schema.ResourceData) *dataplatform.Cre
}
dataplatformNodePool.Properties.Annotations = &annotations
}
var autoscaling *dataplatform.AutoScaling
var err error
if autoscaling, err = getAutoscalingData(d); err != nil {
return &dataplatformNodePool, err
}
dataplatformNodePool.Properties.AutoScaling = autoscaling

return &dataplatformNodePool
return &dataplatformNodePool, nil
}

func GetDataplatformNodePoolDataUpdate(d *schema.ResourceData) *dataplatform.PatchNodePoolRequest {
// GetDataplatformNodePoolDataUpdate gets the node pool data from the schema and creates a PatchNodePoolRequest
func GetDataplatformNodePoolDataUpdate(d *schema.ResourceData) (*dataplatform.PatchNodePoolRequest, error) {

dataplatformNodePool := dataplatform.PatchNodePoolRequest{
Properties: &dataplatform.PatchNodePoolProperties{},
@@ -183,7 +197,22 @@ func GetDataplatformNodePoolDataUpdate(d *schema.ResourceData) *dataplatform.Pat
dataplatformNodePool.Properties.Annotations = &annotations
}

return &dataplatformNodePool
if d.HasChange("auto_scaling.0.min_node_count") {
oldMinNodes, newMinNodes := d.GetChange("auto_scaling.0.min_node_count")
log.Printf("[INFO] dataplatform node pool autoscaling min # of nodes changed from %+v to %+v", oldMinNodes, newMinNodes)
}

if d.HasChange("auto_scaling.0.max_node_count") {
oldMaxNodes, newMaxNodes := d.GetChange("auto_scaling.0.max_node_count")
log.Printf("[INFO] dataplatform node pool autoscaling max # of nodes changed from %+v to %+v", oldMaxNodes, newMaxNodes)
}
var autoscaling *dataplatform.AutoScaling
var err error
if autoscaling, err = getAutoscalingData(d); err != nil {
return &dataplatformNodePool, err
}
dataplatformNodePool.Properties.AutoScaling = autoscaling
return &dataplatformNodePool, nil
}

func SetDataplatformNodePoolData(d *schema.ResourceData, nodePool dataplatform.NodePoolResponseData) error {
@@ -276,6 +305,19 @@ func SetDataplatformNodePoolData(d *schema.ResourceData, nodePool dataplatform.N
return utils.GenerateSetError(nodePoolResourceName, "annotations", err)
}
}

if nodePool.Properties.AutoScaling != nil && nodePool.Properties.AutoScaling.MinNodeCount != nil &&
nodePool.Properties.AutoScaling.MaxNodeCount != nil && (*nodePool.Properties.AutoScaling.MinNodeCount != 0 &&
*nodePool.Properties.AutoScaling.MaxNodeCount != 0) {
if err := d.Set("auto_scaling", []map[string]uint32{
{
"min_node_count": uint32(*nodePool.Properties.AutoScaling.MinNodeCount),
"max_node_count": uint32(*nodePool.Properties.AutoScaling.MaxNodeCount),
},
}); err != nil {
return err
}
}
return nil
}

@@ -323,3 +365,30 @@ func SetNodePoolsData(d *schema.ResourceData, results []dataplatform.NodePoolRes
}
return nil
}

func getAutoscalingData(d *schema.ResourceData) (*dataplatform.AutoScaling, error) {
var autoscaling dataplatform.AutoScaling

minNodeCount, asmnOk := d.GetOk("auto_scaling.0.min_node_count")
maxNodeCount, asmxnOk := d.GetOk("auto_scaling.0.max_node_count")
if !asmnOk && !asmxnOk {
return nil, nil
}

asmnVal := int32(minNodeCount.(int))
asmxnVal := int32(maxNodeCount.(int))
if asmnVal == asmxnVal {
return &autoscaling, fmt.Errorf("error creating dataplatform node pool: max_node_count cannot be equal to min_node_count")
}

if asmxnVal < asmnVal {
return &autoscaling, fmt.Errorf("error creating dataplatform node pool: max_node_count cannot be lower than min_node_count")
}

log.Printf("[INFO] Setting Autoscaling minimum node count to : %d", asmnVal)
autoscaling.MinNodeCount = &asmnVal
log.Printf("[INFO] Setting Autoscaling maximum node count to : %d", asmxnVal)
autoscaling.MaxNodeCount = &asmxnVal

return &autoscaling, nil
}