From fa4df3228585f9f8681fa1920ef9565cd8563d84 Mon Sep 17 00:00:00 2001
From: Vishal Sharma
Date: Fri, 11 Oct 2024 23:21:50 +0530
Subject: [PATCH 1/7] fix: Updated aks module according to latest azurerm version

---
 .github/dependabot.yml | 4 +
 .github/workflows/auto_assignee.yml | 2 +-
 .github/workflows/automerge.yml | 4 +-
 .github/workflows/changelog.yaml | 2 +-
 .github/workflows/readme.yml | 2 +-
 .github/workflows/tf-checks.yml | 12 +-
 .github/workflows/tflint.yml | 2 +-
 .github/workflows/tfsec.yml | 2 +-
 README.yaml | 3 +-
 aks.tf | 433 +++++++++++++
 diagnostic.tf | 110 ++++
 examples/complete/example.tf | 89 ++-
 examples/complete/versions.tf | 2 +-
 examples/private_cluster/example.tf | 31 +-
 examples/private_cluster/versions.tf | 2 +-
 examples/public_cluster/example.tf | 30 +-
 examples/public_cluster/versions.tf | 4 +-
 extensions.tf | 36 ++
 locals.tf | 131 ++++
 main.tf | 922 ---------------------
 node.tf | 103 +++
 outputs.tf | 3 +-
 role.tf | 182 ++++++
 variables.tf | 755 +++++++++++-----------
 versions.tf | 3 +-
 25 files changed, 1495 insertions(+), 1374 deletions(-)
 create mode 100644 aks.tf
 create mode 100644 diagnostic.tf
 create mode 100644 extensions.tf
 create mode 100644 locals.tf
 delete mode 100644 main.tf
 create mode 100644 node.tf
 create mode 100644 role.tf

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 60d4d32..bb9cd81 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -17,3 +17,7 @@ updates:
     directory: "examples/private_cluster" # Location of package manifests
     schedule:
       interval: "weekly"
+  - package-ecosystem: "terraform" # See documentation for possible values
+    directory: "examples/complete" # Location of package manifests
+    schedule:
+      interval: "weekly"
diff --git a/.github/workflows/auto_assignee.yml b/.github/workflows/auto_assignee.yml
index 9acc9b8..f8b8bcd 100644
--- a/.github/workflows/auto_assignee.yml
+++ b/.github/workflows/auto_assignee.yml
@@ -7,7 +7,7 @@ on:
   workflow_dispatch:
 jobs:
   assignee:
-    uses: clouddrove/github-shared-workflows/.github/workflows/auto_assignee.yml@1.2.1
+    uses: clouddrove/github-shared-workflows/.github/workflows/auto_assignee.yml@master
     secrets:
       GITHUB: ${{ secrets.GITHUB }}
     with:
diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml
index eb86ae3..26392e2 100644
--- a/.github/workflows/automerge.yml
+++ b/.github/workflows/automerge.yml
@@ -4,9 +4,9 @@ on:
   pull_request:
 jobs:
   auto-merge:
-    uses: clouddrove/github-shared-workflows/.github/workflows/auto_merge.yml@1.2.1
+    uses: clouddrove/github-shared-workflows/.github/workflows/auto_merge.yml@master
     secrets:
       GITHUB: ${{ secrets.GITHUB }}
     with:
-      tfcheck: 'private_cluster-example / Check code format'
+      tfcheck: 'complete-example / Check code format'
 ...
diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml index b34acec..1ee6f78 100644 --- a/.github/workflows/changelog.yaml +++ b/.github/workflows/changelog.yaml @@ -7,7 +7,7 @@ on: workflow_dispatch: jobs: changelog: - uses: clouddrove/github-shared-workflows/.github/workflows/changelog.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/changelog.yml@master secrets: inherit with: branch: 'master' diff --git a/.github/workflows/readme.yml b/.github/workflows/readme.yml index 444164d..c4a5793 100644 --- a/.github/workflows/readme.yml +++ b/.github/workflows/readme.yml @@ -12,4 +12,4 @@ jobs: uses: clouddrove/github-shared-workflows/.github/workflows/readme.yml@master secrets: TOKEN : ${{ secrets.GITHUB }} - SLACK_WEBHOOK_TERRAFORM: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} \ No newline at end of file + SLACK_WEBHOOK_TERRAFORM: ${{ secrets.SLACK_WEBHOOK_TERRAFORM }} diff --git a/.github/workflows/tf-checks.yml b/.github/workflows/tf-checks.yml index b287445..b23f1b4 100644 --- a/.github/workflows/tf-checks.yml +++ b/.github/workflows/tf-checks.yml @@ -6,22 +6,22 @@ on: workflow_dispatch: jobs: basic-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: working_directory: './examples/basic/' complete-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: working_directory: './examples/complete/' private_cluster-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: working_directory: './examples/private_cluster/' public_cluster-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: working_directory: './examples/public_cluster/' aks_with_microsoft_entra_id-example: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-checks.yml@master with: - working_directory: './examples/aks_with_microsoft_entra_id/' \ No newline at end of file + working_directory: './examples/aks_with_microsoft_entra_id/' diff --git a/.github/workflows/tflint.yml b/.github/workflows/tflint.yml index 04cca22..71a6fc4 100644 --- a/.github/workflows/tflint.yml +++ b/.github/workflows/tflint.yml @@ -6,6 +6,6 @@ on: workflow_dispatch: jobs: tf-lint: - uses: clouddrove/github-shared-workflows/.github/workflows/tf-lint.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tf-lint.yml@master secrets: GITHUB: ${{ secrets.GITHUB }} diff --git a/.github/workflows/tfsec.yml b/.github/workflows/tfsec.yml index 7f1003f..c203751 100644 --- a/.github/workflows/tfsec.yml +++ b/.github/workflows/tfsec.yml @@ -5,7 +5,7 @@ on: workflow_dispatch: jobs: tfsec: - uses: clouddrove/github-shared-workflows/.github/workflows/tfsec.yml@1.2.1 + uses: clouddrove/github-shared-workflows/.github/workflows/tfsec.yml@master secrets: inherit with: working_directory: '.' 
diff --git a/README.yaml b/README.yaml index 3790dae..e36a63c 100644 --- a/README.yaml +++ b/README.yaml @@ -1,3 +1,4 @@ + --- # # This is the canonical configuration for the `README.md` @@ -126,5 +127,3 @@ usage: |- log_analytics_workspace_id = module.log-analytics.workspace_id # when diagnostic_setting_enable = true && oms_agent_enabled = true } ``` - - diff --git a/aks.tf b/aks.tf new file mode 100644 index 0000000..c41e023 --- /dev/null +++ b/aks.tf @@ -0,0 +1,433 @@ + +resource "azurerm_kubernetes_cluster" "aks" { + count = var.enabled ? 1 : 0 + name = format("%s-aks", module.labels.id) + location = local.location + resource_group_name = local.resource_group_name + dns_prefix = replace(module.labels.id, "/[\\W_]/", "-") + kubernetes_version = var.kubernetes_version + automatic_upgrade_channel = var.automatic_upgrade_channel + sku_tier = var.aks_sku_tier + node_resource_group = var.node_resource_group == null ? format("%s-aks-node-rg", module.labels.id) : var.node_resource_group + disk_encryption_set_id = var.key_vault_id != null ? azurerm_disk_encryption_set.main[0].id : null + private_cluster_enabled = var.private_cluster_enabled + private_dns_zone_id = var.private_cluster_enabled ? local.private_dns_zone : null + http_application_routing_enabled = var.enable_http_application_routing + azure_policy_enabled = var.azure_policy_enabled + edge_zone = var.edge_zone + image_cleaner_enabled = var.image_cleaner_enabled + image_cleaner_interval_hours = var.image_cleaner_interval_hours + role_based_access_control_enabled = var.role_based_access_control_enabled + local_account_disabled = var.local_account_disabled + workload_identity_enabled = var.workload_identity_enabled + oidc_issuer_enabled = var.oidc_issuer_enabled + + default_node_pool { + name = local.default_node_pool.agents_pool_name + node_count = local.default_node_pool.count + vm_size = local.default_node_pool.vm_size + auto_scaling_enabled = local.default_node_pool.auto_scaling_enabled + min_count = local.default_node_pool.min_count + max_count = local.default_node_pool.max_count + max_pods = local.default_node_pool.max_pods + os_disk_type = local.default_node_pool.os_disk_type + os_disk_size_gb = local.default_node_pool.os_disk_size_gb + type = local.default_node_pool.type + vnet_subnet_id = local.default_node_pool.vnet_subnet_id + host_encryption_enabled = local.default_node_pool.host_encryption_enabled + node_public_ip_enabled = local.default_node_pool.node_public_ip_enabled + fips_enabled = local.default_node_pool.fips_enabled + node_labels = local.default_node_pool.node_labels + only_critical_addons_enabled = local.default_node_pool.only_critical_addons_enabled + orchestrator_version = local.default_node_pool.orchestrator_version + proximity_placement_group_id = local.default_node_pool.proximity_placement_group_id + scale_down_mode = local.default_node_pool.scale_down_mode + snapshot_id = local.default_node_pool.snapshot_id + tags = local.default_node_pool.tags + temporary_name_for_rotation = local.default_node_pool.temporary_name_for_rotation + ultra_ssd_enabled = local.default_node_pool.ultra_ssd_enabled + zones = local.default_node_pool.zones + node_network_profile { + node_public_ip_tags = var.node_public_ip_tags + } + + + dynamic "kubelet_config" { + for_each = var.agents_pool_kubelet_configs + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb 
+ cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? [] : ["upgrade_settings"] + + content { + max_surge = var.agents_pool_max_surge + drain_timeout_in_minutes = var.agents_pool_drain_timeout_in_minutes + node_soak_duration_in_minutes = var.agents_pool_node_soak_duration_in_minutes + } + } + } + + dynamic "aci_connector_linux" { + for_each = var.aci_connector_linux_enabled ? 
["aci_connector_linux"] : [] + + content { + subnet_name = var.aci_connector_linux_subnet_name + } + } + + dynamic "ingress_application_gateway" { + for_each = toset(var.ingress_application_gateway != null ? [var.ingress_application_gateway] : []) + + content { + gateway_id = ingress_application_gateway.value.gateway_id + gateway_name = ingress_application_gateway.value.gateway_name + subnet_cidr = ingress_application_gateway.value.subnet_cidr + subnet_id = ingress_application_gateway.value.subnet_id + } + } + + dynamic "key_management_service" { + for_each = var.kms_enabled ? ["key_management_service"] : [] + + content { + key_vault_key_id = var.kms_key_vault_key_id + key_vault_network_access = var.kms_key_vault_network_access + } + } + + dynamic "key_vault_secrets_provider" { + for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : [] + + content { + secret_rotation_enabled = var.secret_rotation_enabled + secret_rotation_interval = var.secret_rotation_interval + } + } + + dynamic "kubelet_identity" { + for_each = var.kubelet_identity == null ? [] : [var.kubelet_identity] + content { + client_id = kubelet_identity.value.client_id + object_id = kubelet_identity.value.object_id + user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id + } + } + + dynamic "http_proxy_config" { + for_each = var.enable_http_proxy ? [1] : [] + + content { + http_proxy = var.http_proxy_config.http_proxy + https_proxy = var.http_proxy_config.https_proxy + no_proxy = var.http_proxy_config.no_proxy + } + } + + + dynamic "http_proxy_config" { + for_each = var.http_proxy_config != null ? ["http_proxy_config"] : [] + + content { + http_proxy = http_proxy_config.value.http_proxy + https_proxy = http_proxy_config.value.https_proxy + no_proxy = http_proxy_config.value.no_proxy + trusted_ca = http_proxy_config.value.trusted_ca + } + } + + dynamic "confidential_computing" { + for_each = var.confidential_computing == null ? [] : [var.confidential_computing] + + content { + sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled + } + } + + dynamic "confidential_computing" { + for_each = var.confidential_computing == null ? [] : [var.confidential_computing] + + content { + sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled + } + } + + # dynamic "api_server_access_profile" { + # for_each = var.api_server_access_profile != null ? [1] : [] + + # content { + # authorized_ip_ranges = var.api_server_access_profile.authorized_ip_ranges + # #vnet_integration_enabled = var.api_server_access_profile.vnet_integration_enabled + # #subnet_id = var.api_server_access_profile.subnet_id + # } + # } + + dynamic "auto_scaler_profile" { + for_each = var.auto_scaler_profile_enabled ? 
[var.auto_scaler_profile] : [] + + content { + balance_similar_node_groups = auto_scaler_profile.value.balance_similar_node_groups + empty_bulk_delete_max = auto_scaler_profile.value.empty_bulk_delete_max + expander = auto_scaler_profile.value.expander + max_graceful_termination_sec = auto_scaler_profile.value.max_graceful_termination_sec + max_node_provisioning_time = auto_scaler_profile.value.max_node_provisioning_time + max_unready_nodes = auto_scaler_profile.value.max_unready_nodes + max_unready_percentage = auto_scaler_profile.value.max_unready_percentage + new_pod_scale_up_delay = auto_scaler_profile.value.new_pod_scale_up_delay + scale_down_delay_after_add = auto_scaler_profile.value.scale_down_delay_after_add + scale_down_delay_after_delete = auto_scaler_profile.value.scale_down_delay_after_delete + scale_down_delay_after_failure = auto_scaler_profile.value.scale_down_delay_after_failure + scale_down_unneeded = auto_scaler_profile.value.scale_down_unneeded + scale_down_unready = auto_scaler_profile.value.scale_down_unready + scale_down_utilization_threshold = auto_scaler_profile.value.scale_down_utilization_threshold + scan_interval = auto_scaler_profile.value.scan_interval + skip_nodes_with_local_storage = auto_scaler_profile.value.skip_nodes_with_local_storage + skip_nodes_with_system_pods = auto_scaler_profile.value.skip_nodes_with_system_pods + } + } + + dynamic "maintenance_window_auto_upgrade" { + for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] + content { + frequency = maintenance_window_auto_upgrade.value.frequency + interval = maintenance_window_auto_upgrade.value.interval + duration = maintenance_window_auto_upgrade.value.duration + day_of_week = maintenance_window_auto_upgrade.value.day_of_week + day_of_month = maintenance_window_auto_upgrade.value.day_of_month + week_index = maintenance_window_auto_upgrade.value.week_index + start_time = maintenance_window_auto_upgrade.value.start_time + utc_offset = maintenance_window_auto_upgrade.value.utc_offset + start_date = maintenance_window_auto_upgrade.value.start_date + + dynamic "not_allowed" { + for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? [] : maintenance_window_auto_upgrade.value.not_allowed + content { + start = not_allowed.value.start + end = not_allowed.value.end + } + } + } + } + + dynamic "maintenance_window_node_os" { + for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] + content { + duration = maintenance_window_node_os.value.duration + frequency = maintenance_window_node_os.value.frequency + interval = maintenance_window_node_os.value.interval + day_of_month = maintenance_window_node_os.value.day_of_month + day_of_week = maintenance_window_node_os.value.day_of_week + start_date = maintenance_window_node_os.value.start_date + start_time = maintenance_window_node_os.value.start_time + utc_offset = maintenance_window_node_os.value.utc_offset + week_index = maintenance_window_node_os.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + + dynamic "azure_active_directory_role_based_access_control" { + for_each = var.role_based_access_control == null ? 
[] : var.role_based_access_control + content { + # managed = azure_active_directory_role_based_access_control.value.managed + tenant_id = azure_active_directory_role_based_access_control.value.tenant_id + admin_group_object_ids = !azure_active_directory_role_based_access_control.value.azure_rbac_enabled ? var.admin_group_id : null + azure_rbac_enabled = azure_active_directory_role_based_access_control.value.azure_rbac_enabled + } + } + + dynamic "microsoft_defender" { + for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] + + content { + log_analytics_workspace_id = var.log_analytics_workspace_id + } + } + + dynamic "oms_agent" { + for_each = var.oms_agent_enabled ? ["oms_agent"] : [] + + content { + log_analytics_workspace_id = var.log_analytics_workspace_id + msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled + } + } + + dynamic "service_mesh_profile" { + for_each = var.service_mesh_profile == null ? [] : ["service_mesh_profile"] + content { + mode = var.service_mesh_profile.mode + external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled + internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled + revisions = var.service_mesh_profile.revisions + } + } + dynamic "service_principal" { + for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] + + content { + client_id = var.client_id + client_secret = var.client_secret + } + } + dynamic "storage_profile" { + for_each = var.storage_profile_enabled ? ["storage_profile"] : [] + + content { + blob_driver_enabled = var.storage_profile.blob_driver_enabled + disk_driver_enabled = var.storage_profile.disk_driver_enabled + # disk_driver_version = var.storage_profile.disk_driver_version + file_driver_enabled = var.storage_profile.file_driver_enabled + snapshot_controller_enabled = var.storage_profile.snapshot_controller_enabled + } + } + + identity { + type = var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? "UserAssigned" : "SystemAssigned" + } + + dynamic "web_app_routing" { + for_each = var.web_app_routing == null ? [] : ["web_app_routing"] + + content { + dns_zone_ids = var.web_app_routing.dns_zone_id + } + } + + dynamic "linux_profile" { + for_each = var.linux_profile != null ? [true] : [] + iterator = lp + content { + admin_username = var.linux_profile.username + + ssh_key { + key_data = var.linux_profile.ssh_key + } + } + } + + dynamic "workload_autoscaler_profile" { + for_each = var.workload_autoscaler_profile == null ? [] : [var.workload_autoscaler_profile] + + content { + keda_enabled = workload_autoscaler_profile.value.keda_enabled + vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled + } + } + + + + + dynamic "windows_profile" { + for_each = var.windows_profile != null ? [var.windows_profile] : [] + + content { + admin_username = windows_profile.value.admin_username + admin_password = windows_profile.value.admin_password + license = windows_profile.value.license + + dynamic "gmsa" { + for_each = windows_profile.value.gmsa != null ? 
[windows_profile.value.gmsa] : [] + + content { + dns_server = gmsa.value.dns_server + root_domain = gmsa.value.root_domain + } + } + } + } + + network_profile { + network_plugin = var.network_plugin + network_policy = var.network_policy + network_data_plane = var.network_data_plane + dns_service_ip = cidrhost(var.service_cidr, 10) + service_cidr = var.service_cidr + load_balancer_sku = var.load_balancer_sku + network_plugin_mode = var.network_plugin_mode + outbound_type = var.outbound_type + pod_cidr = var.net_profile_pod_cidr + + + dynamic "load_balancer_profile" { + for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? [1] : [] + + content { + idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes + managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count + managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count + outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids + outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids + outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated + } + } + } + depends_on = [ + azurerm_role_assignment.aks_uai_private_dns_zone_contributor, + ] + tags = module.labels.tags +} + diff --git a/diagnostic.tf b/diagnostic.tf new file mode 100644 index 0000000..468dfea --- /dev/null +++ b/diagnostic.tf @@ -0,0 +1,110 @@ + +resource "azurerm_monitor_diagnostic_setting" "aks_diag" { + depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 + name = format("%s-aks-diag-log", module.labels.id) + target_resource_id = azurerm_kubernetes_cluster.aks[0].id + storage_account_id = var.storage_account_id + eventhub_name = var.eventhub_name + eventhub_authorization_rule_id = var.eventhub_authorization_rule_id + log_analytics_workspace_id = var.log_analytics_workspace_id + log_analytics_destination_type = var.log_analytics_destination_type + + dynamic "metric" { + for_each = var.metric_enabled ? ["AllMetrics"] : [] + content { + category = metric.value + enabled = true + } + } + dynamic "enabled_log" { + for_each = var.kv_logs.enabled ? var.kv_logs.category != null ? var.kv_logs.category : var.kv_logs.category_group : [] + content { + category = var.kv_logs.category != null ? enabled_log.value : null + category_group = var.kv_logs.category == null ? enabled_log.value : null + } + } + lifecycle { + ignore_changes = [log_analytics_destination_type] + } +} + +resource "azurerm_monitor_diagnostic_setting" "pip_aks" { + depends_on = [data.azurerm_resources.aks_pip, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 + name = format("%s-aks-pip-diag-log", module.labels.id) + target_resource_id = data.azurerm_resources.aks_pip[count.index].resources[0].id + storage_account_id = var.storage_account_id + eventhub_name = var.eventhub_name + eventhub_authorization_rule_id = var.eventhub_authorization_rule_id + log_analytics_workspace_id = var.log_analytics_workspace_id + log_analytics_destination_type = var.log_analytics_destination_type + + dynamic "metric" { + for_each = var.metric_enabled ? ["AllMetrics"] : [] + content { + category = metric.value + enabled = true + } + } + dynamic "enabled_log" { + for_each = var.pip_logs.enabled ? var.pip_logs.category != null ? 
var.pip_logs.category : var.pip_logs.category_group : [] + content { + category = var.pip_logs.category != null ? enabled_log.value : null + category_group = var.pip_logs.category == null ? enabled_log.value : null + } + } + + lifecycle { + ignore_changes = [log_analytics_destination_type] + } +} + + +resource "azurerm_monitor_diagnostic_setting" "aks-nsg" { + depends_on = [data.azurerm_resources.aks_nsg, azurerm_kubernetes_cluster.aks] + count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 + name = format("%s-aks-nsg-diag-log", module.labels.id) + target_resource_id = data.azurerm_resources.aks_nsg[count.index].resources[0].id + storage_account_id = var.storage_account_id + eventhub_name = var.eventhub_name + eventhub_authorization_rule_id = var.eventhub_authorization_rule_id + log_analytics_workspace_id = var.log_analytics_workspace_id + log_analytics_destination_type = var.log_analytics_destination_type + + dynamic "enabled_log" { + for_each = var.kv_logs.enabled ? var.kv_logs.category != null ? var.kv_logs.category : var.kv_logs.category_group : [] + content { + category = var.kv_logs.category != null ? enabled_log.value : null + category_group = var.kv_logs.category == null ? enabled_log.value : null + } + } + + lifecycle { + ignore_changes = [log_analytics_destination_type] + } +} + +resource "azurerm_monitor_diagnostic_setting" "aks-nic" { + depends_on = [data.azurerm_resources.aks_nic, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 + name = format("%s-aks-nic-dia-log", module.labels.id) + target_resource_id = data.azurerm_resources.aks_nic[count.index].resources[0].id + storage_account_id = var.storage_account_id + eventhub_name = var.eventhub_name + eventhub_authorization_rule_id = var.eventhub_authorization_rule_id + log_analytics_workspace_id = var.log_analytics_workspace_id + log_analytics_destination_type = var.log_analytics_destination_type + + dynamic "metric" { + for_each = var.metric_enabled ? 
["AllMetrics"] : [] + content { + category = metric.value + enabled = true + } + } + + lifecycle { + ignore_changes = [log_analytics_destination_type] + } +} diff --git a/examples/complete/example.tf b/examples/complete/example.tf index 8a848d4..ebf2747 100644 --- a/examples/complete/example.tf +++ b/examples/complete/example.tf @@ -1,6 +1,13 @@ provider "azurerm" { features {} + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } +provider "azurerm" { + features {} + alias = "peer" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" +} + data "azurerm_client_config" "current_client_config" {} module "resource_group" { @@ -27,7 +34,7 @@ module "vnet" { module "subnet" { source = "clouddrove/subnet/azure" - version = "1.2.1" + version = "1.2.0" name = "app" environment = "test" @@ -52,7 +59,7 @@ module "subnet" { module "log-analytics" { source = "clouddrove/log-analytics/azure" - version = "1.0.1" + version = "1.1.0" name = "app" environment = "test" label_order = ["name", "environment"] @@ -60,12 +67,17 @@ module "log-analytics" { log_analytics_workspace_sku = "PerGB2018" resource_group_name = module.resource_group.resource_group_name log_analytics_workspace_location = module.resource_group.resource_group_location + log_analytics_workspace_id = module.log-analytics.workspace_id } module "vault" { source = "clouddrove/key-vault/azure" - version = "1.1.0" - name = "appakstest" + version = "1.2.0" + name = "vishal-012" + providers = { + azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. + azurerm.main_sub = azurerm + } #environment = local.environment resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -85,41 +97,66 @@ module "vault" { reader_objects_ids = [data.azurerm_client_config.current_client_config.object_id] admin_objects_ids = [data.azurerm_client_config.current_client_config.object_id] #### enable diagnostic setting - diagnostic_setting_enable = false - log_analytics_workspace_id = module.log-analytics.workspace_id ## when diagnostic_setting_enable = true, need to add log analytics workspace id + diagnostic_setting_enable = true + log_analytics_workspace_id = module.log-analytics.workspace_id } module "aks" { - source = "../.." - name = "app1" - environment = "test" - + source = "../../" + name = "app1-yum" + environment = "test" resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location - kubernetes_version = "1.27.7" + kubernetes_version = "1.28.9" private_cluster_enabled = false + default_node_pool = { - name = "agentpool1" - max_pods = 200 - os_disk_size_gb = 64 - vm_size = "Standard_B4ms" - count = 1 - enable_node_public_ip = false - max_surge = "33%" + name = "agentpool1" + max_pods = 200 + os_disk_size_gb = 64 + vm_size = "Standard_B4ms" + count = 1 + node_public_ip_enabled = false + auto_scaling_enabled = true + min_count = 3 + max_count = 5 } ##### if requred more than one node group. 
nodes_pools = [ { name = "nodegroup2" - max_pods = 200 + max_pods = 30 os_disk_size_gb = 64 vm_size = "Standard_B4ms" count = 2 enable_node_public_ip = false mode = "User" - max_surge = "33%" + auto_scaling_enabled = true + min_count = 3 + max_count = 5 + node_labels = { + "sfvfv" = "spot" + } + }, + { + name = "spot" + max_pods = null + os_disk_size_gb = null + vm_size = "Standard_D2_v3" + count = 1 + enable_node_public_ip = false + mode = null + auto_scaling_enabled = true + min_count = 1 + max_count = 1 + node_labels = { + "dsvdv" = "spot" + } + priority = "Spot" + eviction_policy = "Delete" + spot_max_price = -1 }, ] @@ -132,7 +169,15 @@ module "aks" { admin_objects_ids = [data.azurerm_client_config.current_client_config.object_id] #### enable diagnostic setting. - microsoft_defender_enabled = true + microsoft_defender_enabled = false diagnostic_setting_enable = true - log_analytics_workspace_id = module.log-analytics.workspace_id # when diagnostic_setting_enable = true && oms_agent_enabled = true + log_analytics_workspace_id = module.log-analytics.workspace_id +} + +output "test1" { + value = module.aks.nodes_pools_with_defaults +} + +output "test" { + value = module.aks.nodes_pools } diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf index 18fc9ba..33578d1 100644 --- a/examples/complete/versions.tf +++ b/examples/complete/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 4.0.1" } } } diff --git a/examples/private_cluster/example.tf b/examples/private_cluster/example.tf index 73d6f4e..8a426f0 100644 --- a/examples/private_cluster/example.tf +++ b/examples/private_cluster/example.tf @@ -1,14 +1,21 @@ provider "azurerm" { features {} + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } +provider "azurerm" { + features {} + alias = "peer" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" +} + data "azurerm_client_config" "current_client_config" {} module "resource_group" { source = "clouddrove/resource-group/azure" version = "1.0.2" - name = "app" - environment = "test" + name = "app-1" + environment = "test-2" label_order = ["name", "environment", ] location = "Canada Central" } @@ -52,7 +59,7 @@ module "subnet" { module "log-analytics" { source = "clouddrove/log-analytics/azure" - version = "1.0.1" + version = "1.1.0" name = "app" environment = "test" label_order = ["name", "environment"] @@ -60,12 +67,17 @@ module "log-analytics" { log_analytics_workspace_sku = "PerGB2018" resource_group_name = module.resource_group.resource_group_name log_analytics_workspace_location = module.resource_group.resource_group_location + log_analytics_workspace_id = module.log-analytics.workspace_id } module "vault" { source = "clouddrove/key-vault/azure" - version = "1.1.0" - name = "apptest5rds4556" + version = "1.2.0" + name = "apptest3428335" + providers = { + azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. 
+ azurerm.main_sub = azurerm + } #environment = local.environment resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -85,7 +97,7 @@ module "vault" { reader_objects_ids = [data.azurerm_client_config.current_client_config.object_id] admin_objects_ids = [data.azurerm_client_config.current_client_config.object_id] #### enable diagnostic setting - diagnostic_setting_enable = false + diagnostic_setting_enable = true log_analytics_workspace_id = module.log-analytics.workspace_id ## when diagnostic_setting_enable = true, need to add log analytics workspace id } @@ -97,12 +109,13 @@ module "aks" { resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location - kubernetes_version = "1.27" + kubernetes_version = "1.28.9" + default_node_pool = { - name = "agentpool" + name = "agentpool1" max_pods = 200 os_disk_size_gb = 64 - vm_size = "Standard_B2s" + vm_size = "Standard_B4ms" count = 1 enable_node_public_ip = false } diff --git a/examples/private_cluster/versions.tf b/examples/private_cluster/versions.tf index 18fc9ba..ace4aa4 100644 --- a/examples/private_cluster/versions.tf +++ b/examples/private_cluster/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 3.112.0" } } } diff --git a/examples/public_cluster/example.tf b/examples/public_cluster/example.tf index 8e41cf0..83b6341 100644 --- a/examples/public_cluster/example.tf +++ b/examples/public_cluster/example.tf @@ -1,6 +1,14 @@ provider "azurerm" { features {} + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } + +provider "azurerm" { + features {} + alias = "peer" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" +} + data "azurerm_client_config" "current_client_config" {} module "resource_group" { @@ -52,7 +60,7 @@ module "subnet" { module "log-analytics" { source = "clouddrove/log-analytics/azure" - version = "1.0.1" + version = "1.1.0" name = "app" environment = "test" label_order = ["name", "environment"] @@ -60,12 +68,17 @@ module "log-analytics" { log_analytics_workspace_sku = "PerGB2018" resource_group_name = module.resource_group.resource_group_name log_analytics_workspace_location = module.resource_group.resource_group_location + log_analytics_workspace_id = module.log-analytics.workspace_id } module "vault" { source = "clouddrove/key-vault/azure" - version = "1.1.0" + version = "1.2.0" name = "apptest5rds4556" + providers = { + azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. + azurerm.main_sub = azurerm + } #environment = local.environment resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -85,20 +98,20 @@ module "vault" { reader_objects_ids = [data.azurerm_client_config.current_client_config.object_id] admin_objects_ids = [data.azurerm_client_config.current_client_config.object_id] #### enable diagnostic setting - diagnostic_setting_enable = false + diagnostic_setting_enable = true log_analytics_workspace_id = module.log-analytics.workspace_id ## when diagnostic_setting_enable = true, need to add log analytics workspace id } module "aks" { - source = "../.." - name = "app" - environment = "test" - + source = "../.." 
+ name = "app" + environment = "test" resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location - kubernetes_version = "1.27.7" + kubernetes_version = "1.28.9" private_cluster_enabled = false + default_node_pool = { name = "agentpool1" max_pods = 200 @@ -108,7 +121,6 @@ module "aks" { enable_node_public_ip = false } - ##### if requred more than one node group. nodes_pools = [ { diff --git a/examples/public_cluster/versions.tf b/examples/public_cluster/versions.tf index 18fc9ba..93f5d2c 100644 --- a/examples/public_cluster/versions.tf +++ b/examples/public_cluster/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 3.108.0" } } -} +} \ No newline at end of file diff --git a/extensions.tf b/extensions.tf new file mode 100644 index 0000000..b45d197 --- /dev/null +++ b/extensions.tf @@ -0,0 +1,36 @@ + +resource "azurerm_kubernetes_cluster_extension" "flux" { + depends_on = [azurerm_kubernetes_cluster.aks] + count = var.flux_enable ? 1 : 0 + name = "flux-extension" + cluster_id = join("", azurerm_kubernetes_cluster.aks[0].id) + extension_type = "microsoft.flux" + configuration_settings = { + "image-automation-controller.ssh-host-key-args" = "--ssh-hostkey-algos=rsa-sha2-512,rsa-sha2-256" + "multiTenancy.enforce" = "false" + "source-controller.ssh-host-key-args" = "--ssh-hostkey-algos=rsa-sha2-512,rsa-sha2-256" + } +} + +resource "azurerm_kubernetes_flux_configuration" "flux" { + depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_extension.flux] + count = var.flux_enable ? 1 : 0 + name = "flux-conf" + cluster_id = join("", azurerm_kubernetes_cluster.aks[0].id) + namespace = "flux-system" + scope = "cluster" + + git_repository { + url = var.flux_git_repo_url != "" ? var.flux_git_repo_url : "" + reference_type = "branch" + reference_value = var.flux_git_repo_branch + ssh_private_key_base64 = var.ssh_private_key_base64 != "" ? var.ssh_private_key_base64 : "" + } + + kustomizations { + name = "flux-system-kustomization" + timeout_in_seconds = var.flux_timeout_in_seconds + sync_interval_in_seconds = var.flux_sync_interval_in_seconds + retry_interval_in_seconds = var.flux_retry_interval_in_seconds + } +} diff --git a/locals.tf b/locals.tf new file mode 100644 index 0000000..27e3309 --- /dev/null +++ b/locals.tf @@ -0,0 +1,131 @@ + +## Managed By : CloudDrove +## Copyright @ CloudDrove. All Right Reserved. + +## Vritual Network and Subnet Creation + +data "azurerm_subscription" "current" {} +data "azurerm_client_config" "current" {} + + +locals { + private_dns_zone = var.private_dns_zone_type == "Custom" ? 
var.private_dns_zone_id : var.private_dns_zone_type + resource_group_name = var.resource_group_name + location = var.location + default_node_pool = { + agents_pool_name = "agentpool" + count = 1 + vm_size = "Standard_D2_v3" + os_type = "Linux" + auto_scaling_enabled = false + host_encryption_enabled = false + min_count = null + max_count = null + type = "VirtualMachineScaleSets" + node_taints = null + vnet_subnet_id = var.nodes_subnet_id + max_pods = 30 + os_disk_type = "Managed" + os_disk_size_gb = 128 + host_group_id = null + orchestrator_version = null + node_public_ip_enabled = false + mode = "System" + fips_enabled = null + node_labels = null + only_critical_addons_enabled = null + proximity_placement_group_id = null + scale_down_mode = null + snapshot_id = null + tags = null + temporary_name_for_rotation = null + ultra_ssd_enabled = null + zones = null + priority = null + eviction_policy = null + spot_max_price = null + } + # default_spot_node_pool = { + # priority = "Spot" + # eviction_policy = "Delete" + # spot_max_price = -1 + # } + nodes_pools_with_defaults = [for ap in var.nodes_pools : merge(local.default_node_pool, ap)] + nodes_pools = [for ap in local.nodes_pools_with_defaults : ap.os_type == "Linux" ? merge(local.default_linux_node_profile, ap) : merge(local.default_windows_node_profile, ap)] + # Defaults for Linux profile + # Generally smaller images so can run more pods and require smaller HD + default_linux_node_profile = { + max_pods = 30 + os_disk_size_gb = 128 + } + + # default_spot_instanse = { + # priority = "Spot" + # eviction_policy = "Delete" + # spot_max_price = -1 + # } + + # Defaults for Windows profile + # Do not want to run same number of pods and some images can be quite large + default_windows_node_profile = { + max_pods = 20 + os_disk_size_gb = 256 + } + +} + +output "nodes_pools_with_defaults" { + value = local.nodes_pools_with_defaults +} + +output "nodes_pools" { + value = local.nodes_pools +} + +module "labels" { + + source = "clouddrove/labels/azure" + version = "1.0.0" + name = var.name + environment = var.environment + managedby = var.managedby + label_order = var.label_order + repository = var.repository +} + +##----------------------------------------------------------------------------- +## DATA BLOCKS FOR DIAGNOSTIC.TF +##----------------------------------------------------------------------------- +data "azurerm_resources" "aks_pip" { + depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 + type = "Microsoft.Network/publicIPAddresses" + required_tags = { + Environment = var.environment + Name = module.labels.id + Repository = var.repository + } +} + +data "azurerm_resources" "aks_nsg" { + depends_on = [data.azurerm_resources.aks_nsg, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] + count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 + type = "Microsoft.Network/networkSecurityGroups" + required_tags = { + Environment = var.environment + Name = module.labels.id + Repository = var.repository + } +} + + +data "azurerm_resources" "aks_nic" { + depends_on = [azurerm_kubernetes_cluster.aks] + count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 
1 : 0 + type = "Microsoft.Network/networkInterfaces" + required_tags = { + Environment = var.environment + Name = module.labels.id + Repository = var.repository + } +} diff --git a/main.tf b/main.tf deleted file mode 100644 index 2730b5c..0000000 --- a/main.tf +++ /dev/null @@ -1,922 +0,0 @@ -## Managed By : CloudDrove -## Copyright @ CloudDrove. All Right Reserved. - -## Vritual Network and Subnet Creation - -data "azurerm_subscription" "current" {} -data "azurerm_client_config" "current" {} - - -locals { - resource_group_name = var.resource_group_name - location = var.location - default_agent_profile = { - name = "agentpool" - count = 1 - vm_size = "Standard_D2_v3" - os_type = "Linux" - enable_auto_scaling = false - enable_host_encryption = true - min_count = null - max_count = null - type = "VirtualMachineScaleSets" - node_taints = null - vnet_subnet_id = var.nodes_subnet_id - max_pods = 30 - os_disk_type = "Managed" - os_disk_size_gb = 128 - host_group_id = null - orchestrator_version = null - enable_node_public_ip = false - mode = "System" - node_soak_duration_in_minutes = null - max_surge = null - drain_timeout_in_minutes = null - } - - default_node_pool = merge(local.default_agent_profile, var.default_node_pool) - nodes_pools_with_defaults = [for ap in var.nodes_pools : merge(local.default_agent_profile, ap)] - nodes_pools = [for ap in local.nodes_pools_with_defaults : ap.os_type == "Linux" ? merge(local.default_linux_node_profile, ap) : merge(local.default_windows_node_profile, ap)] - # Defaults for Linux profile - # Generally smaller images so can run more pods and require smaller HD - default_linux_node_profile = { - max_pods = 30 - os_disk_size_gb = 128 - } - - # Defaults for Windows profile - # Do not want to run same number of pods and some images can be quite large - default_windows_node_profile = { - max_pods = 20 - os_disk_size_gb = 256 - } -} - -module "labels" { - - source = "clouddrove/labels/azure" - version = "1.0.0" - name = var.name - environment = var.environment - managedby = var.managedby - label_order = var.label_order - repository = var.repository -} - -locals { - private_dns_zone = var.private_dns_zone_type == "Custom" ? var.private_dns_zone_id : var.private_dns_zone_type -} - -resource "azurerm_kubernetes_cluster" "aks" { - count = var.enabled ? 1 : 0 - name = format("%s-aks", module.labels.id) - location = local.location - resource_group_name = local.resource_group_name - dns_prefix = replace(module.labels.id, "/[\\W_]/", "-") - kubernetes_version = var.kubernetes_version - automatic_channel_upgrade = var.automatic_channel_upgrade - sku_tier = var.aks_sku_tier - node_resource_group = var.node_resource_group == null ? format("%s-aks-node-rg", module.labels.id) : var.node_resource_group - disk_encryption_set_id = var.key_vault_id != null ? azurerm_disk_encryption_set.main[0].id : null - private_cluster_enabled = var.private_cluster_enabled - private_dns_zone_id = var.private_cluster_enabled ? local.private_dns_zone : null - http_application_routing_enabled = var.enable_http_application_routing - azure_policy_enabled = var.azure_policy_enabled - edge_zone = var.edge_zone - image_cleaner_enabled = var.image_cleaner_enabled - image_cleaner_interval_hours = var.image_cleaner_interval_hours - role_based_access_control_enabled = var.role_based_access_control_enabled - local_account_disabled = var.local_account_disabled - - dynamic "default_node_pool" { - for_each = var.enable_auto_scaling == true ? 
["default_node_pool_auto_scaled"] : [] - - content { - name = var.agents_pool_name - vm_size = var.agents_size - enable_auto_scaling = var.enable_auto_scaling - enable_host_encryption = var.enable_host_encryption - enable_node_public_ip = var.enable_node_public_ip - fips_enabled = var.default_node_pool_fips_enabled - max_count = var.agents_max_count - max_pods = var.agents_max_pods - min_count = var.agents_min_count - node_labels = var.agents_labels - only_critical_addons_enabled = var.only_critical_addons_enabled - orchestrator_version = var.orchestrator_version - os_disk_size_gb = var.os_disk_size_gb - os_disk_type = var.os_disk_type - os_sku = var.os_sku - pod_subnet_id = var.pod_subnet_id - proximity_placement_group_id = var.agents_proximity_placement_group_id - scale_down_mode = var.scale_down_mode - snapshot_id = var.snapshot_id - tags = merge(var.tags, var.agents_tags) - temporary_name_for_rotation = var.temporary_name_for_rotation - type = var.agents_type - ultra_ssd_enabled = var.ultra_ssd_enabled - vnet_subnet_id = var.vnet_subnet_id - zones = var.agents_availability_zones - - node_network_profile { - node_public_ip_tags = var.node_public_ip_tags - } - dynamic "kubelet_config" { - for_each = var.agents_pool_kubelet_configs - - content { - allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls - container_log_max_line = kubelet_config.value.container_log_max_line - container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb - cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled - cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period - cpu_manager_policy = kubelet_config.value.cpu_manager_policy - image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold - image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold - pod_max_pid = kubelet_config.value.pod_max_pid - topology_manager_policy = kubelet_config.value.topology_manager_policy - } - } - dynamic "linux_os_config" { - for_each = var.agents_pool_linux_os_configs - - content { - swap_file_size_mb = linux_os_config.value.swap_file_size_mb - transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag - transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs - - content { - fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr - fs_file_max = sysctl_config.value.fs_file_max - fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches - fs_nr_open = sysctl_config.value.fs_nr_open - kernel_threads_max = sysctl_config.value.kernel_threads_max - net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog - net_core_optmem_max = sysctl_config.value.net_core_optmem_max - net_core_rmem_default = sysctl_config.value.net_core_rmem_default - net_core_rmem_max = sysctl_config.value.net_core_rmem_max - net_core_somaxconn = sysctl_config.value.net_core_somaxconn - net_core_wmem_default = sysctl_config.value.net_core_wmem_default - net_core_wmem_max = sysctl_config.value.net_core_wmem_max - net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog - net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max - vm_max_map_count = sysctl_config.value.vm_max_map_count - vm_swappiness = sysctl_config.value.vm_swappiness - vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure - } - } - } - } - } - } - - dynamic "aci_connector_linux" { - for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : [] - - content { - subnet_name = var.aci_connector_linux_subnet_name - } - } - - - dynamic "ingress_application_gateway" { - for_each = toset(var.ingress_application_gateway != null ? [var.ingress_application_gateway] : []) - - content { - gateway_id = ingress_application_gateway.value.gateway_id - gateway_name = ingress_application_gateway.value.gateway_name - subnet_cidr = ingress_application_gateway.value.subnet_cidr - subnet_id = ingress_application_gateway.value.subnet_id - } - } - - dynamic "key_management_service" { - for_each = var.kms_enabled ? ["key_management_service"] : [] - - content { - key_vault_key_id = var.kms_key_vault_key_id - key_vault_network_access = var.kms_key_vault_network_access - } - } - - dynamic "key_vault_secrets_provider" { - for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : [] - - content { - secret_rotation_enabled = var.secret_rotation_enabled - secret_rotation_interval = var.secret_rotation_interval - } - } - - dynamic "kubelet_identity" { - for_each = var.kubelet_identity == null ? 
[] : [var.kubelet_identity] - content { - client_id = kubelet_identity.value.client_id - object_id = kubelet_identity.value.object_id - user_assigned_identity_id = kubelet_identity.value.user_assigned_identity_id - } - } - - dynamic "http_proxy_config" { - for_each = var.enable_http_proxy ? [1] : [] - - content { - http_proxy = var.http_proxy_config.http_proxy - https_proxy = var.http_proxy_config.https_proxy - no_proxy = var.http_proxy_config.no_proxy - } - } - - dynamic "confidential_computing" { - for_each = var.confidential_computing == null ? [] : [var.confidential_computing] - - content { - sgx_quote_helper_enabled = confidential_computing.value.sgx_quote_helper_enabled - } - } - - dynamic "api_server_access_profile" { - for_each = var.api_server_access_profile != null ? [1] : [] - - content { - authorized_ip_ranges = var.api_server_access_profile.authorized_ip_ranges - vnet_integration_enabled = var.api_server_access_profile.vnet_integration_enabled - subnet_id = var.api_server_access_profile.subnet_id - } - } - - dynamic "auto_scaler_profile" { - for_each = var.auto_scaler_profile_enabled ? [var.auto_scaler_profile] : [] - - content { - balance_similar_node_groups = auto_scaler_profile.value.balance_similar_node_groups - empty_bulk_delete_max = auto_scaler_profile.value.empty_bulk_delete_max - expander = auto_scaler_profile.value.expander - max_graceful_termination_sec = auto_scaler_profile.value.max_graceful_termination_sec - max_node_provisioning_time = auto_scaler_profile.value.max_node_provisioning_time - max_unready_nodes = auto_scaler_profile.value.max_unready_nodes - max_unready_percentage = auto_scaler_profile.value.max_unready_percentage - new_pod_scale_up_delay = auto_scaler_profile.value.new_pod_scale_up_delay - scale_down_delay_after_add = auto_scaler_profile.value.scale_down_delay_after_add - scale_down_delay_after_delete = auto_scaler_profile.value.scale_down_delay_after_delete - scale_down_delay_after_failure = auto_scaler_profile.value.scale_down_delay_after_failure - scale_down_unneeded = auto_scaler_profile.value.scale_down_unneeded - scale_down_unready = auto_scaler_profile.value.scale_down_unready - scale_down_utilization_threshold = auto_scaler_profile.value.scale_down_utilization_threshold - scan_interval = auto_scaler_profile.value.scan_interval - skip_nodes_with_local_storage = auto_scaler_profile.value.skip_nodes_with_local_storage - skip_nodes_with_system_pods = auto_scaler_profile.value.skip_nodes_with_system_pods - } - } - - dynamic "maintenance_window_auto_upgrade" { - for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] - content { - frequency = maintenance_window_auto_upgrade.value.frequency - interval = maintenance_window_auto_upgrade.value.interval - duration = maintenance_window_auto_upgrade.value.duration - day_of_week = maintenance_window_auto_upgrade.value.day_of_week - day_of_month = maintenance_window_auto_upgrade.value.day_of_month - week_index = maintenance_window_auto_upgrade.value.week_index - start_time = maintenance_window_auto_upgrade.value.start_time - utc_offset = maintenance_window_auto_upgrade.value.utc_offset - start_date = maintenance_window_auto_upgrade.value.start_date - - dynamic "not_allowed" { - for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? 
[] : maintenance_window_auto_upgrade.value.not_allowed - content { - start = not_allowed.value.start - end = not_allowed.value.end - } - } - } - } - - dynamic "maintenance_window_node_os" { - for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] - content { - duration = maintenance_window_node_os.value.duration - frequency = maintenance_window_node_os.value.frequency - interval = maintenance_window_node_os.value.interval - day_of_month = maintenance_window_node_os.value.day_of_month - day_of_week = maintenance_window_node_os.value.day_of_week - start_date = maintenance_window_node_os.value.start_date - start_time = maintenance_window_node_os.value.start_time - utc_offset = maintenance_window_node_os.value.utc_offset - week_index = maintenance_window_node_os.value.week_index - - dynamic "not_allowed" { - for_each = maintenance_window_node_os.value.not_allowed == null ? [] : maintenance_window_node_os.value.not_allowed - content { - end = not_allowed.value.end - start = not_allowed.value.start - } - } - } - } - - dynamic "azure_active_directory_role_based_access_control" { - for_each = var.role_based_access_control == null ? [] : var.role_based_access_control - content { - managed = azure_active_directory_role_based_access_control.value.managed - tenant_id = azure_active_directory_role_based_access_control.value.tenant_id - admin_group_object_ids = !azure_active_directory_role_based_access_control.value.azure_rbac_enabled ? var.admin_group_id : null - azure_rbac_enabled = azure_active_directory_role_based_access_control.value.azure_rbac_enabled - } - } - default_node_pool { - name = local.default_node_pool.name - node_count = local.default_node_pool.count - vm_size = local.default_node_pool.vm_size - enable_auto_scaling = local.default_node_pool.enable_auto_scaling - min_count = local.default_node_pool.min_count - max_count = local.default_node_pool.max_count - max_pods = local.default_node_pool.max_pods - os_disk_type = local.default_node_pool.os_disk_type - os_disk_size_gb = local.default_node_pool.os_disk_size_gb - type = local.default_node_pool.type - vnet_subnet_id = local.default_node_pool.vnet_subnet_id - temporary_name_for_rotation = var.temporary_name_for_rotation - enable_host_encryption = local.default_node_pool.enable_host_encryption - dynamic "upgrade_settings" { - for_each = local.default_node_pool.max_surge == null ? [] : ["upgrade_settings"] - - content { - max_surge = local.default_node_pool.max_surge - node_soak_duration_in_minutes = local.default_node_pool.node_soak_duration_in_minutes - drain_timeout_in_minutes = local.default_node_pool.drain_timeout_in_minutes - } - } - } - - dynamic "microsoft_defender" { - for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : [] - - content { - log_analytics_workspace_id = var.log_analytics_workspace_id - } - } - - dynamic "oms_agent" { - for_each = var.oms_agent_enabled ? ["oms_agent"] : [] - - content { - log_analytics_workspace_id = var.log_analytics_workspace_id - msi_auth_for_monitoring_enabled = var.msi_auth_for_monitoring_enabled - } - } - - dynamic "service_mesh_profile" { - for_each = var.service_mesh_profile == null ? 
[] : ["service_mesh_profile"] - content { - mode = var.service_mesh_profile.mode - external_ingress_gateway_enabled = var.service_mesh_profile.external_ingress_gateway_enabled - internal_ingress_gateway_enabled = var.service_mesh_profile.internal_ingress_gateway_enabled - } - } - dynamic "service_principal" { - for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : [] - - content { - client_id = var.client_id - client_secret = var.client_secret - } - } - dynamic "storage_profile" { - for_each = var.storage_profile_enabled ? ["storage_profile"] : [] - - content { - blob_driver_enabled = var.storage_profile.blob_driver_enabled - disk_driver_enabled = var.storage_profile.disk_driver_enabled - disk_driver_version = var.storage_profile.disk_driver_version - file_driver_enabled = var.storage_profile.file_driver_enabled - snapshot_controller_enabled = var.storage_profile.snapshot_controller_enabled - } - } - - identity { - type = var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? "UserAssigned" : "SystemAssigned" - } - - dynamic "web_app_routing" { - for_each = var.web_app_routing == null ? [] : ["web_app_routing"] - - content { - dns_zone_ids = var.web_app_routing.dns_zone_ids - } - } - - dynamic "linux_profile" { - for_each = var.linux_profile != null ? [true] : [] - iterator = lp - content { - admin_username = var.linux_profile.username - - ssh_key { - key_data = var.linux_profile.ssh_key - } - } - } - - dynamic "workload_autoscaler_profile" { - for_each = var.workload_autoscaler_profile == null ? [] : [var.workload_autoscaler_profile] - - content { - keda_enabled = workload_autoscaler_profile.value.keda_enabled - vertical_pod_autoscaler_enabled = workload_autoscaler_profile.value.vertical_pod_autoscaler_enabled - } - } - - - dynamic "http_proxy_config" { - for_each = var.http_proxy_config != null ? ["http_proxy_config"] : [] - - content { - http_proxy = http_proxy_config.value.http_proxy - https_proxy = http_proxy_config.value.https_proxy - no_proxy = http_proxy_config.value.no_proxy - trusted_ca = http_proxy_config.value.trusted_ca - } - } - - dynamic "windows_profile" { - for_each = var.windows_profile != null ? [var.windows_profile] : [] - - content { - admin_username = windows_profile.value.admin_username - admin_password = windows_profile.value.admin_password - license = windows_profile.value.license - - dynamic "gmsa" { - for_each = windows_profile.value.gmsa != null ? [windows_profile.value.gmsa] : [] - - content { - dns_server = gmsa.value.dns_server - root_domain = gmsa.value.root_domain - } - } - } - } - - network_profile { - network_plugin = var.network_plugin - network_policy = var.network_policy - network_data_plane = var.network_data_plane - dns_service_ip = cidrhost(var.service_cidr, 10) - service_cidr = var.service_cidr - load_balancer_sku = var.load_balancer_sku - network_plugin_mode = var.network_plugin_mode - outbound_type = var.outbound_type - pod_cidr = var.net_profile_pod_cidr - - - dynamic "load_balancer_profile" { - for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? 
[1] : [] - - content { - idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes - managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count - managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count - outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids - outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids - outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated - } - } - } - depends_on = [ - azurerm_role_assignment.aks_uai_private_dns_zone_contributor, - ] - tags = module.labels.tags -} -resource "azurerm_kubernetes_cluster_node_pool" "node_pools" { - count = var.enabled ? length(local.nodes_pools) : 0 - kubernetes_cluster_id = azurerm_kubernetes_cluster.aks[0].id - name = local.nodes_pools[count.index].name - vm_size = local.nodes_pools[count.index].vm_size - os_type = local.nodes_pools[count.index].os_type - os_disk_type = local.nodes_pools[count.index].os_disk_type - os_disk_size_gb = local.nodes_pools[count.index].os_disk_size_gb - vnet_subnet_id = local.nodes_pools[count.index].vnet_subnet_id - enable_auto_scaling = local.nodes_pools[count.index].enable_auto_scaling - enable_host_encryption = local.nodes_pools[count.index].enable_host_encryption - node_count = local.nodes_pools[count.index].count - min_count = local.nodes_pools[count.index].min_count - max_count = local.nodes_pools[count.index].max_count - max_pods = local.nodes_pools[count.index].max_pods - enable_node_public_ip = local.nodes_pools[count.index].enable_node_public_ip - mode = local.nodes_pools[count.index].mode - orchestrator_version = local.nodes_pools[count.index].orchestrator_version - node_taints = local.nodes_pools[count.index].node_taints - host_group_id = local.nodes_pools[count.index].host_group_id - capacity_reservation_group_id = var.capacity_reservation_group_id - workload_runtime = var.workload_runtime - zones = var.agents_availability_zones - - dynamic "kubelet_config" { - for_each = var.kubelet_config != null ? [var.kubelet_config] : [] - - content { - allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls - container_log_max_line = kubelet_config.value.container_log_max_line - container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb - cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled - cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period - cpu_manager_policy = kubelet_config.value.cpu_manager_policy - image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold - image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold - pod_max_pid = kubelet_config.value.pod_max_pid - topology_manager_policy = kubelet_config.value.topology_manager_policy - } - } - - dynamic "linux_os_config" { - for_each = var.agents_pool_linux_os_configs - - content { - swap_file_size_mb = linux_os_config.value.swap_file_size_mb - transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag - transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled - - dynamic "sysctl_config" { - for_each = linux_os_config.value.sysctl_configs == null ? 
[] : linux_os_config.value.sysctl_configs - - content { - fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr - fs_file_max = sysctl_config.value.fs_file_max - fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches - fs_nr_open = sysctl_config.value.fs_nr_open - kernel_threads_max = sysctl_config.value.kernel_threads_max - net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog - net_core_optmem_max = sysctl_config.value.net_core_optmem_max - net_core_rmem_default = sysctl_config.value.net_core_rmem_default - net_core_rmem_max = sysctl_config.value.net_core_rmem_max - net_core_somaxconn = sysctl_config.value.net_core_somaxconn - net_core_wmem_default = sysctl_config.value.net_core_wmem_default - net_core_wmem_max = sysctl_config.value.net_core_wmem_max - net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max - net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min - net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 - net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 - net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 - net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout - net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl - net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes - net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time - net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog - net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets - net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse - net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets - net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max - vm_max_map_count = sysctl_config.value.vm_max_map_count - vm_swappiness = sysctl_config.value.vm_swappiness - vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure - } - } - } - } - dynamic "upgrade_settings" { - for_each = local.nodes_pools[count.index].max_surge == null ? [] : ["upgrade_settings"] - - content { - max_surge = local.nodes_pools[count.index].max_surge - node_soak_duration_in_minutes = local.nodes_pools[count.index].node_soak_duration_in_minutes - drain_timeout_in_minutes = local.nodes_pools[count.index].drain_timeout_in_minutes - } - } - - windows_profile { - outbound_nat_enabled = var.outbound_nat_enabled - } -} - -resource "azurerm_role_assignment" "aks_entra_id" { - count = var.enabled && var.role_based_access_control != null && try(var.role_based_access_control[0].azure_rbac_enabled, false) == true ? length(var.admin_group_id) : 0 - scope = azurerm_kubernetes_cluster.aks[0].id - role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin" - principal_id = var.admin_group_id[count.index] -} - -# Allow aks system indentiy access to encrpty disc -resource "azurerm_role_assignment" "aks_system_identity" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id - scope = azurerm_disk_encryption_set.main[0].id - role_definition_name = "Key Vault Crypto Service Encryption User" -} - -# Allow aks system indentiy access to ACR -resource "azurerm_role_assignment" "aks_acr_access_principal_id" { - count = var.enabled && var.acr_enabled ? 
1 : 0 - principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id - scope = var.acr_id - role_definition_name = "AcrPull" -} - -resource "azurerm_role_assignment" "aks_acr_access_object_id" { - count = var.enabled && var.acr_enabled ? 1 : 0 - principal_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id - scope = var.acr_id - role_definition_name = "AcrPull" -} - -# Allow user assigned identity to manage AKS items in MC_xxx RG -resource "azurerm_role_assignment" "aks_user_assigned" { - count = var.enabled ? 1 : 0 - principal_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id - scope = format("/subscriptions/%s/resourceGroups/%s", data.azurerm_subscription.current.subscription_id, azurerm_kubernetes_cluster.aks[0].node_resource_group) - role_definition_name = "Network Contributor" -} - -resource "azurerm_user_assigned_identity" "aks_user_assigned_identity" { - count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 - - name = format("%s-aks-mid", module.labels.id) - resource_group_name = local.resource_group_name - location = local.location -} - -resource "azurerm_role_assignment" "aks_uai_private_dns_zone_contributor" { - count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 - - scope = var.private_dns_zone_id - role_definition_name = "Private DNS Zone Contributor" - principal_id = azurerm_user_assigned_identity.aks_user_assigned_identity[0].principal_id -} - -resource "azurerm_role_assignment" "aks_uai_vnet_network_contributor" { - count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 - scope = var.vnet_id - role_definition_name = "Network Contributor" - principal_id = azurerm_user_assigned_identity.aks_user_assigned_identity[0].principal_id -} - -resource "azurerm_role_assignment" "key_vault_secrets_provider" { - count = var.enabled && var.key_vault_secrets_provider_enabled ? 1 : 0 - scope = var.key_vault_id - role_definition_name = "Key Vault Administrator" - principal_id = azurerm_kubernetes_cluster.aks[0].key_vault_secrets_provider[0].secret_identity[0].object_id -} - -resource "azurerm_role_assignment" "rbac_keyvault_crypto_officer" { - for_each = toset(var.enabled && var.cmk_enabled ? var.admin_objects_ids : []) - scope = var.key_vault_id - role_definition_name = "Key Vault Crypto Officer" - principal_id = each.value -} - -resource "azurerm_key_vault_key" "example" { - depends_on = [azurerm_role_assignment.rbac_keyvault_crypto_officer] - count = var.enabled && var.cmk_enabled ? 1 : 0 - name = format("%s-aks-encrypted-key", module.labels.id) - expiration_date = var.expiration_date - key_vault_id = var.key_vault_id - key_type = "RSA" - key_size = 2048 - key_opts = [ - "decrypt", - "encrypt", - "sign", - "unwrapKey", - "verify", - "wrapKey", - ] - dynamic "rotation_policy" { - for_each = var.rotation_policy_enabled ? var.rotation_policy : {} - content { - automatic { - time_before_expiry = rotation_policy.value.time_before_expiry - } - - expire_after = rotation_policy.value.expire_after - notify_before_expiry = rotation_policy.value.notify_before_expiry - } - } -} - -resource "azurerm_disk_encryption_set" "main" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - name = format("%s-aks-dsk-encrpted", module.labels.id) - resource_group_name = local.resource_group_name - location = local.location - key_vault_key_id = var.key_vault_id != "" ? 
azurerm_key_vault_key.example[0].id : null - - identity { - type = "SystemAssigned" - } -} - -resource "azurerm_role_assignment" "azurerm_disk_encryption_set_key_vault_access" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - principal_id = azurerm_disk_encryption_set.main[0].identity[0].principal_id - scope = var.key_vault_id - role_definition_name = "Key Vault Crypto Service Encryption User" -} - -resource "azurerm_key_vault_access_policy" "main" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - - key_vault_id = var.key_vault_id - - tenant_id = azurerm_disk_encryption_set.main[0].identity[0].tenant_id - object_id = azurerm_disk_encryption_set.main[0].identity[0].principal_id - key_permissions = [ - "Get", - "WrapKey", - "UnwrapKey" - ] - certificate_permissions = [ - "Get" - ] -} - -resource "azurerm_key_vault_access_policy" "key_vault" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - - key_vault_id = var.key_vault_id - - tenant_id = data.azurerm_client_config.current.tenant_id - object_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id - - key_permissions = ["Get"] - certificate_permissions = ["Get"] - secret_permissions = ["Get"] -} - -resource "azurerm_key_vault_access_policy" "kubelet_identity" { - count = var.enabled && var.cmk_enabled ? 1 : 0 - - key_vault_id = var.key_vault_id - - tenant_id = data.azurerm_client_config.current.tenant_id - object_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id - - key_permissions = ["Get"] - certificate_permissions = ["Get"] - secret_permissions = ["Get"] -} - -resource "azurerm_monitor_diagnostic_setting" "aks_diag" { - depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 - name = format("%s-aks-diag-log", module.labels.id) - target_resource_id = azurerm_kubernetes_cluster.aks[0].id - storage_account_id = var.storage_account_id - eventhub_name = var.eventhub_name - eventhub_authorization_rule_id = var.eventhub_authorization_rule_id - log_analytics_workspace_id = var.log_analytics_workspace_id - log_analytics_destination_type = var.log_analytics_destination_type - - dynamic "metric" { - for_each = var.metric_enabled ? ["AllMetrics"] : [] - content { - category = metric.value - enabled = true - } - } - dynamic "enabled_log" { - for_each = var.kv_logs.enabled ? var.kv_logs.category != null ? var.kv_logs.category : var.kv_logs.category_group : [] - content { - category = var.kv_logs.category != null ? enabled_log.value : null - category_group = var.kv_logs.category == null ? enabled_log.value : null - } - } - lifecycle { - ignore_changes = [log_analytics_destination_type] - } -} - -data "azurerm_resources" "aks_pip" { - depends_on = [azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 - type = "Microsoft.Network/publicIPAddresses" - required_tags = { - Environment = var.environment - Name = module.labels.id - Repository = var.repository - } -} - -resource "azurerm_monitor_diagnostic_setting" "pip_aks" { - depends_on = [data.azurerm_resources.aks_pip, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable ? 
1 : 0 - name = format("%s-aks-pip-diag-log", module.labels.id) - target_resource_id = data.azurerm_resources.aks_pip[count.index].resources[0].id - storage_account_id = var.storage_account_id - eventhub_name = var.eventhub_name - eventhub_authorization_rule_id = var.eventhub_authorization_rule_id - log_analytics_workspace_id = var.log_analytics_workspace_id - log_analytics_destination_type = var.log_analytics_destination_type - - dynamic "metric" { - for_each = var.metric_enabled ? ["AllMetrics"] : [] - content { - category = metric.value - enabled = true - } - } - dynamic "enabled_log" { - for_each = var.pip_logs.enabled ? var.pip_logs.category != null ? var.pip_logs.category : var.pip_logs.category_group : [] - content { - category = var.pip_logs.category != null ? enabled_log.value : null - category_group = var.pip_logs.category == null ? enabled_log.value : null - } - } - - lifecycle { - ignore_changes = [log_analytics_destination_type] - } -} - -data "azurerm_resources" "aks_nsg" { - depends_on = [data.azurerm_resources.aks_nsg, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 - type = "Microsoft.Network/networkSecurityGroups" - required_tags = { - Environment = var.environment - Name = module.labels.id - Repository = var.repository - } -} - -resource "azurerm_monitor_diagnostic_setting" "aks-nsg" { - depends_on = [data.azurerm_resources.aks_nsg, azurerm_kubernetes_cluster.aks] - count = var.enabled && var.diagnostic_setting_enable ? 1 : 0 - name = format("%s-aks-nsg-diag-log", module.labels.id) - target_resource_id = data.azurerm_resources.aks_nsg[count.index].resources[0].id - storage_account_id = var.storage_account_id - eventhub_name = var.eventhub_name - eventhub_authorization_rule_id = var.eventhub_authorization_rule_id - log_analytics_workspace_id = var.log_analytics_workspace_id - log_analytics_destination_type = var.log_analytics_destination_type - - dynamic "enabled_log" { - for_each = var.kv_logs.enabled ? var.kv_logs.category != null ? var.kv_logs.category : var.kv_logs.category_group : [] - content { - category = var.kv_logs.category != null ? enabled_log.value : null - category_group = var.kv_logs.category == null ? enabled_log.value : null - } - } - - lifecycle { - ignore_changes = [log_analytics_destination_type] - } -} - -data "azurerm_resources" "aks_nic" { - depends_on = [azurerm_kubernetes_cluster.aks] - count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 - type = "Microsoft.Network/networkInterfaces" - required_tags = { - Environment = var.environment - Name = module.labels.id - Repository = var.repository - } -} - -resource "azurerm_monitor_diagnostic_setting" "aks-nic" { - depends_on = [data.azurerm_resources.aks_nic, azurerm_kubernetes_cluster.aks, azurerm_kubernetes_cluster_node_pool.node_pools] - count = var.enabled && var.diagnostic_setting_enable && var.private_cluster_enabled == true ? 1 : 0 - name = format("%s-aks-nic-dia-log", module.labels.id) - target_resource_id = data.azurerm_resources.aks_nic[count.index].resources[0].id - storage_account_id = var.storage_account_id - eventhub_name = var.eventhub_name - eventhub_authorization_rule_id = var.eventhub_authorization_rule_id - log_analytics_workspace_id = var.log_analytics_workspace_id - log_analytics_destination_type = var.log_analytics_destination_type - - dynamic "metric" { - for_each = var.metric_enabled ? 
["AllMetrics"] : [] - content { - category = metric.value - enabled = true - } - } - - lifecycle { - ignore_changes = [log_analytics_destination_type] - } -} - -## AKS user authentication with Azure Rbac. -resource "azurerm_role_assignment" "example" { - for_each = var.enabled && var.aks_user_auth_role != null ? { for k in var.aks_user_auth_role : k.principal_id => k } : null - # scope = - scope = each.value.scope - role_definition_name = each.value.role_definition_name - principal_id = each.value.principal_id -} \ No newline at end of file diff --git a/node.tf b/node.tf new file mode 100644 index 0000000..dfb2679 --- /dev/null +++ b/node.tf @@ -0,0 +1,103 @@ + +resource "azurerm_kubernetes_cluster_node_pool" "node_pools" { + count = var.enabled ? length(var.nodes_pools) : 0 + kubernetes_cluster_id = azurerm_kubernetes_cluster.aks[0].id + name = var.nodes_pools[count.index].name + vm_size = var.nodes_pools[count.index].vm_size + os_type = var.nodes_pools[count.index].os_type + os_disk_type = var.nodes_pools[count.index].os_disk_type + os_disk_size_gb = var.nodes_pools[count.index].os_disk_size_gb + vnet_subnet_id = var.nodes_pools[count.index].vnet_subnet_id + auto_scaling_enabled = var.nodes_pools[count.index].auto_scaling_enabled + host_encryption_enabled = var.nodes_pools[count.index].host_encryption_enabled + node_count = var.nodes_pools[count.index].count + min_count = var.nodes_pools[count.index].min_count + max_count = var.nodes_pools[count.index].max_count + max_pods = var.nodes_pools[count.index].max_pods + node_public_ip_enabled = var.nodes_pools[count.index].node_public_ip_enabled + mode = var.nodes_pools[count.index].mode + orchestrator_version = var.nodes_pools[count.index].orchestrator_version + node_taints = var.nodes_pools[count.index].node_taints + host_group_id = var.nodes_pools[count.index].host_group_id + node_labels = var.nodes_pools[count.index].node_labels + capacity_reservation_group_id = var.capacity_reservation_group_id + workload_runtime = var.workload_runtime + zones = var.agents_availability_zones + + # Add the optional spot instance attributes with conditional expressions + priority = lookup(var.nodes_pools[count.index], "priority", null) + eviction_policy = lookup(var.nodes_pools[count.index], "eviction_policy", null) + spot_max_price = lookup(var.nodes_pools[count.index], "spot_max_price", null) + + dynamic "kubelet_config" { + for_each = var.kubelet_config != null ? 
[var.kubelet_config] : [] + + content { + allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls + container_log_max_line = kubelet_config.value.container_log_max_line + container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb + cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled + cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period + cpu_manager_policy = kubelet_config.value.cpu_manager_policy + image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold + image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold + pod_max_pid = kubelet_config.value.pod_max_pid + topology_manager_policy = kubelet_config.value.topology_manager_policy + } + } + + dynamic "linux_os_config" { + for_each = var.agents_pool_linux_os_configs + + content { + swap_file_size_mb = linux_os_config.value.swap_file_size_mb + transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag + transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled + + dynamic "sysctl_config" { + for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs + content { + fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr + fs_file_max = sysctl_config.value.fs_file_max + fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches + fs_nr_open = sysctl_config.value.fs_nr_open + kernel_threads_max = sysctl_config.value.kernel_threads_max + net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog + net_core_optmem_max = sysctl_config.value.net_core_optmem_max + net_core_rmem_default = sysctl_config.value.net_core_rmem_default + net_core_rmem_max = sysctl_config.value.net_core_rmem_max + net_core_somaxconn = sysctl_config.value.net_core_somaxconn + net_core_wmem_default = sysctl_config.value.net_core_wmem_default + net_core_wmem_max = sysctl_config.value.net_core_wmem_max + net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max + net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min + net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1 + net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2 + net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3 + net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout + net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl + net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes + net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time + net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog + net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets + net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse + net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets + net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max + vm_max_map_count = sysctl_config.value.vm_max_map_count + vm_swappiness = sysctl_config.value.vm_swappiness + vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure + } + } + } + } + dynamic "upgrade_settings" { + for_each = var.agents_pool_max_surge == null ? 
[] : ["upgrade_settings"] + content { + max_surge = var.agents_pool_max_surge + } + } + + windows_profile { + outbound_nat_enabled = var.outbound_nat_enabled + } +} diff --git a/outputs.tf b/outputs.tf index 230b5d6..b1e382c 100644 --- a/outputs.tf +++ b/outputs.tf @@ -26,5 +26,4 @@ output "node_resource_group" { output "key_vault_secrets_provider" { value = var.enabled && var.key_vault_secrets_provider_enabled ? azurerm_kubernetes_cluster.aks[0].key_vault_secrets_provider[0].secret_identity[0].object_id : null description = "Specifies the obejct id of key vault secrets provider " -} - +} \ No newline at end of file diff --git a/role.tf b/role.tf new file mode 100644 index 0000000..65827aa --- /dev/null +++ b/role.tf @@ -0,0 +1,182 @@ + +resource "azurerm_role_assignment" "aks_entra_id" { + count = var.enabled && var.role_based_access_control != null && try(var.role_based_access_control[0].azure_rbac_enabled, false) == true ? length(var.admin_group_id) : 0 + scope = azurerm_kubernetes_cluster.aks[0].id + role_definition_name = "Azure Kubernetes Service RBAC Cluster Admin" + principal_id = var.admin_group_id[count.index] +} + +# Allow aks system indentiy access to encrpty disc +resource "azurerm_role_assignment" "aks_system_identity" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id + scope = azurerm_disk_encryption_set.main[0].id + role_definition_name = "Contributor" +} + +# Allow aks system indentiy access to ACR +resource "azurerm_role_assignment" "aks_acr_access_principal_id" { + count = var.enabled && var.acr_enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id + scope = var.acr_id + role_definition_name = "AcrPull" +} + +resource "azurerm_role_assignment" "aks_acr_access_object_id" { + count = var.enabled && var.acr_enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id + scope = var.acr_id + role_definition_name = "AcrPull" +} + +# Allow user assigned identity to manage AKS items in MC_xxx RG +resource "azurerm_role_assignment" "aks_user_assigned" { + count = var.enabled ? 1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id + scope = format("/subscriptions/%s/resourceGroups/%s", data.azurerm_subscription.current.subscription_id, azurerm_kubernetes_cluster.aks[0].node_resource_group) + role_definition_name = "Network Contributor" +} + +resource "azurerm_user_assigned_identity" "aks_user_assigned_identity" { + count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 + + name = format("%s-aks-mid", module.labels.id) + resource_group_name = local.resource_group_name + location = local.location +} + +resource "azurerm_role_assignment" "aks_uai_private_dns_zone_contributor" { + count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 1 : 0 + + scope = var.private_dns_zone_id + role_definition_name = "Private DNS Zone Contributor" + principal_id = azurerm_user_assigned_identity.aks_user_assigned_identity[0].principal_id +} + +resource "azurerm_role_assignment" "aks_uai_vnet_network_contributor" { + count = var.enabled && var.private_cluster_enabled && var.private_dns_zone_type == "Custom" ? 
1 : 0 + scope = var.vnet_id + role_definition_name = "Network Contributor" + principal_id = azurerm_user_assigned_identity.aks_user_assigned_identity[0].principal_id +} + +resource "azurerm_role_assignment" "rbac_keyvault_crypto_officer" { + for_each = toset(var.enabled && var.cmk_enabled ? var.admin_objects_ids : []) + scope = var.key_vault_id + role_definition_name = "Key Vault Crypto Officer" + principal_id = each.value +} + +resource "azurerm_key_vault_key" "example" { + depends_on = [azurerm_role_assignment.rbac_keyvault_crypto_officer] + count = var.enabled && var.cmk_enabled ? 1 : 0 + name = format("%s-aks-encrypted-key", module.labels.id) + expiration_date = var.expiration_date + key_vault_id = var.key_vault_id + key_type = "RSA" + key_size = 2048 + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + dynamic "rotation_policy" { + for_each = var.rotation_policy_enabled ? var.rotation_policy : {} + content { + automatic { + time_before_expiry = rotation_policy.value.time_before_expiry + } + + expire_after = rotation_policy.value.expire_after + notify_before_expiry = rotation_policy.value.notify_before_expiry + } + } +} + +resource "azurerm_disk_encryption_set" "main" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + name = format("%s-aks-dsk-encrpted", module.labels.id) + resource_group_name = local.resource_group_name + location = local.location + key_vault_key_id = var.key_vault_id != "" ? azurerm_key_vault_key.example[0].id : null + + identity { + type = "SystemAssigned" + } +} + +resource "azurerm_role_assignment" "azurerm_disk_encryption_set_key_vault_access" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + principal_id = azurerm_disk_encryption_set.main[0].identity[0].principal_id + scope = var.key_vault_id + role_definition_name = "Key Vault Crypto Service Encryption User" +} + +## AKS user authentication with Azure Rbac. +resource "azurerm_role_assignment" "example" { + for_each = var.enabled && var.aks_user_auth_role != null ? { for k in var.aks_user_auth_role : k.principal_id => k } : null + # scope = + scope = each.value.scope + role_definition_name = each.value.role_definition_name + principal_id = each.value.principal_id +} + +resource "azurerm_key_vault_access_policy" "main" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + + key_vault_id = var.key_vault_id + + tenant_id = azurerm_disk_encryption_set.main[0].identity[0].tenant_id + object_id = azurerm_disk_encryption_set.main[0].identity[0].principal_id + key_permissions = [ + "Get", + "WrapKey", + "UnwrapKey" + ] + certificate_permissions = [ + "Get" + ] +} + +resource "azurerm_key_vault_access_policy" "key_vault" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + + key_vault_id = var.key_vault_id + + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id + + key_permissions = ["Get"] + certificate_permissions = ["Get"] + secret_permissions = ["Get"] +} + +resource "azurerm_key_vault_access_policy" "kubelet_identity" { + count = var.enabled && var.cmk_enabled ? 1 : 0 + + key_vault_id = var.key_vault_id + + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = azurerm_kubernetes_cluster.aks[0].kubelet_identity[0].object_id + + key_permissions = ["Get"] + certificate_permissions = ["Get"] + secret_permissions = ["Get"] +} + +resource "azurerm_role_assignment" "aks_system_object_id" { + count = var.enabled ? 
1 : 0 + principal_id = azurerm_kubernetes_cluster.aks[0].identity[0].principal_id + scope = var.vnet_id + role_definition_name = "Network Contributor" +} + +resource "azurerm_role_assignment" "key_vault_secrets_provider" { + count = var.enabled && var.key_vault_secrets_provider_enabled ? 1 : 0 + scope = var.key_vault_id + role_definition_name = "Key Vault Administrator" + principal_id = azurerm_kubernetes_cluster.aks[0].key_vault_secrets_provider[0].secret_identity[0].object_id +} diff --git a/variables.tf b/variables.tf index 7f04477..1ec3cde 100644 --- a/variables.tf +++ b/variables.tf @@ -1,10 +1,11 @@ -#Module : LABEL -#Description : Terraform label module variables. +##----------------------------------------------------------------------------- +## GLOBAL VARIABLE +##----------------------------------------------------------------------------- variable "name" { type = string default = "" - description = "Name (e.g. `app` or `cluster`)." + description = "Name (e.g. `app` or `cluster`)." } variable "repository" { @@ -36,7 +37,9 @@ variable "managedby" { default = "hello@clouddrove.com" description = "ManagedBy, eg 'CloudDrove'." } - +##----------------------------------------------------------------------------- +## KUBERNETES_CLUSTER VARIABLE +##----------------------------------------------------------------------------- variable "enabled" { type = bool default = true @@ -61,164 +64,6 @@ variable "kubernetes_version" { description = "Version of Kubernetes to deploy" } -variable "workload_runtime" { - type = string - default = null - description = "Used to specify the workload runtime. Allowed values are OCIContainer, WasmWasi and KataMshvVmIsolation." -} - -variable "agents_pool_name" { - type = string - default = "nodepool" - description = "The default Azure AKS agentpool (nodepool) name." - nullable = false -} - -variable "agents_size" { - type = string - default = "Standard_D2s_v3" - description = "The default virtual machine size for the Kubernetes agents. Changing this without specifying `var.temporary_name_for_rotation` forces a new resource to be created." -} - -variable "enable_auto_scaling" { - type = bool - default = false - description = "Enable node pool autoscaling" -} - -variable "enable_host_encryption" { - type = bool - default = false - description = "Enable Host Encryption for default node pool. Encryption at host feature must be enabled on the subscription: https://docs.microsoft.com/azure/virtual-machines/linux/disks-enable-host-based-encryption-cli" -} - -variable "enable_node_public_ip" { - type = bool - default = false - description = "(Optional) Should nodes in this Node Pool have a Public IP Address? Defaults to false." -} - -variable "default_node_pool_fips_enabled" { - type = bool - default = null - description = " (Optional) Should the nodes in this Node Pool have Federal Information Processing Standard enabled? Changing this forces a new resource to be created." -} - -variable "agents_max_count" { - type = number - default = null - description = "Maximum number of nodes in a pool" -} - -variable "agents_max_pods" { - type = number - default = null - description = "The maximum number of pods that can run on each agent. Changing this forces a new resource to be created." 
-} - -variable "agents_min_count" { - type = number - default = null - description = "Minimum number of nodes in a pool" -} - -variable "agents_labels" { - type = map(string) - default = {} - description = "A map of Kubernetes labels which should be applied to nodes in the Default Node Pool. Changing this forces a new resource to be created." -} - -variable "only_critical_addons_enabled" { - type = bool - default = null - description = "(Optional) Enabling this option will taint default node pool with `CriticalAddonsOnly=true:NoSchedule` taint. Changing this forces a new resource to be created." -} - -variable "orchestrator_version" { - type = string - default = null - description = "Specify which Kubernetes release to use for the orchestration layer. The default used is the latest Kubernetes version available in the region" -} - -variable "os_disk_size_gb" { - type = number - default = 50 - description = "Disk size of nodes in GBs." -} - -variable "os_disk_type" { - type = string - default = "Managed" - description = "The type of disk which should be used for the Operating System. Possible values are `Ephemeral` and `Managed`. Defaults to `Managed`. Changing this forces a new resource to be created." - nullable = false -} - -variable "os_sku" { - type = string - default = null - description = "(Optional) Specifies the OS SKU used by the agent pool. Possible values include: `Ubuntu`, `CBLMariner`, `Mariner`, `Windows2019`, `Windows2022`. If not specified, the default is `Ubuntu` if OSType=Linux or `Windows2019` if OSType=Windows. And the default Windows OSSKU will be changed to `Windows2022` after Windows2019 is deprecated. Changing this forces a new resource to be created." -} - -variable "pod_subnet_id" { - type = string - default = null - description = "(Optional) The ID of the Subnet where the pods in the default Node Pool should exist. Changing this forces a new resource to be created." -} - -variable "agents_proximity_placement_group_id" { - type = string - default = null - description = "The ID of the Proximity Placement Group of the default Azure AKS agentpool (nodepool). Changing this forces a new resource to be created." -} - -variable "scale_down_mode" { - type = string - default = "Delete" - description = "Specifies the autoscaling behaviour of the Kubernetes Cluster. If not specified, it defaults to `Delete`. Possible values include `Delete` and `Deallocate`. Changing this forces a new resource to be created." -} - -variable "snapshot_id" { - type = string - default = null - description = "(Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property." -} - -variable "tags" { - type = map(string) - default = {} - description = "Any tags that should be present on the AKS cluster resources" -} - -variable "agents_tags" { - type = map(string) - default = {} - description = "A mapping of tags to assign to the Node Pool." -} - -variable "temporary_name_for_rotation" { - type = string - default = "tempnode" - description = "Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. the `var.agents_size` is no longer ForceNew and can be resized by specifying `temporary_name_for_rotation`" -} - -variable "agents_type" { - type = string - default = "VirtualMachineScaleSets" - description = "(Optional) The type of Node Pool which should be created. Possible values are AvailabilitySet and VirtualMachineScaleSets. 
Defaults to VirtualMachineScaleSets." -} - -variable "ultra_ssd_enabled" { - type = bool - default = false - description = "(Optional) Used to specify whether the UltraSSD is enabled in the Default Node Pool. Defaults to false." -} - -variable "vnet_subnet_id" { - type = string - default = null - description = "(Optional) The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created." -} - variable "agents_availability_zones" { type = list(string) default = null @@ -304,6 +149,24 @@ EOT nullable = false } +variable "agents_pool_max_surge" { + type = string + default = null + description = "The maximum number or percentage of nodes which will be added to the Default Node Pool size during an upgrade." +} + +variable "agents_pool_node_soak_duration_in_minutes" { + type = number + default = 0 + description = "(Optional) The amount of time in minutes to wait after draining a node and before reimaging and moving on to next node. Defaults to 0." +} + +variable "agents_pool_drain_timeout_in_minutes" { + type = number + default = null + description = "(Optional) The amount of time in minutes to wait on eviction of pods and graceful termination per node. This eviction wait time honors waiting on pod disruption budgets. If this time is exceeded, the upgrade fails. Unsetting this after configuring it will force a new resource to be created." +} + variable "aci_connector_linux_enabled" { type = bool default = false @@ -324,12 +187,6 @@ variable "aks_sku_tier" { description = "aks sku tier. Possible values are Free ou Paid" } -variable "private_cluster_enabled" { - type = bool - default = true - description = "Configure AKS as a Private Cluster : https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#private_cluster_enabled" -} - variable "node_resource_group" { type = string default = null @@ -350,33 +207,6 @@ https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/ EOD } -variable "default_node_pool" { - description = < Date: Fri, 11 Oct 2024 23:27:13 +0530 Subject: [PATCH 2/7] fix: fixed version --- examples/aks_with_microsoft_entra_id/versions.tf | 2 +- examples/basic/versions.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/aks_with_microsoft_entra_id/versions.tf b/examples/aks_with_microsoft_entra_id/versions.tf index 18fc9ba..33578d1 100644 --- a/examples/aks_with_microsoft_entra_id/versions.tf +++ b/examples/aks_with_microsoft_entra_id/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 4.0.1" } } } diff --git a/examples/basic/versions.tf b/examples/basic/versions.tf index f3fa032..0619d27 100644 --- a/examples/basic/versions.tf +++ b/examples/basic/versions.tf @@ -7,7 +7,7 @@ terraform { required_providers { azurerm = { source = "hashicorp/azurerm" - version = "3.112.0" + version = ">= 4.0.1" } } } \ No newline at end of file From 367a701c2bdf0670936d6dd88d72f0e9134bd643 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Fri, 11 Oct 2024 23:32:15 +0530 Subject: [PATCH 3/7] fix: Updated vault tag in aks_with_microsoft_entra_id example --- examples/aks_with_microsoft_entra_id/example.tf | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/examples/aks_with_microsoft_entra_id/example.tf b/examples/aks_with_microsoft_entra_id/example.tf index a8e0bf0..6c06200 100644 --- a/examples/aks_with_microsoft_entra_id/example.tf +++ 
b/examples/aks_with_microsoft_entra_id/example.tf @@ -1,5 +1,11 @@ provider "azurerm" { features {} + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" +} +provider "azurerm" { + features {} + alias = "peer" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } data "azurerm_client_config" "current_client_config" {} @@ -52,7 +58,7 @@ module "subnet" { module "log-analytics" { source = "clouddrove/log-analytics/azure" - version = "1.0.1" + version = "1.1.0" name = "app" environment = "test" label_order = ["name", "environment"] @@ -64,8 +70,12 @@ module "log-analytics" { module "vault" { source = "clouddrove/key-vault/azure" - version = "1.1.0" + version = "1.2.0" name = "apptestwvshaks" + providers = { + azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. + azurerm.main_sub = azurerm + } #environment = local.environment resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location From 2a423d66e38761c575ccfa4f66ce37ca6e487494 Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Mon, 14 Oct 2024 16:52:55 +0530 Subject: [PATCH 4/7] fix: spot instance nodepool --- examples/complete/example.tf | 10 +++++----- locals.tf | 5 +++-- variables.tf | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/examples/complete/example.tf b/examples/complete/example.tf index ebf2747..d7719a6 100644 --- a/examples/complete/example.tf +++ b/examples/complete/example.tf @@ -1,11 +1,11 @@ provider "azurerm" { features {} - subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" + subscription_id = "068245d4-3c94-42fe-9c4d-9e5e1cabc60c" } provider "azurerm" { features {} alias = "peer" - subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" + subscription_id = "068245d4-3c94-42fe-9c4d-9e5e1cabc60c" } data "azurerm_client_config" "current_client_config" {} @@ -112,7 +112,7 @@ module "aks" { private_cluster_enabled = false default_node_pool = { - name = "agentpool1" + name = "default-nodepool" max_pods = 200 os_disk_size_gb = 64 vm_size = "Standard_B4ms" @@ -126,7 +126,7 @@ module "aks" { ##### if requred more than one node group. nodes_pools = [ { - name = "nodegroup2" + name = "nodepool2" max_pods = 30 os_disk_size_gb = 64 vm_size = "Standard_B4ms" @@ -141,7 +141,7 @@ module "aks" { } }, { - name = "spot" + name = "spotnodepool" max_pods = null os_disk_size_gb = null vm_size = "Standard_D2_v3" diff --git a/locals.tf b/locals.tf index 27e3309..a7f96b6 100644 --- a/locals.tf +++ b/locals.tf @@ -12,7 +12,7 @@ locals { private_dns_zone = var.private_dns_zone_type == "Custom" ? var.private_dns_zone_id : var.private_dns_zone_type resource_group_name = var.resource_group_name location = var.location - default_node_pool = { + default_agent_profile = { agents_pool_name = "agentpool" count = 1 vm_size = "Standard_D2_v3" @@ -50,7 +50,8 @@ locals { # eviction_policy = "Delete" # spot_max_price = -1 # } - nodes_pools_with_defaults = [for ap in var.nodes_pools : merge(local.default_node_pool, ap)] + default_node_pool = merge(local.default_agent_profile, var.default_node_pool) + nodes_pools_with_defaults = [for ap in var.nodes_pools : merge(local.default_agent_profile, ap)] nodes_pools = [for ap in local.nodes_pools_with_defaults : ap.os_type == "Linux" ? 
merge(local.default_linux_node_profile, ap) : merge(local.default_windows_node_profile, ap)] # Defaults for Linux profile # Generally smaller images so can run more pods and require smaller HD diff --git a/variables.tf b/variables.tf index 1ec3cde..698d2df 100644 --- a/variables.tf +++ b/variables.tf @@ -938,7 +938,7 @@ variable "nodes_pools" { max_count = null os_type = "Linux" os_disk_type = "Managed" - vnet_subnet_id = "" + vnet_subnet_id = null host_encryption_enabled = false orchestrator_version = null node_labels = null From c85998f454f48a0b438508e9eed4826bd93cb29e Mon Sep 17 00:00:00 2001 From: Vishal Sharma Date: Tue, 15 Oct 2024 18:31:12 +0530 Subject: [PATCH 5/7] fix: updated subnet module tag --- aks.tf | 16 +++---- examples/complete/example.tf | 66 ++++++++++++++--------------- examples/private_cluster/example.tf | 2 +- examples/public_cluster/example.tf | 2 +- locals.tf | 17 -------- node.tf | 44 +++++++++---------- variables.tf | 16 +++---- 7 files changed, 69 insertions(+), 94 deletions(-) diff --git a/aks.tf b/aks.tf index c41e023..a30f113 100644 --- a/aks.tf +++ b/aks.tf @@ -209,15 +209,12 @@ resource "azurerm_kubernetes_cluster" "aks" { } } - # dynamic "api_server_access_profile" { - # for_each = var.api_server_access_profile != null ? [1] : [] - - # content { - # authorized_ip_ranges = var.api_server_access_profile.authorized_ip_ranges - # #vnet_integration_enabled = var.api_server_access_profile.vnet_integration_enabled - # #subnet_id = var.api_server_access_profile.subnet_id - # } - # } + dynamic "api_server_access_profile" { + for_each = var.api_server_access_profile != null ? [1] : [] + content { + authorized_ip_ranges = var.api_server_access_profile.authorized_ip_ranges + } + } dynamic "auto_scaler_profile" { for_each = var.auto_scaler_profile_enabled ? [var.auto_scaler_profile] : [] @@ -292,7 +289,6 @@ resource "azurerm_kubernetes_cluster" "aks" { dynamic "azure_active_directory_role_based_access_control" { for_each = var.role_based_access_control == null ? [] : var.role_based_access_control content { - # managed = azure_active_directory_role_based_access_control.value.managed tenant_id = azure_active_directory_role_based_access_control.value.tenant_id admin_group_object_ids = !azure_active_directory_role_based_access_control.value.azure_rbac_enabled ? 
var.admin_group_id : null azure_rbac_enabled = azure_active_directory_role_based_access_control.value.azure_rbac_enabled diff --git a/examples/complete/example.tf b/examples/complete/example.tf index d7719a6..2545ba9 100644 --- a/examples/complete/example.tf +++ b/examples/complete/example.tf @@ -1,11 +1,11 @@ provider "azurerm" { features {} - subscription_id = "068245d4-3c94-42fe-9c4d-9e5e1cabc60c" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } provider "azurerm" { features {} alias = "peer" - subscription_id = "068245d4-3c94-42fe-9c4d-9e5e1cabc60c" + subscription_id = "000000-11111-1223-XXX-XXXXXXXXXXXX" } data "azurerm_client_config" "current_client_config" {} @@ -14,8 +14,8 @@ module "resource_group" { source = "clouddrove/resource-group/azure" version = "1.0.2" - name = "Public-app" - environment = "test" + name = "Public-app1" + environment = "test2" label_order = ["name", "environment", ] location = "Canada Central" } @@ -24,8 +24,8 @@ module "vnet" { source = "clouddrove/vnet/azure" version = "1.0.4" - name = "app" - environment = "test" + name = "app1" + environment = "test2" label_order = ["name", "environment"] resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -34,10 +34,10 @@ module "vnet" { module "subnet" { source = "clouddrove/subnet/azure" - version = "1.2.0" + version = "1.2.1" - name = "app" - environment = "test" + name = "app1" + environment = "test2" label_order = ["name", "environment"] resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -50,7 +50,7 @@ module "subnet" { # route_table routes = [ { - name = "rt-test" + name = "rt_test" address_prefix = "0.0.0.0/0" next_hop_type = "Internet" } @@ -73,7 +73,7 @@ module "log-analytics" { module "vault" { source = "clouddrove/key-vault/azure" version = "1.2.0" - name = "vishal-012" + name = "vjsn-738" providers = { azurerm.dns_sub = azurerm.peer, #change this to other alias if dns hosted in other subscription. azurerm.main_sub = azurerm @@ -103,7 +103,7 @@ module "vault" { module "aks" { source = "../../" - name = "app1-yum" + name = "app-yum" environment = "test" resource_group_name = module.resource_group.resource_group_name location = module.resource_group.resource_group_location @@ -116,7 +116,7 @@ module "aks" { max_pods = 200 os_disk_size_gb = 64 vm_size = "Standard_B4ms" - count = 1 + count = 3 node_public_ip_enabled = false auto_scaling_enabled = true min_count = 3 @@ -126,31 +126,31 @@ module "aks" { ##### if requred more than one node group. 
nodes_pools = [ { - name = "nodepool2" - max_pods = 30 - os_disk_size_gb = 64 - vm_size = "Standard_B4ms" - count = 2 - enable_node_public_ip = false - mode = "User" - auto_scaling_enabled = true - min_count = 3 - max_count = 5 + name = "nodepool2" + max_pods = 30 + os_disk_size_gb = 64 + vm_size = "Standard_B4ms" + count = 2 + node_public_ip_enabled = true + mode = "User" + auto_scaling_enabled = true + min_count = 3 + max_count = 5 node_labels = { "sfvfv" = "spot" } }, { - name = "spotnodepool" - max_pods = null - os_disk_size_gb = null - vm_size = "Standard_D2_v3" - count = 1 - enable_node_public_ip = false - mode = null - auto_scaling_enabled = true - min_count = 1 - max_count = 1 + name = "spotnodepool" + max_pods = null + os_disk_size_gb = null + vm_size = "Standard_D2_v3" + count = 1 + node_public_ip_enabled = false + mode = null + auto_scaling_enabled = true + min_count = 1 + max_count = 1 node_labels = { "dsvdv" = "spot" } diff --git a/examples/private_cluster/example.tf b/examples/private_cluster/example.tf index 8a426f0..691daa0 100644 --- a/examples/private_cluster/example.tf +++ b/examples/private_cluster/example.tf @@ -34,7 +34,7 @@ module "vnet" { module "subnet" { source = "clouddrove/subnet/azure" - version = "1.2.0" + version = "1.2.1" name = "app" environment = "test" diff --git a/examples/public_cluster/example.tf b/examples/public_cluster/example.tf index 83b6341..7523b6e 100644 --- a/examples/public_cluster/example.tf +++ b/examples/public_cluster/example.tf @@ -35,7 +35,7 @@ module "vnet" { module "subnet" { source = "clouddrove/subnet/azure" - version = "1.2.0" + version = "1.2.1" name = "app" environment = "test" diff --git a/locals.tf b/locals.tf index a7f96b6..e8fa4b7 100644 --- a/locals.tf +++ b/locals.tf @@ -1,9 +1,4 @@ -## Managed By : CloudDrove -## Copyright @ CloudDrove. All Right Reserved. - -## Vritual Network and Subnet Creation - data "azurerm_subscription" "current" {} data "azurerm_client_config" "current" {} @@ -45,11 +40,6 @@ locals { eviction_policy = null spot_max_price = null } - # default_spot_node_pool = { - # priority = "Spot" - # eviction_policy = "Delete" - # spot_max_price = -1 - # } default_node_pool = merge(local.default_agent_profile, var.default_node_pool) nodes_pools_with_defaults = [for ap in var.nodes_pools : merge(local.default_agent_profile, ap)] nodes_pools = [for ap in local.nodes_pools_with_defaults : ap.os_type == "Linux" ? merge(local.default_linux_node_profile, ap) : merge(local.default_windows_node_profile, ap)] @@ -59,13 +49,6 @@ locals { max_pods = 30 os_disk_size_gb = 128 } - - # default_spot_instanse = { - # priority = "Spot" - # eviction_policy = "Delete" - # spot_max_price = -1 - # } - # Defaults for Windows profile # Do not want to run same number of pods and some images can be quite large default_windows_node_profile = { diff --git a/node.tf b/node.tf index dfb2679..cc605f2 100644 --- a/node.tf +++ b/node.tf @@ -2,32 +2,30 @@ resource "azurerm_kubernetes_cluster_node_pool" "node_pools" { count = var.enabled ? 
length(var.nodes_pools) : 0 kubernetes_cluster_id = azurerm_kubernetes_cluster.aks[0].id - name = var.nodes_pools[count.index].name - vm_size = var.nodes_pools[count.index].vm_size - os_type = var.nodes_pools[count.index].os_type - os_disk_type = var.nodes_pools[count.index].os_disk_type - os_disk_size_gb = var.nodes_pools[count.index].os_disk_size_gb - vnet_subnet_id = var.nodes_pools[count.index].vnet_subnet_id - auto_scaling_enabled = var.nodes_pools[count.index].auto_scaling_enabled - host_encryption_enabled = var.nodes_pools[count.index].host_encryption_enabled - node_count = var.nodes_pools[count.index].count - min_count = var.nodes_pools[count.index].min_count - max_count = var.nodes_pools[count.index].max_count - max_pods = var.nodes_pools[count.index].max_pods - node_public_ip_enabled = var.nodes_pools[count.index].node_public_ip_enabled - mode = var.nodes_pools[count.index].mode - orchestrator_version = var.nodes_pools[count.index].orchestrator_version - node_taints = var.nodes_pools[count.index].node_taints - host_group_id = var.nodes_pools[count.index].host_group_id - node_labels = var.nodes_pools[count.index].node_labels + name = local.nodes_pools[count.index].name + vm_size = local.nodes_pools[count.index].vm_size + os_type = local.nodes_pools[count.index].os_type + os_disk_type = local.nodes_pools[count.index].os_disk_type + os_disk_size_gb = local.nodes_pools[count.index].os_disk_size_gb + vnet_subnet_id = local.nodes_pools[count.index].vnet_subnet_id + auto_scaling_enabled = local.nodes_pools[count.index].auto_scaling_enabled + host_encryption_enabled = local.nodes_pools[count.index].host_encryption_enabled + node_count = local.nodes_pools[count.index].count + min_count = local.nodes_pools[count.index].min_count + max_count = local.nodes_pools[count.index].max_count + max_pods = local.nodes_pools[count.index].max_pods + node_public_ip_enabled = local.nodes_pools[count.index].node_public_ip_enabled + mode = local.nodes_pools[count.index].mode + orchestrator_version = local.nodes_pools[count.index].orchestrator_version + node_taints = local.nodes_pools[count.index].node_taints + host_group_id = local.nodes_pools[count.index].host_group_id + node_labels = local.nodes_pools[count.index].node_labels capacity_reservation_group_id = var.capacity_reservation_group_id workload_runtime = var.workload_runtime zones = var.agents_availability_zones - - # Add the optional spot instance attributes with conditional expressions - priority = lookup(var.nodes_pools[count.index], "priority", null) - eviction_policy = lookup(var.nodes_pools[count.index], "eviction_policy", null) - spot_max_price = lookup(var.nodes_pools[count.index], "spot_max_price", null) + priority = local.nodes_pools[count.index].priority + eviction_policy = local.nodes_pools[count.index].eviction_policy + spot_max_price = local.nodes_pools[count.index].spot_max_price dynamic "kubelet_config" { for_each = var.kubelet_config != null ? 
[var.kubelet_config] : []
diff --git a/variables.tf b/variables.tf
index 698d2df..e726577 100644
--- a/variables.tf
+++ b/variables.tf
@@ -670,15 +670,13 @@ variable "auto_scaler_profile" {
   }
   description = "Auto scaler profile configuration"
 }
-# variable "api_server_access_profile" {
-#   type = object({
-#     authorized_ip_ranges = optional(list(string))
-#     #vnet_integration_enabled = optional(bool)
-#     #subnet_id                = optional(string)
-#   })
-#   default     = null
-#   description = "Controlling the public and private exposure of a cluster please see the properties"
-# }
+variable "api_server_access_profile" {
+  type = object({
+    authorized_ip_ranges = optional(list(string))
+  })
+  default     = null
+  description = "Controls the public and private exposure of the cluster API server; see the provider documentation for the supported properties."
+}
 variable "local_account_disabled" {
   type        = bool
   default     = false

From 07c4df876bb98719b9a42278b6fc2c497ac7fce7 Mon Sep 17 00:00:00 2001
From: Vishal Sharma
Date: Wed, 16 Oct 2024 18:10:14 +0530
Subject: [PATCH 6/7] fix: renamed resources as per convention

---
 examples/complete/example.tf | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/examples/complete/example.tf b/examples/complete/example.tf
index 2545ba9..f4488cf 100644
--- a/examples/complete/example.tf
+++ b/examples/complete/example.tf
@@ -14,8 +14,8 @@ module "resource_group" {
   source  = "clouddrove/resource-group/azure"
   version = "1.0.2"
 
-  name        = "Public-app1"
-  environment = "test2"
+  name        = "Public-app"
+  environment = "test"
   label_order = ["name", "environment", ]
   location    = "Canada Central"
 }
@@ -24,8 +24,8 @@ module "vnet" {
   source  = "clouddrove/vnet/azure"
   version = "1.0.4"
 
-  name        = "app1"
-  environment = "test2"
+  name        = "app"
+  environment = "test"
   label_order         = ["name", "environment"]
   resource_group_name = module.resource_group.resource_group_name
   location            = module.resource_group.resource_group_location
@@ -36,8 +36,8 @@ module "subnet" {
   source  = "clouddrove/subnet/azure"
   version = "1.2.1"
 
-  name        = "app1"
-  environment = "test2"
+  name        = "app"
+  environment = "test"
   label_order         = ["name", "environment"]
   resource_group_name = module.resource_group.resource_group_name
   location            = module.resource_group.resource_group_location

From d432e7f0c783a6c64168c2c3e5381ff2d43d0a5a Mon Sep 17 00:00:00 2001
From: Vedant
Date: Tue, 22 Oct 2024 02:16:27 +0530
Subject: [PATCH 7/7] fix: added private endpoint

---
 aks.tf                       | 154 +++++++++++++++++++++++++++++++++++
 examples/complete/example.tf |  32 ++++++--
 variables.tf                 |  58 +++++++++++++
 versions.tf                  |   5 +-
 4 files changed, 241 insertions(+), 8 deletions(-)

diff --git a/aks.tf b/aks.tf
index a30f113..8275da6 100644
--- a/aks.tf
+++ b/aks.tf
@@ -427,3 +427,157 @@ resource "azurerm_kubernetes_cluster" "aks" {
 
   tags = module.labels.tags
 }
+
+##-----------------------------------------------------------------------------
+## Below resource will deploy private endpoint for AKS.
+##-----------------------------------------------------------------------------
+resource "azurerm_private_endpoint" "pep" {
+  provider = azurerm.main_sub
+  count    = var.enabled && var.enable_private_endpoint ? 1 : 0
+
+  name                = format("%s-pe-akc", module.labels.id)
+  location            = var.location
+  resource_group_name = var.resource_group_name
+  subnet_id           = var.subnet_id
+  tags                = module.labels.tags
+  private_service_connection {
+    name                           = format("%s-psc-akc", module.labels.id)
+    is_manual_connection           = false
+    private_connection_resource_id = azurerm_kubernetes_cluster.aks[0].id
+    subresource_names              = ["aks"]
+  }
+  lifecycle {
+    ignore_changes = [
+      tags,
+    ]
+  }
+}
+
+##-----------------------------------------------------------------------------
+## Data block to retrieve private IP of private endpoint.
+##-----------------------------------------------------------------------------
+data "azurerm_private_endpoint_connection" "private-ip" {
+  provider            = azurerm.main_sub
+  count               = var.enabled && var.enable_private_endpoint ? 1 : 0
+  name                = azurerm_private_endpoint.pep[0].name
+  resource_group_name = var.resource_group_name
+}
+
+##-----------------------------------------------------------------------------
+## Below resource will create private dns zone in your azure subscription.
+## Will be created only when there is no existing private dns zone and private endpoint is enabled.
+##-----------------------------------------------------------------------------
+resource "azurerm_private_dns_zone" "dnszone" {
+  provider            = azurerm.main_sub
+  count               = var.enabled && var.existing_private_dns_zone == null && var.enable_private_endpoint ? 1 : 0
+  name                = "privatelink.kubernetes.cluster.windows.net"
+  resource_group_name = var.resource_group_name
+  tags                = module.labels.tags
+}
+
+##-----------------------------------------------------------------------------
+## Below resource will create vnet link in private dns.
+## Vnet link will be created when there is no existing private dns zone or existing private dns zone is in same subscription.
+##-----------------------------------------------------------------------------
+resource "azurerm_private_dns_zone_virtual_network_link" "vent-link" {
+  provider = azurerm.main_sub
+  count    = var.enabled && var.enable_private_endpoint && var.diff_sub == false ? 1 : 0
+
+  name                  = var.existing_private_dns_zone == null ? format("%s-pdz-vnet-link-akc", module.labels.id) : format("%s-pdz-vnet-link-akc-1", module.labels.id)
+  resource_group_name   = local.valid_rg_name
+  private_dns_zone_name = local.private_dns_zone_name
+  virtual_network_id    = var.virtual_network_id
+  tags                  = module.labels.tags
+}
+
+##-----------------------------------------------------------------------------
+## Below resource will create vnet link in existing private dns zone.
+## Vnet link will be created when existing private dns zone is in different subscription.
+##-----------------------------------------------------------------------------
+resource "azurerm_private_dns_zone_virtual_network_link" "vent-link-1" {
+  provider              = azurerm.dns_sub
+  count                 = var.enabled && var.enable_private_endpoint && var.diff_sub == true ? 1 : 0
+  name                  = var.existing_private_dns_zone == null ? format("%s-pdz-vnet-link-akc", module.labels.id) : format("%s-pdz-vnet-link-akc-1", module.labels.id)
+  resource_group_name   = local.valid_rg_name
+  private_dns_zone_name = local.private_dns_zone_name
+  virtual_network_id    = var.virtual_network_id
+  tags                  = module.labels.tags
+}
+
+##-----------------------------------------------------------------------------
+## Below resource will create vnet link in existing private dns zone.
+## Vnet link will be created when existing private dns zone is in different subscription.
+## This resource is deployed when more than 1 vnet link is required and module can be called again to do so without deploying other AKS resources. +##----------------------------------------------------------------------------- +resource "azurerm_private_dns_zone_virtual_network_link" "vent-link-diff-subs" { + provider = azurerm.dns_sub + count = var.enabled && var.multi_sub_vnet_link && var.existing_private_dns_zone != null ? 1 : 0 + + name = format("%s-pdz-vnet-link-akc-1", module.labels.id) + resource_group_name = var.existing_private_dns_zone_resource_group_name + private_dns_zone_name = var.existing_private_dns_zone + virtual_network_id = var.virtual_network_id + tags = module.labels.tags +} + +##----------------------------------------------------------------------------- +## Below resource will create vnet link in private dns zone. +## Below resource will be created when extra vnet link is required in dns zone in same subscription. +##----------------------------------------------------------------------------- +resource "azurerm_private_dns_zone_virtual_network_link" "addon_vent_link" { + provider = azurerm.main_sub + count = var.enabled && var.addon_vent_link ? 1 : 0 + + name = format("%s-pdz-vnet-link-akc-addon", module.labels.id) + resource_group_name = var.addon_resource_group_name + private_dns_zone_name = var.existing_private_dns_zone == null ? azurerm_private_dns_zone.dnszone[0].name : var.existing_private_dns_zone + virtual_network_id = var.addon_virtual_network_id + tags = module.labels.tags +} + +##----------------------------------------------------------------------------- +## Below resource will create dns A record for private ip of private endpoint in private dns zone. +##----------------------------------------------------------------------------- +resource "azurerm_private_dns_a_record" "arecord" { + provider = azurerm.main_sub + count = var.enabled && var.enable_private_endpoint && var.diff_sub == false ? 1 : 0 + + name = azurerm_kubernetes_cluster.aks[0].name + zone_name = local.private_dns_zone_name + resource_group_name = local.valid_rg_name + ttl = 3600 + records = [data.azurerm_private_endpoint_connection.private-ip[0].private_service_connection[0].private_ip_address] + tags = module.labels.tags + lifecycle { + ignore_changes = [ + tags, + ] + } +} + +##----------------------------------------------------------------------------- +## Below resource will create dns A record for private ip of private endpoint in private dns zone. +## This resource will be created when private dns is in different subscription. +##----------------------------------------------------------------------------- +resource "azurerm_private_dns_a_record" "arecord-1" { + provider = azurerm.dns_sub + count = var.enabled && var.enable_private_endpoint && var.diff_sub == true ? 1 : 0 + + + name = azurerm_kubernetes_cluster.aks[0].name + zone_name = local.private_dns_zone_name + resource_group_name = local.valid_rg_name + ttl = 3600 + records = [data.azurerm_private_endpoint_connection.private-ip[0].private_service_connection[0].private_ip_address] + tags = module.labels.tags + lifecycle { + ignore_changes = [ + tags, + ] + } +} + +locals { + valid_rg_name = var.existing_private_dns_zone == null ? var.resource_group_name : var.existing_private_dns_zone_resource_group_name + private_dns_zone_name = var.enable_private_endpoint ? var.existing_private_dns_zone == null ? 
azurerm_private_dns_zone.dnszone[0].name : var.existing_private_dns_zone : null
+}
\ No newline at end of file
diff --git a/examples/complete/example.tf b/examples/complete/example.tf
index f4488cf..7547dcf 100644
--- a/examples/complete/example.tf
+++ b/examples/complete/example.tf
@@ -73,7 +73,7 @@ module "log-analytics" {
 module "vault" {
   source  = "clouddrove/key-vault/azure"
   version = "1.2.0"
-  name    = "vjsn-738"
+  name    = "vjsn-738112"
   providers = {
     azurerm.dns_sub  = azurerm.peer, #change this to other alias if dns hosted in other subscription.
     azurerm.main_sub = azurerm
   }
@@ -102,11 +102,18 @@ module "vault" {
 }
 
 module "aks" {
-  source              = "../../"
-  name                = "app-yum"
-  environment         = "test"
-  resource_group_name = module.resource_group.resource_group_name
-  location            = module.resource_group.resource_group_location
+  source = "../../"
+  providers = {
+    azurerm.dns_sub  = azurerm.peer, #change this to other alias if dns hosted in other subscription.
+    azurerm.main_sub = azurerm
+  }
+  name                    = "app-yum"
+  enable_private_endpoint = true
+  environment             = "test"
+  resource_group_name     = module.resource_group.resource_group_name
+  location                = module.resource_group.resource_group_location
+  virtual_network_id      = module.vnet.vnet_id
+  subnet_id               = module.subnet.default_subnet_id[0]
 
   kubernetes_version      = "1.28.9"
   private_cluster_enabled = false
@@ -181,3 +188,16 @@ output "test1" {
 output "test" {
   value = module.aks.nodes_pools
 }
+
+######## Following to be uncommented only when using DNS Zone from different subscription along with existing DNS zone.
+
+# diff_sub  = true
+# alias     = ""
+# alias_sub = ""
+
+######### Following to be uncommented when using DNS zone from different resource group or different subscription.
+# existing_private_dns_zone                      = "privatelink.vaultcore.azure.net"
+# existing_private_dns_zone_resource_group_name  = "dns-rg"
+
+#### enable diagnostic setting
+## when diagnostic_setting_enable is enabled, add log analytics workspace id
diff --git a/variables.tf b/variables.tf
index e726577..fe4d29f 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1057,3 +1057,61 @@ variable "flux_retry_interval_in_seconds" {
   description = "The interval at which to re-reconcile the kustomization on the cluster in the event of failure on reconciliation."
   default     = 600
 }
+
+variable "addon_vent_link" {
+  type        = bool
+  default     = false
+  description = "Flag to control creation of an additional vnet link in the private DNS zone"
+}
+
+variable "multi_sub_vnet_link" {
+  type        = bool
+  default     = false
+  description = "Flag to control creation of vnet link for dns zone in different subscription"
+}
+
+variable "enable_private_endpoint" {
+  type        = bool
+  default     = true
+  description = "Manages a Private Endpoint for the AKS cluster"
+}
+
+variable "existing_private_dns_zone" {
+  type        = string
+  default     = null
+  description = "Name of the existing private DNS zone"
+}
+
+variable "diff_sub" {
+  # To be set true when hosted DNS zone is in different subscription.
+  type        = bool
+  default     = false
+  description = "Flag to tell whether the dns zone is in a different subscription or not."
+}
+variable "addon_virtual_network_id" {
+  type        = string
+  default     = ""
+  description = "The ID of the virtual network for the addon vnet link"
+}
+
+variable "addon_resource_group_name" {
+  type        = string
+  default     = ""
+  description = "The name of the resource group for the addon vnet link"
+}
+variable "subnet_id" {
+  type        = string
+  default     = ""
+  description = "The resource ID of the subnet"
+}
+variable "existing_private_dns_zone_resource_group_name" {
+  type        = string
+  default     = ""
+  description = "The name of the resource group that contains the existing private DNS zone"
+}
+
+variable "virtual_network_id" {
+  type        = string
+  default     = ""
+  description = "The ID of the virtual network to link with the private DNS zone"
+}
\ No newline at end of file
diff --git a/versions.tf b/versions.tf
index b2272c2..a4dbfdb 100644
--- a/versions.tf
+++ b/versions.tf
@@ -7,8 +7,9 @@ terraform {
 terraform {
   required_providers {
     azurerm = {
-      source  = "hashicorp/azurerm"
-      version = ">= 4.0.1"
+      source                = "hashicorp/azurerm"
+      version               = ">= 4.0.1"
+      configuration_aliases = [azurerm.main_sub, azurerm.dns_sub]
     }
   }
 }
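
Usage note: because versions.tf now declares configuration_aliases = [azurerm.main_sub, azurerm.dns_sub], every caller of this module must pass both aliased azurerm providers, even when the cluster and the private DNS zone live in the same subscription. The following is a minimal root-module sketch only; the "peer" alias and the placeholder subscription IDs are illustrative assumptions, and the module inputs mirror the providers map used in examples/complete/example.tf.

# Default provider: subscription that hosts the AKS cluster and private endpoint.
provider "azurerm" {
  features {}
  subscription_id = "00000000-0000-0000-0000-000000000000" # placeholder
}

# Aliased provider: subscription that hosts the private DNS zone (only differs when diff_sub = true).
provider "azurerm" {
  alias = "peer"
  features {}
  subscription_id = "11111111-1111-1111-1111-111111111111" # placeholder
}

module "aks" {
  source = "../../" # or the published registry source of this module

  providers = {
    azurerm.main_sub = azurerm      # deploys the cluster, endpoint and DNS records
    azurerm.dns_sub  = azurerm.peer # manages the private DNS zone
  }

  name                    = "app-yum"
  environment             = "test"
  enable_private_endpoint = true
  resource_group_name     = module.resource_group.resource_group_name
  location                = module.resource_group.resource_group_location
  virtual_network_id      = module.vnet.vnet_id
  subnet_id               = module.subnet.default_subnet_id[0]
  # ...remaining inputs as shown in examples/complete/example.tf
}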