AKS cluster creates an additional route table and NSG instead of using the existing custom route table and NSG
I recently started working with Terraform on Azure. As part of that, I have gone through the documentation as much as possible to create resources. Here I am trying to create an AKS cluster, and I am able to create it successfully. However, I have a concern (I feel something is wrong; please correct me if I am mistaken).
Currently, during AKS cluster provisioning, aks-engine automatically creates a route table and a network security group. I don't see any option in the Terraform documentation to prevent this. I want to use my existing route-table and NSG, because I am on a corporate network.
Expected behavior: AKS should pick up the existing route table and NSG instead of creating new resources.
Here is my code snippet. Please help me.
provider "azurerm" {
version = "~> 2.15"
features {}
}
resource "azurerm_resource_group" "aks" {
name = var.resource_group
location = var.location
}
#fetch existing subnet
data "azurerm_subnet" "aks" {
name = var.subnetname
virtual_network_name = var.virtual_network_name
resource_group_name = var.vnet_resource_group
}
resource "azurerm_subnet_network_security_group_association" "aks" {
subnet_id = data.azurerm_subnet.aks.id
network_security_group_id = var.network_security_group
}
resource "azurerm_route_table" "aks"{
name = var.subnetname
resource_group_name = azurerm_resource_group.aks.name
location = azurerm_resource_group.aks.location
disable_bgp_route_propagation = false
route{
name = var.route_name
address_prefix = var.route_address_prefix
next_hop_type = var.route_next_hop_type
}
}
resource "azurerm_subnet_route_table_association" "aks" {
subnet_id = data.azurerm_subnet.aks.id
route_table_id = azurerm_route_table.aks.id
}
resource "azurerm_kubernetes_cluster" "aks" {
name = azurerm_resource_group.aks.name
resource_group_name = azurerm_resource_group.aks.name
location = azurerm_resource_group.aks.location
dns_prefix = "akstfpoc" #The dns_prefix must contain between 3 and 45 characters, and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number.
kubernetes_version = "1.15.10"
private_cluster_enabled = false
node_resource_group = var.node_resource_group
api_server_authorized_ip_ranges = [] #var.api_server_authorized_ip_ranges
default_node_pool {
enable_node_public_ip = false
name = "agentpool"
node_count = var.node_count
orchestrator_version = "1.15.10"
vm_size = var.vm_size
os_disk_size_gb = var.os_disk_size_gb
vnet_subnet_id = data.azurerm_subnet.aks.id
}
linux_profile {
admin_username = var.admin_username
ssh_key {
key_data = var.ssh_key
}
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
role_based_access_control {
enabled = true
}
network_profile {
network_plugin = "kubenet"
#dns_service_ip = "172.17.1.10"
#service_cidr = "172.16.0.0/14"
pod_cidr = "172.40.0.0/16"
network_policy = "calico"
outbound_type = "loadBalancer"
load_balancer_sku = "Standard"
# load_balancer_profile {
# managed_outbound_ip_count = []
# outbound_ip_address_ids = []
# outbound_ip_prefix_ids = [""]
# }
}
addon_profile {
aci_connector_linux {
enabled = false
}
azure_policy {
enabled = false
}
http_application_routing {
enabled = false
}
kube_dashboard {
enabled = false
}
oms_agent {
enabled = false
}
}
}
To use a private subnet you should use
network_plugin = "azure"
This lets you bind your AKS cluster to a pre-existing subnet, and you keep full control of the route table and NSG (you can pre-create them and associate them with your subnet using Terraform).
See the documentation on how to do this with azure-cli:
https://docs.microsoft.com/en-us/azure/aks/configure-azure-cni
For Terraform, you need to set
vnet_subnet_id on the default_node_pool: https://www.terraform.io/docs/providers/azurerm/r/kubernetes_cluster.html#vnet_subnet_id
https://www.terraform.io/docs/providers/azurerm/r/kubernetes_cluster.html#network_profile
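As a rough illustration, here is a minimal sketch of what the network_profile block from the question could look like with Azure CNI, assuming the existing subnet data source and the NSG/route-table associations from the snippet above stay as they are. The service_cidr, dns_service_ip and docker_bridge_cidr values below are illustrative placeholders, not values from the original post; pick ranges that do not overlap your VNet.

  # With the "azure" plugin, nodes and pods get IPs from the existing subnet
  # (default_node_pool.vnet_subnet_id = data.azurerm_subnet.aks.id), so the
  # cluster uses the route table and NSG already associated with that subnet.
  network_profile {
    network_plugin    = "azure"
    network_policy    = "calico"
    outbound_type     = "loadBalancer"
    load_balancer_sku = "Standard"

    # No pod_cidr here: pods use the subnet's address space.
    service_cidr       = "172.16.0.0/16" # placeholder, must not overlap the VNet
    dns_service_ip     = "172.16.0.10"   # placeholder, must fall inside service_cidr
    docker_bridge_cidr = "172.17.0.1/16" # placeholder
  }

Note that this is only a sketch of the relevant changes; the rest of the azurerm_kubernetes_cluster resource from the question stays unchanged.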