Container Load Balancer - Network Security Group (NSG) #8054
base: master
@@ -0,0 +1,27 @@
package fixture

import (
	armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v6"
	"k8s.io/utils/ptr"
)

func (f *AzureFixture) ManagedCluster() *AzureManagedClusterFixture {
	return &AzureManagedClusterFixture{
		mc: &armcontainerservice.ManagedCluster{
			Name: ptr.To("managedcluster"),
			Properties: &armcontainerservice.ManagedClusterProperties{
				NetworkProfile: &armcontainerservice.NetworkProfile{
					PodCidrs: []*string{ptr.To("192.1.0.0/16")},
				},
			},
		},
	}
}

type AzureManagedClusterFixture struct {
	mc *armcontainerservice.ManagedCluster
}

func (f *AzureManagedClusterFixture) Build() *armcontainerservice.ManagedCluster {
	return f.mc
}
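For context, a rough sketch of how this fixture might be exercised in a unit test. The NewFixture().Azure() entry point and the import path are assumptions based on the fixture package's existing conventions, not part of this diff:

package fixture_test

import (
	"testing"

	// Import path is assumed; adjust to wherever this fixture package lives in the repo.
	"sigs.k8s.io/cloud-provider-azure/pkg/provider/loadbalancer/fixture"
)

func TestManagedClusterFixture(t *testing.T) {
	// NewFixture().Azure() is assumed to be the package's existing builder entry point.
	mc := fixture.NewFixture().Azure().ManagedCluster().Build()
	if got := *mc.Name; got != "managedcluster" {
		t.Errorf("unexpected cluster name: %q", got)
	}
	if got := *mc.Properties.NetworkProfile.PodCidrs[0]; got != "192.1.0.0/16" {
		t.Errorf("unexpected pod CIDR: %q", got)
	}
}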
@@ -3063,48 +3063,84 @@ func (az *Cloud) reconcileSecurityGroup(
	}

	var (
		disableFloatingIP                                = consts.IsK8sServiceDisableLoadBalancerFloatingIP(service)
		lbIPAddresses, _                                 = iputil.ParseAddresses(lbIPs)
		lbIPv4Addresses, lbIPv6Addresses                 = iputil.GroupAddressesByFamily(lbIPAddresses)
		additionalIPv4Addresses, additionalIPv6Addresses = iputil.GroupAddressesByFamily(additionalIPs)
		backendIPv4Addresses, backendIPv6Addresses       []netip.Addr
		dstIPv4Addresses, dstIPv6Addresses               []netip.Addr
		dstIpv4AddressPrefix, dstIpv6AddressPrefix       []netip.Prefix
	)
	{
		// Get backend node IPs
		lb, lbFound, err := az.getAzureLoadBalancer(ctx, lbName, azcache.CacheReadTypeDefault)
	{

	if az.IsLBBackendPoolTypePodIP() {
		if !az.RetrievedClusterPodCidr {
Review comment: Generally, this would break the security boundary of the Kubernetes cluster. The cluster identity should not have the permission to manage itself.

Review comment: Let's use new cloud config options and ask the provisioning service (e.g. AKS or capz) to set up the CIDRs when they change.
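A rough sketch of the cloud-config approach suggested above, where the provisioning service writes the pod CIDRs into the cloud provider configuration instead of the cloud provider reading its own managed cluster. The PodCIDRs field name and its wiring are assumptions for illustration only, not part of this PR:

package config

import (
	"fmt"
	"net/netip"
)

// Config is a subset of the cloud provider configuration file (assumed shape).
type Config struct {
	// PodCIDRs would be written and kept up to date by the provisioning
	// service (AKS, capz), e.g. ["192.1.0.0/16", "fd00::/64"].
	PodCIDRs []string `json:"podCIDRs,omitempty"`
}

// ParsePodCIDRs splits the configured CIDRs by address family so they can be
// used directly as NSG destination prefixes.
func (c *Config) ParsePodCIDRs() ([]netip.Prefix, []netip.Prefix, error) {
	var v4, v6 []netip.Prefix
	for _, s := range c.PodCIDRs {
		prefix, err := netip.ParsePrefix(s)
		if err != nil {
			return nil, nil, fmt.Errorf("invalid pod CIDR %q: %w", s, err)
		}
		if prefix.Addr().Is4() {
			v4 = append(v4, prefix)
		} else {
			v6 = append(v6, prefix)
		}
	}
	return v4, v6, nil
}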
			mcClient := az.NetworkClientFactory.GetManagedClusterClient()
			managedCluster, err := mcClient.Get(ctx, az.ResourceGroup, clusterName)
			if err != nil {
				return nil, err
				return nil, fmt.Errorf("failed to get managed cluster: %w", err)
			}

			if managedCluster.Properties == nil || managedCluster.Properties.NetworkProfile == nil ||
				managedCluster.Properties.NetworkProfile.PodCidrs == nil {
				klog.Errorf("Failed to get PodCidrs for cluster %q", clusterName)
				return nil, fmt.Errorf("failed to get PodCidrs for cluster %q", clusterName)
			}
			if wantLb && !lbFound {
				logger.Error(err, "Failed to get load balancer")
				return nil, fmt.Errorf("unable to get lb %s", lbName)
			podCidrs := managedCluster.Properties.NetworkProfile.PodCidrs
			if len(podCidrs) == 0 {
				klog.Errorf("Failed to get PodCidrs for cluster %q", clusterName)
				return nil, fmt.Errorf("failed to get PodCidrs for cluster %q", clusterName)
			}
			for _, podCidr := range podCidrs {
				prefix, parseErr := netip.ParsePrefix(*podCidr)
				if parseErr != nil {
					klog.Errorf("Failed to parse PodCidr %q: %v", *podCidr, parseErr)
					return nil, fmt.Errorf("failed to parse PodCidr %q: %w", *podCidr, parseErr)
				}
				if prefix.Addr().Is4() {
					dstIpv4AddressPrefix = append(dstIpv4AddressPrefix, prefix)
					az.PodCidrIPv4 = prefix
				} else {
					dstIpv6AddressPrefix = append(dstIpv6AddressPrefix, prefix)
					az.PodCidrIPv6 = prefix
				}
			}
			az.RetrievedClusterPodCidr = true
Review comment: Can pod CIDRs change during cluster operations (e.g. adding a new node pool)? If yes, do we need to handle this differently to account for potential new CIDRs?

Review comment: From what I understand, the pod subnet (and hence the CIDR) is set during the cluster's deployment and cannot be updated during its lifetime. Please confirm @kartickmsft.

Review comment: I think that can happen when a completely new node pool is added: https://learn.microsoft.com/en-us/azure/aks/create-node-pools#add-a-node-pool-with-a-unique-subnet

Review comment: A pod subnet can be newly configured for a new node pool with Azure CNI dynamic IP allocation and the enhanced subnet option (https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni-dynamic-ip-allocation#adding-node-pool).

Review comment: Yes, it may change. We'd need to figure out a way to get notified of such changes.

Review comment: This may not be enough, as there may be downtime when new IPs are picked while they are still blocked by the NSG.
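One possible way to address the concern above: rather than fetching the pod CIDRs once and caching them for the lifetime of the process, re-read them with a TTL so CIDRs introduced by new node pools are eventually reflected in the NSG. A minimal sketch, with all names assumed for illustration:

package podcidr

import (
	"sync"
	"time"
)

// podCIDRCache re-reads the pod CIDRs once its TTL expires, so new node-pool
// subnets are picked up without restarting the controller.
type podCIDRCache struct {
	mu        sync.Mutex
	ttl       time.Duration
	fetchedAt time.Time
	cidrs     []string
	fetch     func() ([]string, error) // e.g. wraps the managed cluster GET
}

// Get returns the cached CIDRs, refreshing them when the TTL has expired.
// On refresh failure it serves the stale value instead of failing the reconcile.
func (c *podCIDRCache) Get() ([]string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.cidrs != nil && time.Since(c.fetchedAt) < c.ttl {
		return c.cidrs, nil
	}
	cidrs, err := c.fetch()
	if err != nil {
		if c.cidrs != nil {
			return c.cidrs, nil
		}
		return nil, err
	}
	c.cidrs, c.fetchedAt = cidrs, time.Now()
	return c.cidrs, nil
}

A periodic refresh narrows, but does not eliminate, the window in which newly allocated pod IPs could be blocked by the NSG, which matches the downtime concern raised above.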
		} else {
			dstIpv4AddressPrefix = []netip.Prefix{az.PodCidrIPv4}
			dstIpv6AddressPrefix = []netip.Prefix{az.PodCidrIPv6}
		}
	} else {
		var (
			disableFloatingIP                                = consts.IsK8sServiceDisableLoadBalancerFloatingIP(service)
			lbIPAddresses, _                                 = iputil.ParseAddresses(lbIPs)
			lbIPv4Addresses, lbIPv6Addresses                 = iputil.GroupAddressesByFamily(lbIPAddresses)
			additionalIPv4Addresses, additionalIPv6Addresses = iputil.GroupAddressesByFamily(additionalIPs)
			backendIPv4Addresses, backendIPv6Addresses       []netip.Addr
		)
		// Get backend node IPs
		lb, lbFound, err := az.getAzureLoadBalancer(ctx, lbName, azcache.CacheReadTypeDefault)
		if err != nil {
			return nil, err
		}
		if wantLb && !lbFound {
			logger.Error(err, "Failed to get load balancer")
			return nil, fmt.Errorf("unable to get lb %s", lbName)
		}
		var backendIPv4List, backendIPv6List []string
		if lbFound {
Review comment: For a pod-IP-based backend pool, we are not populating the pod IPs in the NSG, but rather the pod subnet. So we could skip GetBackendPrivateIPs for a pod-IP-based backend pool.

Review comment: This is part of the initial flow (node-IP-based backend pool). It is part of the block that is on the else branch of:
			backendIPv4List, backendIPv6List = az.LoadBalancerBackendPool.GetBackendPrivateIPs(ctx, clusterName, service, lb)
		}
		backendIPv4Addresses, _ = iputil.ParseAddresses(backendIPv4List)
		backendIPv6Addresses, _ = iputil.ParseAddresses(backendIPv6List)
	}

	var (
		dstIPv4Addresses = additionalIPv4Addresses
		dstIPv6Addresses = additionalIPv6Addresses
	)

	if disableFloatingIP {
		// use the backend node IPs
		dstIPv4Addresses = append(dstIPv4Addresses, backendIPv4Addresses...)
		dstIPv6Addresses = append(dstIPv6Addresses, backendIPv6Addresses...)
	} else {
		// use the LoadBalancer IPs
		dstIPv4Addresses = append(dstIPv4Addresses, lbIPv4Addresses...)
		dstIPv6Addresses = append(dstIPv6Addresses, lbIPv6Addresses...)
	}
	if disableFloatingIP {
		// use the backend node IPs
		dstIPv4Addresses = append(dstIPv4Addresses, backendIPv4Addresses...)
		dstIPv6Addresses = append(dstIPv6Addresses, backendIPv6Addresses...)
	} else {
		// use the LoadBalancer IPs
		dstIPv4Addresses = append(dstIPv4Addresses, lbIPv4Addresses...)
		dstIPv6Addresses = append(dstIPv6Addresses, lbIPv6Addresses...)
	}

	{
		retainPortRanges, err := az.listSharedIPPortMapping(ctx, service, append(dstIPv4Addresses, dstIPv6Addresses...))
		if err != nil {
			logger.Error(err, "Failed to list retain port ranges")
@@ -3118,7 +3154,7 @@ func (az *Cloud) reconcileSecurityGroup(
	}

	if wantLb {
		err := accessControl.PatchSecurityGroup(dstIPv4Addresses, dstIPv6Addresses)
		err := accessControl.PatchSecurityGroup(dstIPv4Addresses, dstIPv6Addresses, dstIpv4AddressPrefix, dstIpv6AddressPrefix)
		if err != nil {
			logger.Error(err, "Failed to patch security group")
			return nil, err
Review comment: Can you add some details about the change in the PR description?

Review comment: Updated the PR description.