Skip to content

Commit

Permalink
fix: fix get null podCIDR and serviceCIDR
Browse files Browse the repository at this point in the history
Signed-off-by: ruochen <[email protected]>

fix: fix get null podCIDR and serviceCIDR

Signed-off-by: ruochen <[email protected]>
  • Loading branch information
0x0034 committed Dec 27, 2024
1 parent 7fd26d8 commit 84b8283
Show file tree
Hide file tree
Showing 4 changed files with 182 additions and 23 deletions.
63 changes: 41 additions & 22 deletions pkg/coordinatormanager/coordinator_informer.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import (
"time"

"github.com/cilium/cilium/pkg/ipam/option"
v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
"github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
cilium_externalversions "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions"
ciliumLister "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1"
Expand Down Expand Up @@ -465,15 +465,30 @@ func (cc *CoordinatorController) updatePodAndServerCIDR(ctx context.Context, log

var cm corev1.ConfigMap
var k8sPodCIDR, k8sServiceCIDR []string
if err := cc.APIReader.Get(ctx, types.NamespacedName{Namespace: metav1.NamespaceSystem, Name: "kubeadm-config"}, &cm); err == nil {
logger.Sugar().Infof("Trying to fetch the ClusterCIDR from kube-system/kubeadm-config")
k8sPodCIDR, k8sServiceCIDR = ExtractK8sCIDRFromKubeadmConfigMap(&cm)
logger.Sugar().Infof("kubeadm-config configMap k8sPodCIDR %v, k8sServiceCIDR %v", k8sPodCIDR, k8sServiceCIDR)
} else {
// try to get ClusterCIDR from kubeadm-config ConfigMap
err = cc.APIReader.Get(ctx, types.NamespacedName{
Namespace: metav1.NamespaceSystem,
Name: "kubeadm-config",
}, &cm)

if err == nil {
logger.Sugar().Info("Trying to fetch the ClusterCIDR from kube-system/kubeadm-config")
k8sPodCIDR, k8sServiceCIDR, err = ExtractK8sCIDRFromKubeadmConfigMap(&cm)
if err == nil {
// Success to get ClusterCIDR from kubeadm-config
logger.Sugar().Infof("Success get CIDR from kubeadm-config: PodCIDR=%v, ServiceCIDR=%v", k8sPodCIDR, k8sServiceCIDR)
} else {
logger.Sugar().Warnf("Failed get CIDR from kubeadm-config: %v", err)
}
}

// if kubeadm-config ConfigMap not found, try to get ClusterCIDR from kube-controller-manager Pod
if len(k8sPodCIDR) == 0 || len(k8sServiceCIDR) == 0 {
logger.Sugar().Warnf("failed to get kube-system/kubeadm-config: %v, trying to fetch the ClusterCIDR from kube-controller-manager", err)
var cmPodList corev1.PodList
err = cc.APIReader.List(ctx, &cmPodList, client.MatchingLabels{"component": "kube-controller-manager"})
if err != nil {
var podList corev1.PodList
listOptions := client.MatchingLabels{"component": "kube-controller-manager"}

if err := cc.APIReader.List(ctx, &podList, listOptions); err != nil {
logger.Sugar().Errorf("failed to get kube-controller-manager Pod with label \"component: kube-controller-manager\": %v", err)
event.EventRecorder.Eventf(
coordCopy,
Expand All @@ -485,14 +500,14 @@ func (cc *CoordinatorController) updatePodAndServerCIDR(ctx context.Context, log
return coordCopy
}

if len(cmPodList.Items) == 0 {
if len(podList.Items) == 0 {
errMsg := "No kube-controller-manager pod found, unable to get clusterCIDR"
logger.Error(errMsg)
setStatus2NoReady(logger, errMsg, coordCopy)
return coordCopy
}

k8sPodCIDR, k8sServiceCIDR = ExtractK8sCIDRFromKCMPod(&cmPodList.Items[0])
k8sPodCIDR, k8sServiceCIDR = ExtractK8sCIDRFromKCMPod(&podList.Items[0])
logger.Sugar().Infof("kube-controller-manager k8sPodCIDR %v, k8sServiceCIDR %v", k8sPodCIDR, k8sServiceCIDR)
}

Expand Down Expand Up @@ -757,20 +772,23 @@ func (cc *CoordinatorController) updateServiceCIDR(logger *zap.Logger, coordCopy
return nil
}

func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []string) {
func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []string, error) {
var podCIDR, serviceCIDR []string

podReg := regexp.MustCompile(`podSubnet: (.*)`)
serviceReg := regexp.MustCompile(`serviceSubnet: (.*)`)

var podSubnets, serviceSubnets []string
for _, data := range cm.Data {
podSubnets = podReg.FindStringSubmatch(data)
serviceSubnets = serviceReg.FindStringSubmatch(data)
clusterConfig, exists := cm.Data["ClusterConfiguration"]
if !exists {
return podCIDR, serviceCIDR, fmt.Errorf("unable to get kubeadm configmap ClusterConfiguration")
}

if len(podSubnets) != 0 {
podReg := regexp.MustCompile(`podSubnet:\s*(\S+)`)
serviceReg := regexp.MustCompile(`serviceSubnet:\s*(\S+)`)

podSubnets := podReg.FindStringSubmatch(clusterConfig)
serviceSubnets := serviceReg.FindStringSubmatch(clusterConfig)

if len(podSubnets) > 1 {
for _, cidr := range strings.Split(podSubnets[1], ",") {
cidr = strings.TrimSpace(cidr)
_, _, err := net.ParseCIDR(cidr)
if err != nil {
continue
Expand All @@ -779,8 +797,9 @@ func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []strin
}
}

if len(serviceSubnets) != 0 {
if len(serviceSubnets) > 1 {
for _, cidr := range strings.Split(serviceSubnets[1], ",") {
cidr = strings.TrimSpace(cidr)
_, _, err := net.ParseCIDR(cidr)
if err != nil {
continue
Expand All @@ -789,7 +808,7 @@ func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []strin
}
}

return podCIDR, serviceCIDR
return podCIDR, serviceCIDR, nil
}

func ExtractK8sCIDRFromKCMPod(kcm *corev1.Pod) ([]string, []string) {
Expand Down
123 changes: 123 additions & 0 deletions pkg/coordinatormanager/coordinator_informer_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
// Copyright 2022 Authors of spidernet-io
// SPDX-License-Identifier: Apache-2.0

package coordinatormanager

import (
"encoding/json"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
)

var _ = Describe("Coordinator Manager", Label("unittest", "informer_test"), Serial, func() {

	// The kubeadm-config manifests are declared at tree-construction time
	// (not inside a BeforeEach) on purpose: Ginkgo evaluates DescribeTable
	// Entry arguments while it builds the spec tree, BEFORE any BeforeEach
	// runs. A *corev1.ConfigMap populated in a BeforeEach would therefore
	// reach every Entry as nil. We pass the raw JSON manifest instead and
	// unmarshal it inside the spec body.

	// ClusterConfiguration rendered as one single JSON line.
	clusterConfigurationInOneLineJSON := `{"apiVersion":"v1","data":{"ClusterConfiguration":"apiServer:\n certSANs:\n - 127.0.0.1\n - apiserver.cluster.local\n - 10.103.97.2\n - 192.168.165.128\n extraArgs:\n audit-log-format: json\n audit-log-maxage: \"7\"\n audit-log-maxbackup: \"10\"\n audit-log-maxsize: \"100\"\n audit-log-path: /var/log/kubernetes/audit.log\n audit-policy-file: /etc/kubernetes/audit-policy.yml\n authorization-mode: Node,RBAC\n enable-aggregator-routing: \"true\"\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/kubernetes\n mountPath: /etc/kubernetes\n name: audit\n pathType: DirectoryOrCreate\n - hostPath: /var/log/kubernetes\n mountPath: /var/log/kubernetes\n name: audit-log\n pathType: DirectoryOrCreate\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta2\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: apiserver.cluster.local:6443\ncontrollerManager:\n extraArgs:\n bind-address: 0.0.0.0\n cluster-signing-duration: 876000h\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\ndns:\n type: CoreDNS\netcd:\n local:\n dataDir: /var/lib/etcd\n extraArgs:\n listen-metrics-urls: http://0.0.0.0:2381\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.21.14\nnetworking:\n dnsDomain: cluster.local\n podSubnet: 192.168.165.0/24\n serviceSubnet: 245.100.128.0/18\nscheduler:\n extraArgs:\n bind-address: 0.0.0.0\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n","ClusterStatus":"apiEndpoints:\n anolios79:\n advertiseAddress: 192.168.165.128\n bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta2\nkind: ClusterStatus\n"},"kind":"ConfigMap","metadata":{"name":"kubeadm-config","namespace":"kube-system"}}`

	// ClusterConfiguration rendered as pretty-printed JSON.
	clusterConfigurationJSON := `{
		"apiVersion": "v1",
		"data": {
			"ClusterConfiguration": "apiServer:\n certSANs:\n - 127.0.0.1\n - apiserver.cluster.local\n - 10.103.97.2\n - 192.168.165.128\n extraArgs:\n audit-log-format: json\n audit-log-maxage: \"7\"\n audit-log-maxbackup: \"10\"\n audit-log-maxsize: \"100\"\n audit-log-path: /var/log/kubernetes/audit.log\n audit-policy-file: /etc/kubernetes/audit-policy.yml\n authorization-mode: Node,RBAC\n enable-aggregator-routing: \"true\"\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/kubernetes\n mountPath: /etc/kubernetes\n name: audit\n pathType: DirectoryOrCreate\n - hostPath: /var/log/kubernetes\n mountPath: /var/log/kubernetes\n name: audit-log\n pathType: DirectoryOrCreate\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta2\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: apiserver.cluster.local:6443\ncontrollerManager:\n extraArgs:\n bind-address: 0.0.0.0\n cluster-signing-duration: 876000h\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\ndns:\n type: CoreDNS\netcd:\n local:\n dataDir: /var/lib/etcd\n extraArgs:\n listen-metrics-urls: http://0.0.0.0:2381\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.21.14\nnetworking:\n dnsDomain: cluster.local\n podSubnet: 192.168.165.0/24\n serviceSubnet: 245.100.128.0/18\nscheduler:\n extraArgs:\n bind-address: 0.0.0.0\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n",
			"ClusterStatus": "apiEndpoints:\n anolios79:\n advertiseAddress: 192.168.165.128\n bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta2\nkind: ClusterStatus\n"
		},
		"kind": "ConfigMap",
		"metadata": {
			"name": "kubeadm-config",
			"namespace": "kube-system"
		}
	}`

	// kubeadm-config that carries no ClusterConfiguration key at all.
	noClusterConfigurationJSON := `{
		"apiVersion": "v1",
		"data": {
			"ClusterStatus": "apiEndpoints:\n anolios79:\n advertiseAddress: 192.168.165.128\n bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta2\nkind: ClusterStatus\n"
		},
		"kind": "ConfigMap",
		"metadata": {
			"name": "kubeadm-config",
			"namespace": "kube-system"
		}
	}`

	// ClusterConfiguration whose networking section has neither podSubnet
	// nor serviceSubnet.
	noCIDRJSON := `{
		"apiVersion": "v1",
		"data": {
			"ClusterConfiguration": "apiServer:\n certSANs:\n - 127.0.0.1\n - apiserver.cluster.local\n - 10.103.97.2\n - 192.168.165.128\n extraArgs:\n audit-log-format: json\n audit-log-maxage: \"7\"\n audit-log-maxbackup: \"10\"\n audit-log-maxsize: \"100\"\n audit-log-path: /var/log/kubernetes/audit.log\n audit-policy-file: /etc/kubernetes/audit-policy.yml\n authorization-mode: Node,RBAC\n enable-aggregator-routing: \"true\"\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/kubernetes\n mountPath: /etc/kubernetes\n name: audit\n pathType: DirectoryOrCreate\n - hostPath: /var/log/kubernetes\n mountPath: /var/log/kubernetes\n name: audit-log\n pathType: DirectoryOrCreate\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta2\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: apiserver.cluster.local:6443\ncontrollerManager:\n extraArgs:\n bind-address: 0.0.0.0\n cluster-signing-duration: 876000h\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\ndns:\n type: CoreDNS\netcd:\n local:\n dataDir: /var/lib/etcd\n extraArgs:\n listen-metrics-urls: http://0.0.0.0:2381\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.21.14\nnetworking:\n dnsDomain: cluster.local\nscheduler:\n extraArgs:\n bind-address: 0.0.0.0\n feature-gates: EphemeralContainers=true,TTLAfterFinished=true\n extraVolumes:\n - hostPath: /etc/localtime\n mountPath: /etc/localtime\n name: localtime\n pathType: File\n readOnly: true\n",
			"ClusterStatus": "apiEndpoints:\n anolios79:\n advertiseAddress: 192.168.165.128\n bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta2\nkind: ClusterStatus\n"
		},
		"kind": "ConfigMap",
		"metadata": {
			"name": "kubeadm-config",
			"namespace": "kube-system"
		}
	}`

	// unmarshalConfigMap decodes a JSON manifest into a corev1.ConfigMap and
	// fails the current spec if the manifest is malformed.
	unmarshalConfigMap := func(manifest string) *corev1.ConfigMap {
		cm := &corev1.ConfigMap{}
		err := json.Unmarshal([]byte(manifest), cm)
		Expect(err).NotTo(HaveOccurred(), "Failed to unmarshal ConfigMap manifest")
		return cm
	}

	DescribeTable("should extract CIDRs correctly",
		// The table body is itself the spec: wrapping its content in a nested
		// It() is invalid Ginkgo usage and would abort the suite at runtime,
		// so the assertions run directly in the body.
		func(manifest string, expectedPodCIDR, expectedServiceCIDR []string, expectError bool) {
			podCIDR, serviceCIDR, err := ExtractK8sCIDRFromKubeadmConfigMap(unmarshalConfigMap(manifest))

			if expectError {
				Expect(err).To(HaveOccurred(), "Expected an error but got none")
			} else {
				Expect(err).NotTo(HaveOccurred(), "Did not expect an error but got one: %v", err)
			}

			Expect(podCIDR).To(Equal(expectedPodCIDR), "Pod CIDR does not match")
			Expect(serviceCIDR).To(Equal(expectedServiceCIDR), "Service CIDR does not match")
		},
		Entry("ClusterConfiguration In One line",
			clusterConfigurationInOneLineJSON,
			[]string{"192.168.165.0/24"},
			[]string{"245.100.128.0/18"},
			false,
		),
		Entry("ClusterConfiguration",
			clusterConfigurationJSON,
			[]string{"192.168.165.0/24"},
			[]string{"245.100.128.0/18"},
			false,
		),
		Entry("No ClusterConfiguration",
			noClusterConfigurationJSON,
			nil,
			nil,
			true,
		),
		Entry("No CIDR",
			noCIDRJSON,
			nil,
			nil,
			false,
		),
	)
})
16 changes: 16 additions & 0 deletions pkg/coordinatormanager/coordinatormanager_suite_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
// Copyright 2022 Authors of spidernet-io
// SPDX-License-Identifier: Apache-2.0

package coordinatormanager

import (
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

// TestCoordinatorManager is the standard `go test` entry point for this
// package: it installs Gomega's failure handler into Ginkgo and runs every
// spec registered in the package as the "CoordinatorManager Suite".
func TestCoordinatorManager(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "CoordinatorManager Suite")
}
3 changes: 2 additions & 1 deletion test/e2e/spidercoordinator/spidercoordinator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -464,7 +464,8 @@ var _ = Describe("SpiderCoordinator", Label("spidercoordinator", "overlay"), Ser
It("Prioritize getting ClusterCIDR from kubeadm-config", Label("V00009"), func() {
GinkgoWriter.Printf("podCIDR and serviceCIDR from spidercoordinator: %v,%v\n", spc.Status.OverlayPodCIDR, spc.Status.ServiceCIDR)

podCIDR, serviceCIDr := coordinatormanager.ExtractK8sCIDRFromKubeadmConfigMap(cm)
podCIDR, serviceCIDr, err := coordinatormanager.ExtractK8sCIDRFromKubeadmConfigMap(cm)
Expect(err).NotTo(HaveOccurred(), "Failed to extract k8s CIDR from Kubeadm configMap, error is %v", err)
GinkgoWriter.Printf("podCIDR and serviceCIDR from kubeadm-config : %v,%v\n", podCIDR, serviceCIDr)

Eventually(func() bool {
Expand Down

0 comments on commit 84b8283

Please sign in to comment.